Merge branch 'nf-next' of git://1984.lsi.us.es/net-next
author David S. Miller <davem@davemloft.net>
Sun, 25 Dec 2011 07:21:45 +0000 (02:21 -0500)
committer David S. Miller <davem@davemloft.net>
Sun, 25 Dec 2011 07:21:45 +0000 (02:21 -0500)
1226 files changed:
CREDITS
Documentation/ABI/testing/sysfs-bus-rbd
Documentation/cgroups/memory.txt
Documentation/feature-removal-schedule.txt
Documentation/kernel-parameters.txt
Documentation/networking/batman-adv.txt
Documentation/networking/ip-sysctl.txt
Documentation/networking/packet_mmap.txt
Documentation/networking/scaling.txt
Documentation/networking/stmmac.txt
Documentation/sound/alsa/soc/machine.txt
Documentation/usb/linux-cdc-acm.inf
MAINTAINERS
Makefile
arch/arm/Kconfig
arch/arm/configs/omap1_defconfig
arch/arm/include/asm/unwind.h
arch/arm/kernel/perf_event.c
arch/arm/kernel/setup.c
arch/arm/kernel/unwind.c
arch/arm/mach-at91/at91rm9200_devices.c
arch/arm/mach-at91/at91sam9260.c
arch/arm/mach-at91/at91sam9260_devices.c
arch/arm/mach-at91/at91sam9261_devices.c
arch/arm/mach-at91/at91sam9263_devices.c
arch/arm/mach-at91/include/mach/system_rev.h
arch/arm/mach-davinci/board-da850-evm.c
arch/arm/mach-davinci/board-dm365-evm.c
arch/arm/mach-davinci/board-dm646x-evm.c
arch/arm/mach-davinci/dm646x.c
arch/arm/mach-davinci/include/mach/psc.h
arch/arm/mach-davinci/psc.c
arch/arm/mach-exynos/mct.c
arch/arm/mach-imx/mach-imx6q.c
arch/arm/mach-msm/devices-iommu.c
arch/arm/mach-mx5/board-mx51_babbage.c
arch/arm/mach-mx5/board-mx53_evk.c
arch/arm/mach-mx5/board-mx53_loco.c
arch/arm/mach-mx5/board-mx53_smd.c
arch/arm/mach-mx5/imx51-dt.c
arch/arm/mach-mx5/imx53-dt.c
arch/arm/mach-mxs/include/mach/mx28.h
arch/arm/mach-mxs/include/mach/mxs.h
arch/arm/mach-mxs/mach-m28evk.c
arch/arm/mach-mxs/mach-stmp378x_devb.c
arch/arm/mach-mxs/module-tx28.c
arch/arm/mach-omap1/clock_data.c
arch/arm/mach-omap2/board-rx51-peripherals.c
arch/arm/mach-omap2/mcbsp.c
arch/arm/mach-prima2/pm.c
arch/arm/mach-prima2/prima2.c
arch/arm/mach-s3c64xx/dev-spi.c
arch/arm/mach-s3c64xx/s3c6400.c
arch/arm/mach-s3c64xx/setup-fb-24bpp.c
arch/arm/mach-s5pv210/mach-smdkv210.c
arch/arm/mach-sa1100/Makefile.boot
arch/arm/mach-shmobile/board-ag5evm.c
arch/arm/mach-shmobile/board-kota2.c
arch/arm/mach-shmobile/clock-sh73a0.c
arch/arm/plat-mxc/cpufreq.c
arch/arm/plat-mxc/pwm.c
arch/arm/plat-samsung/dev-backlight.c
arch/m68k/include/asm/unistd.h
arch/m68k/kernel/syscalltable.S
arch/mips/kernel/perf_event_mipsxx.c
arch/s390/include/asm/pgtable.h
arch/s390/kernel/ptrace.c
arch/s390/kernel/setup.c
arch/s390/kernel/signal.c
arch/s390/oprofile/init.c
arch/sh/boards/board-sh7757lcr.c
arch/sparc/kernel/ds.c
arch/sparc/kernel/prom_common.c
arch/sparc/mm/btfixup.c
arch/tile/include/asm/irq.h
arch/tile/kernel/irq.c
arch/tile/kernel/pci-dma.c
arch/tile/kernel/pci.c
arch/tile/kernel/sysfs.c
arch/tile/lib/exports.c
arch/tile/mm/homecache.c
arch/x86/Kconfig
arch/x86/include/asm/intel_scu_ipc.h
arch/x86/include/asm/mrst.h
arch/x86/include/asm/msr.h
arch/x86/include/asm/system.h
arch/x86/include/asm/timer.h
arch/x86/include/asm/uv/uv_mmrs.h
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/mtrr/generic.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_amd_ibs.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/cpu/perf_event_p4.c
arch/x86/kernel/dumpstack_32.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/hpet.c
arch/x86/kernel/irq_64.c
arch/x86/kernel/microcode_core.c
arch/x86/kernel/mpparse.c
arch/x86/kernel/process.c
arch/x86/kernel/quirks.c
arch/x86/kernel/reboot.c
arch/x86/kernel/rtc.c
arch/x86/mm/gup.c
arch/x86/mm/highmem_32.c
arch/x86/net/bpf_jit_comp.c
arch/x86/oprofile/init.c
arch/x86/platform/efi/efi_32.c
arch/x86/platform/mrst/mrst.c
arch/x86/xen/setup.c
block/blk-core.c
block/cfq-iosched.c
drivers/ata/Kconfig
drivers/base/core.c
drivers/bcma/bcma_private.h
drivers/bcma/host_pci.c
drivers/bcma/main.c
drivers/bcma/sprom.c
drivers/block/cciss.c
drivers/block/loop.c
drivers/block/rbd.c
drivers/block/swim3.c
drivers/bluetooth/Kconfig
drivers/bluetooth/btmrvl_sdio.c
drivers/bluetooth/btusb.c
drivers/bluetooth/hci_vhci.c
drivers/char/ipmi/ipmi_watchdog.c
drivers/firmware/iscsi_ibft.c
drivers/firmware/iscsi_ibft_find.c
drivers/firmware/sigma.c
drivers/gpio/Makefile
drivers/gpio/gpio-da9052.c
drivers/gpio/gpio-ml-ioh.c
drivers/gpio/gpio-mpc8xxx.c
drivers/gpio/gpio-pl061.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_drv.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_object.c
drivers/gpu/drm/nouveau/nouveau_sgdma.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nvc0_graph.c
drivers/gpu/drm/nouveau/nvd0_display.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/evergreen_reg.h
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/radeon_acpi.c
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hwmon/jz4740-hwmon.c
drivers/i2c/busses/i2c-eg20t.c
drivers/i2c/busses/i2c-omap.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/infiniband/core/addr.c
drivers/infiniband/core/cma.c
drivers/infiniband/hw/cxgb3/iwch_cm.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/qib/qib_file_ops.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/input/misc/cma3000_d0x.c
drivers/input/mouse/synaptics.c
drivers/input/tablet/wacom_wac.c
drivers/iommu/intel-iommu.c
drivers/iommu/intr_remapping.c
drivers/md/bitmap.c
drivers/md/md.c
drivers/md/raid5.c
drivers/media/common/tuners/mxl5007t.c
drivers/media/common/tuners/tda18218.c
drivers/media/rc/ati_remote.c
drivers/media/rc/keymaps/rc-ati-x10.c
drivers/media/rc/keymaps/rc-medion-x10.c
drivers/media/rc/keymaps/rc-snapstream-firefly.c
drivers/media/video/au0828/au0828-cards.c
drivers/media/video/m5mols/m5mols.h
drivers/media/video/m5mols/m5mols_core.c
drivers/media/video/mt9m111.c
drivers/media/video/mt9t112.c
drivers/media/video/omap/omap_vout.c
drivers/media/video/omap1_camera.c
drivers/media/video/omap24xxcam-dma.c
drivers/media/video/omap3isp/ispvideo.c
drivers/media/video/ov6650.c
drivers/media/video/s5p-fimc/fimc-capture.c
drivers/media/video/s5p-fimc/fimc-core.c
drivers/media/video/s5p-fimc/fimc-core.h
drivers/media/video/s5p-fimc/fimc-mdevice.c
drivers/media/video/s5p-fimc/fimc-reg.c
drivers/media/video/s5p-mfc/s5p_mfc_enc.c
drivers/media/video/s5p-tv/mixer_video.c
drivers/media/video/sh_mobile_ceu_camera.c
drivers/media/video/sh_mobile_csi2.c
drivers/media/video/soc_camera.c
drivers/mfd/ab5500-debugfs.c
drivers/mfd/ab8500-core.c
drivers/mfd/adp5520.c
drivers/mfd/da903x.c
drivers/mfd/jz4740-adc.c
drivers/mfd/tps6586x.c
drivers/mfd/tps65910.c
drivers/mfd/twl-core.c
drivers/mfd/twl4030-irq.c
drivers/mfd/wm8994-core.c
drivers/mmc/card/block.c
drivers/mmc/core/core.c
drivers/mmc/core/host.c
drivers/mmc/core/mmc.c
drivers/mmc/host/mxcmmc.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/sdhci-cns3xxx.c
drivers/mmc/host/sdhci-dove.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sdhci-of-esdhc.c
drivers/mmc/host/sdhci-of-hlwd.c
drivers/mmc/host/sdhci-pci.c
drivers/mmc/host/sdhci-pltfm.c
drivers/mmc/host/sdhci-pltfm.h
drivers/mmc/host/sdhci-pxav2.c
drivers/mmc/host/sdhci-pxav3.c
drivers/mmc/host/sdhci-s3c.c
drivers/mmc/host/sdhci-tegra.c
drivers/mmc/host/sdhci.c
drivers/mmc/host/sdhci.h
drivers/mmc/host/sh_mmcif.c
drivers/mmc/host/tmio_mmc_pio.c
drivers/mmc/host/vub300.c
drivers/mtd/maps/plat-ram.c
drivers/mtd/maps/pxa2xx-flash.c
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
drivers/mtd/nand/ndfc.c
drivers/net/bonding/bond_main.c
drivers/net/caif/caif_hsi.c
drivers/net/caif/caif_serial.c
drivers/net/caif/caif_shmcore.c
drivers/net/caif/caif_spi.c
drivers/net/can/vcan.c
drivers/net/ethernet/adaptec/starfire.c
drivers/net/ethernet/amd/amd8111e.h
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/broadcom/cnic_if.h
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/broadcom/tg3.h
drivers/net/ethernet/brocade/bna/Makefile
drivers/net/ethernet/brocade/bna/bfa_cee.c
drivers/net/ethernet/brocade/bna/bfa_cee.h
drivers/net/ethernet/brocade/bna/bfa_defs.h
drivers/net/ethernet/brocade/bna/bfa_ioc.c
drivers/net/ethernet/brocade/bna/bfa_ioc.h
drivers/net/ethernet/brocade/bna/bfi.h
drivers/net/ethernet/brocade/bna/bna_enet.c
drivers/net/ethernet/brocade/bna/bna_types.h
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/brocade/bna/bnad.h
drivers/net/ethernet/brocade/bna/bnad_debugfs.c [new file with mode: 0644]
drivers/net/ethernet/brocade/bna/bnad_ethtool.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
drivers/net/ethernet/chelsio/cxgb3/l2t.c
drivers/net/ethernet/chelsio/cxgb3/l2t.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/cisco/enic/enic_dev.c
drivers/net/ethernet/cisco/enic/enic_dev.h
drivers/net/ethernet/dec/tulip/de4x5.c
drivers/net/ethernet/dlink/de600.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/Kconfig
drivers/net/ethernet/freescale/fec.c
drivers/net/ethernet/freescale/fsl_pq_mdio.c
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igbvf/netdev.c
drivers/net/ethernet/intel/ixgb/ixgb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/mellanox/mlx4/Makefile
drivers/net/ethernet/mellanox/mlx4/catas.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/cq.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_port.c
drivers/net/ethernet/mellanox/mlx4/en_port.h
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_selftest.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/fw.h
drivers/net/ethernet/mellanox/mlx4/icm.c
drivers/net/ethernet/mellanox/mlx4/intf.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mcg.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/mr.c
drivers/net/ethernet/mellanox/mlx4/pd.c
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/mellanox/mlx4/profile.c
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/sense.c
drivers/net/ethernet/mellanox/mlx4/srq.c
drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h
drivers/net/ethernet/neterion/vxge/vxge-main.c
drivers/net/ethernet/pasemi/Makefile
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/efx.h
drivers/net/ethernet/sfc/ethtool.c
drivers/net/ethernet/sfc/falcon.c
drivers/net/ethernet/sfc/mtd.c
drivers/net/ethernet/sfc/rx.c
drivers/net/ethernet/sfc/siena.c
drivers/net/ethernet/stmicro/stmmac/Kconfig
drivers/net/ethernet/stmicro/stmmac/Makefile
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c [new file with mode: 0644]
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c [new file with mode: 0644]
drivers/net/ethernet/sun/sunhme.c
drivers/net/ethernet/tehuti/tehuti.c
drivers/net/ethernet/ti/davinci_cpdma.c
drivers/net/ethernet/tile/tilepro.c
drivers/net/ethernet/via/via-rhine.c
drivers/net/ethernet/via/via-velocity.c
drivers/net/ethernet/xilinx/ll_temac_main.c
drivers/net/ethernet/xilinx/xilinx_emaclite.c
drivers/net/irda/donauboe.c
drivers/net/irda/smsc-ircc2.c
drivers/net/macvlan.c
drivers/net/macvtap.c
drivers/net/phy/Kconfig
drivers/net/phy/Makefile
drivers/net/phy/spi_ks8995.c [new file with mode: 0644]
drivers/net/ppp/pptp.c
drivers/net/team/team.c
drivers/net/usb/asix.c
drivers/net/usb/pegasus.c
drivers/net/usb/smsc75xx.c
drivers/net/usb/smsc95xx.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_ethtool.c
drivers/net/wan/sbni.c
drivers/net/wan/sealevel.c
drivers/net/wimax/i2400m/tx.c
drivers/net/wimax/i2400m/usb-tx.c
drivers/net/wireless/ath/ath.h
drivers/net/wireless/ath/ath5k/ahb.c
drivers/net/wireless/ath/ath5k/ani.c
drivers/net/wireless/ath/ath5k/ani.h
drivers/net/wireless/ath/ath5k/ath5k.h
drivers/net/wireless/ath/ath5k/attach.c
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath5k/caps.c
drivers/net/wireless/ath/ath5k/desc.c
drivers/net/wireless/ath/ath5k/desc.h
drivers/net/wireless/ath/ath5k/dma.c
drivers/net/wireless/ath/ath5k/gpio.c
drivers/net/wireless/ath/ath5k/initvals.c
drivers/net/wireless/ath/ath5k/pci.c
drivers/net/wireless/ath/ath5k/pcu.c
drivers/net/wireless/ath/ath5k/phy.c
drivers/net/wireless/ath/ath5k/qcu.c
drivers/net/wireless/ath/ath5k/reg.h
drivers/net/wireless/ath/ath5k/reset.c
drivers/net/wireless/ath/ath5k/rfbuffer.h
drivers/net/wireless/ath/ath5k/rfgain.h
drivers/net/wireless/ath/ath6kl/init.c
drivers/net/wireless/ath/ath9k/Kconfig
drivers/net/wireless/ath/ath9k/Makefile
drivers/net/wireless/ath/ath9k/ar9002_calib.c
drivers/net/wireless/ath/ath9k/ar9003_calib.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
drivers/net/wireless/ath/ath9k/ar9003_mac.c
drivers/net/wireless/ath/ath9k/ar9003_mci.c [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/ar9003_mci.h [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/ar9003_phy.h
drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/btcoex.c
drivers/net/wireless/ath/ath9k/btcoex.h
drivers/net/wireless/ath/ath9k/debug.c
drivers/net/wireless/ath/ath9k/debug.h
drivers/net/wireless/ath/ath9k/dfs.c [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/dfs.h [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/dfs_debug.c [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/dfs_debug.h [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/eeprom_4k.c
drivers/net/wireless/ath/ath9k/eeprom_9287.c
drivers/net/wireless/ath/ath9k/eeprom_def.c
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/ath/ath9k/hw-ops.h
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/hw.h
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/mac.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/mci.c
drivers/net/wireless/ath/ath9k/mci.h
drivers/net/wireless/ath/ath9k/pci.c
drivers/net/wireless/ath/ath9k/rc.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/ath9k/reg.h
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/carl9170/main.c
drivers/net/wireless/ath/regd.c
drivers/net/wireless/b43/dma.c
drivers/net/wireless/b43/leds.c
drivers/net/wireless/b43/lo.c
drivers/net/wireless/b43/main.c
drivers/net/wireless/b43/phy_common.c
drivers/net/wireless/b43/phy_g.c
drivers/net/wireless/b43/phy_lp.c
drivers/net/wireless/b43/phy_n.c
drivers/net/wireless/b43/phy_n.h
drivers/net/wireless/b43/pio.c
drivers/net/wireless/b43/radio_2056.c
drivers/net/wireless/b43/radio_2056.h
drivers/net/wireless/b43/tables_nphy.c
drivers/net/wireless/b43/tables_nphy.h
drivers/net/wireless/b43/xmit.c
drivers/net/wireless/b43legacy/dma.c
drivers/net/wireless/b43legacy/leds.c
drivers/net/wireless/b43legacy/main.c
drivers/net/wireless/b43legacy/radio.c
drivers/net/wireless/brcm80211/Kconfig
drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h [deleted file]
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
drivers/net/wireless/brcm80211/brcmfmac/dhd.h
drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
drivers/net/wireless/brcm80211/brcmsmac/d11.h
drivers/net/wireless/brcm80211/brcmsmac/dma.c
drivers/net/wireless/brcm80211/brcmsmac/dma.h
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
drivers/net/wireless/brcm80211/brcmsmac/main.c
drivers/net/wireless/brcm80211/brcmsmac/main.h
drivers/net/wireless/brcm80211/brcmsmac/nicpci.c
drivers/net/wireless/brcm80211/brcmsmac/nicpci.h
drivers/net/wireless/brcm80211/brcmsmac/otp.c
drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
drivers/net/wireless/brcm80211/brcmsmac/pmu.c
drivers/net/wireless/brcm80211/brcmsmac/pmu.h
drivers/net/wireless/brcm80211/brcmsmac/pub.h
drivers/net/wireless/brcm80211/brcmsmac/srom.c
drivers/net/wireless/brcm80211/brcmsmac/srom.h
drivers/net/wireless/brcm80211/brcmsmac/types.h
drivers/net/wireless/brcm80211/include/chipcommon.h
drivers/net/wireless/hostap/hostap_cs.c
drivers/net/wireless/iwlegacy/common.c
drivers/net/wireless/iwlwifi/Kconfig
drivers/net/wireless/iwlwifi/Makefile
drivers/net/wireless/iwlwifi/iwl-1000.c
drivers/net/wireless/iwlwifi/iwl-2000.c
drivers/net/wireless/iwlwifi/iwl-5000.c
drivers/net/wireless/iwlwifi/iwl-6000.c
drivers/net/wireless/iwlwifi/iwl-agn-calib.c
drivers/net/wireless/iwlwifi/iwl-agn-calib.h
drivers/net/wireless/iwlwifi/iwl-agn-lib.c
drivers/net/wireless/iwlwifi/iwl-agn-rs.c
drivers/net/wireless/iwlwifi/iwl-agn-rs.h
drivers/net/wireless/iwlwifi/iwl-agn-rx.c
drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
drivers/net/wireless/iwlwifi/iwl-agn-sta.c
drivers/net/wireless/iwlwifi/iwl-agn-tx.c
drivers/net/wireless/iwlwifi/iwl-agn-ucode.c [deleted file]
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/iwlwifi/iwl-agn.h
drivers/net/wireless/iwlwifi/iwl-commands.h
drivers/net/wireless/iwlwifi/iwl-core.c
drivers/net/wireless/iwlwifi/iwl-core.h
drivers/net/wireless/iwlwifi/iwl-debug.h
drivers/net/wireless/iwlwifi/iwl-debugfs.c
drivers/net/wireless/iwlwifi/iwl-dev.h
drivers/net/wireless/iwlwifi/iwl-eeprom.c
drivers/net/wireless/iwlwifi/iwl-eeprom.h
drivers/net/wireless/iwlwifi/iwl-mac80211.c
drivers/net/wireless/iwlwifi/iwl-shared.h
drivers/net/wireless/iwlwifi/iwl-sv-open.c [deleted file]
drivers/net/wireless/iwlwifi/iwl-testmode.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/iwl-testmode.h
drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/iwl-ucode.c [new file with mode: 0644]
drivers/net/wireless/iwmc3200wifi/main.c
drivers/net/wireless/iwmc3200wifi/rx.c
drivers/net/wireless/libertas/if_cs.c
drivers/net/wireless/libertas/if_spi.c
drivers/net/wireless/libertas_tf/main.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwifiex/cfg80211.h
drivers/net/wireless/mwifiex/cmdevt.c
drivers/net/wireless/mwifiex/init.c
drivers/net/wireless/mwifiex/main.c
drivers/net/wireless/mwifiex/main.h
drivers/net/wireless/mwifiex/pcie.c
drivers/net/wireless/mwifiex/scan.c
drivers/net/wireless/mwifiex/sdio.c
drivers/net/wireless/mwifiex/sta_event.c
drivers/net/wireless/mwifiex/sta_ioctl.c
drivers/net/wireless/mwifiex/txrx.c
drivers/net/wireless/mwl8k.c
drivers/net/wireless/orinoco/main.c
drivers/net/wireless/p54/main.c
drivers/net/wireless/p54/p54spi.c
drivers/net/wireless/prism54/isl_ioctl.c
drivers/net/wireless/prism54/isl_ioctl.h
drivers/net/wireless/prism54/islpci_dev.c
drivers/net/wireless/rt2x00/rt2500usb.c
drivers/net/wireless/rt2x00/rt2800lib.c
drivers/net/wireless/rt2x00/rt2800pci.c
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/rt2x00/rt2x00dev.c
drivers/net/wireless/rt2x00/rt2x00mac.c
drivers/net/wireless/rt2x00/rt61pci.c
drivers/net/wireless/rt2x00/rt73usb.c
drivers/net/wireless/rtlwifi/base.c
drivers/net/wireless/rtlwifi/pci.c
drivers/net/wireless/rtlwifi/ps.c
drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/net/wireless/rtlwifi/rtl8192de/phy.c
drivers/net/wireless/rtlwifi/rtl8192de/sw.c
drivers/net/wireless/rtlwifi/rtl8192se/phy.c
drivers/net/wireless/rtlwifi/rtl8192se/sw.c
drivers/net/wireless/rtlwifi/wifi.h
drivers/net/wireless/wl1251/spi.c
drivers/net/wireless/wl12xx/Kconfig
drivers/net/wireless/wl12xx/Makefile
drivers/net/wireless/wl12xx/acx.c
drivers/net/wireless/wl12xx/acx.h
drivers/net/wireless/wl12xx/boot.c
drivers/net/wireless/wl12xx/cmd.c
drivers/net/wireless/wl12xx/cmd.h
drivers/net/wireless/wl12xx/conf.h
drivers/net/wireless/wl12xx/debug.h [new file with mode: 0644]
drivers/net/wireless/wl12xx/debugfs.c
drivers/net/wireless/wl12xx/event.c
drivers/net/wireless/wl12xx/event.h
drivers/net/wireless/wl12xx/init.c
drivers/net/wireless/wl12xx/init.h
drivers/net/wireless/wl12xx/io.c
drivers/net/wireless/wl12xx/io.h
drivers/net/wireless/wl12xx/main.c
drivers/net/wireless/wl12xx/ps.c
drivers/net/wireless/wl12xx/ps.h
drivers/net/wireless/wl12xx/reg.h
drivers/net/wireless/wl12xx/rx.c
drivers/net/wireless/wl12xx/scan.c
drivers/net/wireless/wl12xx/scan.h
drivers/net/wireless/wl12xx/sdio.c
drivers/net/wireless/wl12xx/sdio_test.c [deleted file]
drivers/net/wireless/wl12xx/spi.c
drivers/net/wireless/wl12xx/testmode.c
drivers/net/wireless/wl12xx/tx.c
drivers/net/wireless/wl12xx/tx.h
drivers/net/wireless/wl12xx/wl12xx.h
drivers/net/wireless/wl12xx/wl12xx_80211.h
drivers/net/wireless/wl12xx/wl12xx_platform_data.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netfront.c
drivers/nfc/pn533.c
drivers/of/irq.c
drivers/oprofile/oprof.c
drivers/oprofile/oprofile_files.c
drivers/oprofile/oprofilefs.c
drivers/oprofile/timer_int.c
drivers/pci/ats.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/iov.c
drivers/pci/pci.c
drivers/platform/x86/toshiba_acpi.c
drivers/power/intel_mid_battery.c
drivers/ptp/ptp_clock.c
drivers/rapidio/devices/tsi721.c
drivers/rapidio/devices/tsi721.h
drivers/rtc/class.c
drivers/rtc/interface.c
drivers/rtc/rtc-m41t80.c
drivers/rtc/rtc-s3c.c
drivers/s390/cio/chsc.c
drivers/s390/cio/cio.h
drivers/s390/cio/css.c
drivers/s390/cio/device.c
drivers/s390/cio/device_fsm.c
drivers/s390/cio/device_ops.c
drivers/s390/cio/io_sch.h
drivers/s390/crypto/ap_bus.c
drivers/s390/net/netiucv.c
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/s390/scsi/zfcp_scsi.c
drivers/sbus/char/bbc_i2c.c
drivers/sbus/char/display7seg.c
drivers/sbus/char/envctrl.c
drivers/sbus/char/flash.c
drivers/sbus/char/uctrl.c
drivers/scsi/bnx2i/bnx2i_hwi.c
drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/fcoe/fcoe_ctlr.c
drivers/scsi/mpt2sas/mpt2sas_scsih.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_nx.c
drivers/scsi/qla2xxx/qla_nx.h
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_version.h
drivers/scsi/qla4xxx/ql4_def.h
drivers/scsi/qla4xxx/ql4_fw.h
drivers/scsi/qla4xxx/ql4_glbl.h
drivers/scsi/qla4xxx/ql4_init.c
drivers/scsi/qla4xxx/ql4_mbx.c
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/qla4xxx/ql4_version.h
drivers/spi/Kconfig
drivers/spi/spi-ath79.c
drivers/spi/spi-gpio.c
drivers/spi/spi-nuc900.c
drivers/ssb/driver_pcicore.c
drivers/ssb/pci.c
drivers/staging/comedi/comedi_fops.c
drivers/staging/comedi/drivers/usbduxsigma.c
drivers/staging/rtl8712/usb_intf.c
drivers/staging/rts_pstor/rtsx.c
drivers/staging/tidspbridge/core/dsp-clock.c
drivers/staging/tidspbridge/rmgr/drv_interface.c
drivers/staging/usbip/vhci_rx.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_auth.c
drivers/target/iscsi/iscsi_target_core.h
drivers/target/iscsi/iscsi_target_erl1.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_nego.c
drivers/target/loopback/tcm_loop.c
drivers/target/target_core_alua.c
drivers/target/target_core_cdb.c
drivers/target/target_core_configfs.c
drivers/target/target_core_device.c
drivers/target/target_core_file.c
drivers/target/target_core_iblock.c
drivers/target/target_core_pr.c
drivers/target/target_core_pscsi.c
drivers/target/target_core_rd.c
drivers/target/target_core_tmr.c
drivers/target/target_core_transport.c
drivers/target/tcm_fc/tfc_cmd.c
drivers/target/tcm_fc/tfc_conf.c
drivers/usb/class/cdc-acm.c
drivers/usb/dwc3/core.c
drivers/usb/gadget/amd5536udc.c
drivers/usb/gadget/epautoconf.c
drivers/usb/gadget/f_mass_storage.c
drivers/usb/gadget/f_serial.c
drivers/usb/gadget/fsl_mxc_udc.c
drivers/usb/gadget/fsl_qe_udc.c
drivers/usb/gadget/fsl_udc_core.c
drivers/usb/gadget/fsl_usb2_udc.h
drivers/usb/gadget/m66592-udc.c
drivers/usb/gadget/net2280.c
drivers/usb/gadget/r8a66597-udc.c
drivers/usb/gadget/s3c-hsotg.c
drivers/usb/gadget/s3c-hsudc.c
drivers/usb/host/ehci-sched.c
drivers/usb/host/isp1760-if.c
drivers/usb/host/whci/qset.c
drivers/usb/host/xhci.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musb_host.c
drivers/usb/renesas_usbhs/mod.c
drivers/usb/renesas_usbhs/mod_gadget.c
drivers/usb/renesas_usbhs/mod_host.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/option.c
drivers/usb/storage/unusual_devs.h
drivers/xen/swiotlb-xen.c
drivers/xen/xenbus/xenbus_xs.c
firmware/README.AddingFirmware
fs/btrfs/async-thread.c
fs/btrfs/async-thread.h
fs/btrfs/ctree.h
fs/btrfs/delayed-inode.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/relocation.c
fs/btrfs/scrub.c
fs/btrfs/super.c
fs/btrfs/volumes.c
fs/ceph/addr.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/ioctl.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/snap.c
fs/ceph/super.c
fs/ceph/super.h
fs/ceph/xattr.c
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/readdir.c
fs/cifs/smbencrypt.c
fs/configfs/inode.c
fs/configfs/mount.c
fs/dcache.c
fs/ext4/extents.c
fs/ext4/inode.c
fs/ext4/page-io.c
fs/ext4/super.c
fs/fs-writeback.c
fs/fuse/dev.c
fs/fuse/file.c
fs/fuse/inode.c
fs/namespace.c
fs/ncpfs/inode.c
fs/nfs/file.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nilfs2/ioctl.c
fs/proc/meminfo.c
fs/proc/root.c
fs/proc/stat.c
fs/seq_file.c
fs/ubifs/super.c
fs/xfs/xfs_acl.c
fs/xfs/xfs_attr_leaf.c
fs/xfs/xfs_bmap.c
fs/xfs/xfs_export.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_log.c
fs/xfs/xfs_sync.c
fs/xfs/xfs_trace.h
include/asm-generic/unistd.h
include/drm/drm_pciids.h
include/linux/bcma/bcma.h
include/linux/bcma/bcma_driver_chipcommon.h
include/linux/blkdev.h
include/linux/clocksource.h
include/linux/compat.h
include/linux/dcache.h
include/linux/dma_remapping.h
include/linux/errqueue.h
include/linux/ethtool.h
include/linux/fs.h
include/linux/ftrace_event.h
include/linux/ieee80211.h
include/linux/if_ether.h
include/linux/if_vlan.h
include/linux/inet_diag.h
include/linux/init_task.h
include/linux/ipv6.h
include/linux/lockd/lockd.h
include/linux/log2.h
include/linux/memcontrol.h
include/linux/mlx4/cmd.h
include/linux/mlx4/device.h
include/linux/mm.h
include/linux/mmc/card.h
include/linux/netdevice.h
include/linux/netlink.h
include/linux/nfc.h
include/linux/nl80211.h
include/linux/pci_ids.h
include/linux/perf_event.h
include/linux/pkt_sched.h
include/linux/shrinker.h
include/linux/sigma.h
include/linux/skbuff.h
include/linux/sock_diag.h [new file with mode: 0644]
include/linux/ssb/ssb.h
include/linux/ssb/ssb_regs.h
include/linux/sunrpc/clnt.h
include/linux/tcp.h
include/linux/unix_diag.h [new file with mode: 0644]
include/linux/wl12xx.h
include/media/soc_camera.h
include/net/af_unix.h
include/net/bluetooth/bluetooth.h
include/net/bluetooth/hci.h
include/net/bluetooth/hci_core.h
include/net/bluetooth/l2cap.h
include/net/bluetooth/mgmt.h
include/net/caif/caif_spi.h
include/net/cfg80211.h
include/net/dst.h
include/net/flow.h
include/net/inet6_hashtables.h
include/net/inet_sock.h
include/net/inet_timewait_sock.h
include/net/ip.h
include/net/ip6_route.h
include/net/iucv/af_iucv.h
include/net/net_namespace.h
include/net/netfilter/nf_tproxy_core.h
include/net/netns/ipv4.h
include/net/netns/mib.h
include/net/netns/xfrm.h
include/net/nfc/nfc.h
include/net/protocol.h
include/net/red.h
include/net/sctp/sctp.h
include/net/sctp/structs.h
include/net/sock.h
include/net/tcp.h
include/net/tcp_memcontrol.h [new file with mode: 0644]
include/net/udp.h
include/scsi/libfcoe.h
include/target/target_core_base.h
include/target/target_core_transport.h
include/xen/interface/io/xs_wire.h
init/Kconfig
ipc/mqueue.c
ipc/msgutil.c
kernel/cgroup.c
kernel/cpuset.c
kernel/events/core.c
kernel/events/internal.h
kernel/events/ring_buffer.c
kernel/irq/manage.c
kernel/jump_label.c
kernel/lockdep.c
kernel/printk.c
kernel/sched.c
kernel/sched_fair.c
kernel/sched_features.h
kernel/sched_rt.c
kernel/sysctl_binary.c
kernel/time/alarmtimer.c
kernel/time/clockevents.c
kernel/time/clocksource.c
kernel/time/tick-broadcast.c
kernel/timer.c
kernel/trace/ftrace.c
kernel/trace/trace_events.c
kernel/trace/trace_events_filter.c
lib/dma-debug.c
lib/reciprocal_div.c
mm/filemap.c
mm/huge_memory.c
mm/hugetlb.c
mm/memcontrol.c
mm/migrate.c
mm/oom_kill.c
mm/page-writeback.c
mm/page_alloc.c
mm/percpu.c
mm/slab.c
mm/vmalloc.c
mm/vmscan.c
net/8021q/vlan.c
net/8021q/vlan.h
net/8021q/vlan_core.c
net/8021q/vlan_dev.c
net/8021q/vlan_gvrp.c
net/8021q/vlan_netlink.c
net/8021q/vlanproc.c
net/atm/clip.c
net/batman-adv/gateway_client.c
net/batman-adv/icmp_socket.c
net/batman-adv/routing.c
net/batman-adv/soft-interface.c
net/batman-adv/translation-table.c
net/batman-adv/types.h
net/batman-adv/vis.c
net/bluetooth/bnep/core.c
net/bluetooth/cmtp/core.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/hci_sock.c
net/bluetooth/l2cap_core.c
net/bluetooth/l2cap_sock.c
net/bluetooth/mgmt.c
net/bluetooth/rfcomm/core.c
net/bluetooth/sco.c
net/bluetooth/smp.c
net/bridge/br.c
net/bridge/br_device.c
net/bridge/br_fdb.c
net/bridge/br_forward.c
net/bridge/br_multicast.c
net/bridge/br_netfilter.c
net/bridge/br_private.h
net/bridge/netfilter/ebt_log.c
net/caif/Kconfig
net/caif/Makefile
net/caif/caif_dev.c
net/caif/caif_usb.c [new file with mode: 0644]
net/caif/cfpkt_skbuff.c
net/caif/cfrfml.c
net/ceph/crush/mapper.c
net/core/Makefile
net/core/dst.c
net/core/ethtool.c
net/core/flow.c
net/core/neighbour.c
net/core/net-sysfs.c
net/core/request_sock.c
net/core/rtnetlink.c
net/core/secure_seq.c
net/core/skbuff.c
net/core/sock.c
net/core/sock_diag.c [new file with mode: 0644]
net/dccp/ccids/ccid2.c
net/dccp/ccids/ccid3.c
net/dccp/ccids/lib/tfrc.c
net/dccp/ccids/lib/tfrc.h
net/dccp/dccp.h
net/dccp/diag.c
net/dccp/feat.c
net/dccp/minisocks.c
net/dccp/options.c
net/dccp/proto.c
net/decnet/dn_neigh.c
net/decnet/dn_route.c
net/ipv4/Kconfig
net/ipv4/Makefile
net/ipv4/af_inet.c
net/ipv4/arp.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_diag.c
net/ipv4/ip_gre.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/ipconfig.c
net/ipv4/ipip.c
net/ipv4/netfilter/ipt_ULOG.c
net/ipv4/netfilter/iptable_filter.c
net/ipv4/proc.c
net/ipv4/route.c
net/ipv4/syncookies.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_diag.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_memcontrol.c [new file with mode: 0644]
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c
net/ipv4/tunnel4.c
net/ipv4/udp.c
net/ipv4/udp_diag.c [new file with mode: 0644]
net/ipv4/xfrm4_tunnel.c
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/anycast.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_output.c
net/ipv6/mcast.c
net/ipv6/ndisc.c
net/ipv6/netfilter/ip6table_filter.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/irda/af_irda.c
net/irda/irlan/irlan_common.c
net/iucv/af_iucv.c
net/key/af_key.c
net/llc/af_llc.c
net/mac80211/Kconfig
net/mac80211/Makefile
net/mac80211/agg-rx.c
net/mac80211/agg-tx.c
net/mac80211/cfg.c
net/mac80211/debugfs.c
net/mac80211/debugfs_netdev.c
net/mac80211/driver-ops.h
net/mac80211/driver-trace.h
net/mac80211/ht.c
net/mac80211/ibss.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/main.c
net/mac80211/mesh.c
net/mac80211/mesh.h
net/mac80211/mesh_hwmp.c
net/mac80211/mesh_pathtbl.c
net/mac80211/mesh_plink.c
net/mac80211/mlme.c
net/mac80211/offchannel.c
net/mac80211/rc80211_pid_algo.c
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/status.c
net/mac80211/tx.c
net/mac80211/util.c
net/mac80211/wme.c
net/mac80211/wme.h
net/mac80211/work.c
net/netfilter/core.c
net/netfilter/ipset/ip_set_getport.c
net/netfilter/ipvs/ip_vs_pe_sip.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_conntrack_acct.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_ftp.c
net/netfilter/nf_conntrack_h323_main.c
net/netfilter/nf_conntrack_proto_dccp.c
net/netfilter/nf_conntrack_proto_gre.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_conntrack_proto_udp.c
net/netfilter/nf_conntrack_proto_udplite.c
net/netfilter/nf_conntrack_timestamp.c
net/netfilter/xt_NFQUEUE.c
net/netfilter/xt_TCPMSS.c
net/netfilter/xt_TCPOPTSTRIP.c
net/netfilter/xt_TEE.c
net/netfilter/xt_TPROXY.c
net/netfilter/xt_addrtype.c
net/netfilter/xt_connbytes.c
net/netfilter/xt_hashlimit.c
net/netfilter/xt_socket.c
net/netlabel/netlabel_addrlist.c
net/netlabel/netlabel_addrlist.h
net/netlabel/netlabel_domainhash.c
net/netlabel/netlabel_domainhash.h
net/netlabel/netlabel_kapi.c
net/netlabel/netlabel_mgmt.c
net/netlabel/netlabel_unlabeled.c
net/netlink/af_netlink.c
net/netlink/genetlink.c
net/nfc/Kconfig
net/nfc/Makefile
net/nfc/core.c
net/nfc/llcp/Kconfig [new file with mode: 0644]
net/nfc/llcp/commands.c [new file with mode: 0644]
net/nfc/llcp/llcp.c [new file with mode: 0644]
net/nfc/llcp/llcp.h [new file with mode: 0644]
net/nfc/llcp/sock.c [new file with mode: 0644]
net/nfc/nci/core.c
net/nfc/nci/data.c
net/nfc/nci/ntf.c
net/nfc/nci/rsp.c
net/nfc/netlink.c
net/nfc/nfc.h
net/nfc/rawsock.c
net/openvswitch/vport.c
net/packet/af_packet.c
net/rfkill/rfkill-gpio.c
net/rfkill/rfkill-regulator.c
net/rxrpc/ar-ack.c
net/rxrpc/ar-output.c
net/sched/cls_flow.c
net/sched/sch_choke.c
net/sched/sch_gred.c
net/sched/sch_hfsc.c
net/sched/sch_mqprio.c
net/sched/sch_netem.c
net/sched/sch_red.c
net/sched/sch_sfq.c
net/sched/sch_teql.c
net/sctp/associola.c
net/sctp/input.c
net/sctp/output.c
net/sctp/outqueue.c
net/sctp/protocol.c
net/sctp/socket.c
net/sctp/sysctl.c
net/sunrpc/addr.c
net/sunrpc/sched.c
net/sunrpc/svc.c
net/sunrpc/svc_xprt.c
net/sunrpc/svcauth_unix.c
net/sunrpc/xprt.c
net/tipc/ref.c
net/unix/Kconfig
net/unix/Makefile
net/unix/af_unix.c
net/unix/diag.c [new file with mode: 0644]
net/wireless/Kconfig
net/wireless/chan.c
net/wireless/core.h
net/wireless/mesh.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/sme.c
net/wireless/util.c
net/x25/af_x25.c
net/x25/x25_dev.c
net/x25/x25_route.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_user.c
security/apparmor/path.c
security/integrity/evm/evm_crypto.c
security/selinux/hooks.c
security/selinux/netport.c
security/tomoyo/realpath.c
sound/atmel/ac97c.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/pci/sis7019.c
sound/soc/atmel/Kconfig
sound/soc/atmel/Makefile
sound/soc/atmel/playpaq_wm8510.c [deleted file]
sound/soc/codecs/Kconfig
sound/soc/codecs/ad1836.h
sound/soc/codecs/cs4270.c
sound/soc/codecs/cs42l51.c
sound/soc/codecs/jz4740.c
sound/soc/codecs/max9877.c
sound/soc/codecs/uda1380.c
sound/soc/codecs/wm8958-dsp2.c
sound/soc/codecs/wm8994.c
sound/soc/codecs/wm8996.c
sound/soc/fsl/mpc8610_hpcd.c
sound/soc/imx/Kconfig
sound/soc/kirkwood/Kconfig
sound/soc/mxs/mxs-pcm.c
sound/soc/mxs/mxs-sgtl5000.c
sound/soc/pxa/Kconfig
sound/soc/pxa/hx4700.c
sound/soc/samsung/jive_wm8750.c
sound/soc/samsung/smdk2443_wm9710.c
sound/soc/samsung/smdk_wm8994.c
sound/soc/samsung/speyside.c
sound/soc/soc-core.c
sound/soc/soc-utils.c
sound/usb/quirks-table.h
tools/perf/builtin-stat.c
tools/perf/util/evsel.c
tools/perf/util/header.c
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/session.c
tools/perf/util/trace-event-parse.c

diff --git a/CREDITS b/CREDITS
index 07e32a8..44fce98 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -688,10 +688,13 @@ S: Oxfordshire, UK.
 
 N: Kees Cook
 E: kees@outflux.net
-W: http://outflux.net/
-P: 1024D/17063E6D 9FA3 C49C 23C9 D1BC 2E30  1975 1FFF 4BA9 1706 3E6D
-D: Minor updates to SCSI types, added /proc/pid/maps protection
+E: kees@ubuntu.com
+E: keescook@chromium.org
+W: http://outflux.net/blog/
+P: 4096R/DC6DC026 A5C3 F68F 229D D60F 723E  6E13 8972 F4DF DC6D C026
+D: Various security things, bug fixes, and documentation.
 S: (ask for current address)
+S: Portland, Oregon
 S: USA
 
 N: Robin Cornelius
diff --git a/Documentation/ABI/testing/sysfs-bus-rbd b/Documentation/ABI/testing/sysfs-bus-rbd
index fa72ccb..dbedafb 100644 (file)
@@ -57,13 +57,6 @@ create_snap
 
         $ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_create
 
-rollback_snap
-
-       Rolls back data to the specified snapshot. This goes over the entire
-       list of rados blocks and sends a rollback command to each.
-
-        $ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_rollback
-
 snap_*
 
        A directory per each snapshot
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index cc0ebc5..4d8774f 100644 (file)
@@ -44,8 +44,8 @@ Features:
  - oom-killer disable knob and oom-notifier
  - Root cgroup has no limit controls.
 
- Kernel memory and Hugepages are not under control yet. We just manage
- pages on LRU. To add more controls, we have to take care of performance.
+ Kernel memory support is work in progress, and the current version provides
+ basically functionality. (See Section 2.7)
 
 Brief summary of control files.
 
@@ -72,6 +72,9 @@ Brief summary of control files.
  memory.oom_control             # set/show oom controls.
  memory.numa_stat               # show the number of memory usage per numa node
 
+ memory.kmem.tcp.limit_in_bytes  # set/show hard limit for tcp buf memory
+ memory.kmem.tcp.usage_in_bytes  # show current tcp buf memory allocation
+
 1. History
 
 The memory controller has a long history. A request for comments for the memory
@@ -255,6 +258,27 @@ When oom event notifier is registered, event will be delivered.
   per-zone-per-cgroup LRU (cgroup's private LRU) is just guarded by
   zone->lru_lock, it has no lock of its own.
 
+2.7 Kernel Memory Extension (CONFIG_CGROUP_MEM_RES_CTLR_KMEM)
+
+With the Kernel memory extension, the Memory Controller is able to limit
+the amount of kernel memory used by the system. Kernel memory is fundamentally
+different than user memory, since it can't be swapped out, which makes it
+possible to DoS the system by consuming too much of this precious resource.
+
+Kernel memory limits are not imposed for the root cgroup. Usage for the root
+cgroup may or may not be accounted.
+
+Currently no soft limit is implemented for kernel memory. It is future work
+to trigger slab reclaim when those limits are reached.
+
+2.7.1 Current Kernel Memory resources accounted
+
+* sockets memory pressure: some sockets protocols have memory pressure
+thresholds. The Memory Controller allows them to be controlled individually
+per cgroup, instead of globally.
+
+* tcp memory pressure: sockets memory pressure for the tcp protocol.
+
 3. User Interface
 
 0. Configuration
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 3d84912..33f7327 100644 (file)
@@ -263,8 +263,7 @@ Who:        Ravikiran Thirumalai <kiran@scalex86.org>
 
 What:  Code that is now under CONFIG_WIRELESS_EXT_SYSFS
        (in net/core/net-sysfs.c)
-When:  After the only user (hal) has seen a release with the patches
-       for enough time, probably some time in 2010.
+When:  3.5
 Why:   Over 1K .text/.data size reduction, data is available in other
        ways (ioctls)
 Who:   Johannes Berg <johannes@sipsolutions.net>
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index a0c5c5f..81c287f 100644 (file)
@@ -315,12 +315,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        CPU-intensive style benchmark, and it can vary highly in
                        a microbenchmark depending on workload and compiler.
 
-                       1: only for 32-bit processes
-                       2: only for 64-bit processes
+                       32: only for 32-bit processes
+                       64: only for 64-bit processes
                        on: enable for both 32- and 64-bit processes
                        off: disable for both 32- and 64-bit processes
 
-       amd_iommu=      [HW,X86-84]
+       amd_iommu=      [HW,X86-64]
                        Pass parameters to the AMD IOMMU driver in the system.
                        Possible values are:
                        fullflush - enable flushing of IO/TLB entries when
diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt
index c86d03f..221ad0c 100644 (file)
@@ -200,15 +200,16 @@ abled  during run time. Following log_levels are defined:
 
 0 - All  debug  output  disabled
 1 - Enable messages related to routing / flooding / broadcasting
-2 - Enable route or tt entry added / changed / deleted
-3 - Enable all messages
+2 - Enable messages related to route added / changed / deleted
+4 - Enable messages related to translation table operations
+7 - Enable all messages
 
 The debug output can be changed at runtime  using  the  file
 /sys/class/net/bat0/mesh/log_level. e.g.
 
 # echo 2 > /sys/class/net/bat0/mesh/log_level
 
-will enable debug messages for when routes or TTs change.
+will enable debug messages for when routes change.
 
 
 BATCTL
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index cb2b1c6..ad3e80e 100644 (file)
@@ -295,11 +295,11 @@ tcp_max_ssthresh - INTEGER
        Default: 0 (off)
 
 tcp_max_syn_backlog - INTEGER
-       Maximal number of remembered connection requests, which are
-       still did not receive an acknowledgment from connecting client.
-       Default value is 1024 for systems with more than 128Mb of memory,
-       and 128 for low memory machines. If server suffers of overload,
-       try to increase this number.
+       Maximal number of remembered connection requests, which have not
+       received an acknowledgment from connecting client.
+       The minimal value is 128 for low memory machines, and it will
+       increase in proportion to the memory of machine.
+       If server suffers from overload, try increasing this number.
 
 tcp_max_tw_buckets - INTEGER
        Maximal number of timewait sockets held by system simultaneously.
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 4acea66..1c08a4b 100644 (file)
@@ -155,7 +155,7 @@ As capture, each frame contains two parts:
 
  /* fill sockaddr_ll struct to prepare binding */
  my_addr.sll_family = AF_PACKET;
- my_addr.sll_protocol = ETH_P_ALL;
+ my_addr.sll_protocol = htons(ETH_P_ALL);
  my_addr.sll_ifindex =  s_ifr.ifr_ifindex;
 
  /* bind socket to eth0 */
diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt
index a177de2..579994a 100644 (file)
@@ -208,7 +208,7 @@ The counter in rps_dev_flow_table values records the length of the current
 CPU's backlog when a packet in this flow was last enqueued. Each backlog
 queue has a head counter that is incremented on dequeue. A tail counter
 is computed as head counter + queue length. In other words, the counter
-in rps_dev_flow_table[i] records the last element in flow i that has
+in rps_dev_flow[i] records the last element in flow i that has
 been enqueued onto the currently designated CPU for flow i (of course,
 entry i is actually selected by hash and multiple flows may hash to the
 same entry i).
@@ -224,7 +224,7 @@ following is true:
 
 - The current CPU's queue head counter >= the recorded tail counter
   value in rps_dev_flow[i]
-- The current CPU is unset (equal to NR_CPUS)
+- The current CPU is unset (equal to RPS_NO_CPU)
 - The current CPU is offline
 
 After this check, the packet is sent to the (possibly updated) current
@@ -235,7 +235,7 @@ CPU.
 
 ==== RFS Configuration
 
-RFS is only available if the kconfig symbol CONFIG_RFS is enabled (on
+RFS is only available if the kconfig symbol CONFIG_RPS is enabled (on
 by default for SMP). The functionality remains disabled until explicitly
 configured. The number of entries in the global flow table is set through:
 
@@ -258,7 +258,7 @@ For a single queue device, the rps_flow_cnt value for the single queue
 would normally be configured to the same value as rps_sock_flow_entries.
 For a multi-queue device, the rps_flow_cnt for each queue might be
 configured as rps_sock_flow_entries / N, where N is the number of
-queues. So for instance, if rps_flow_entries is set to 32768 and there
+queues. So for instance, if rps_sock_flow_entries is set to 32768 and there
 are 16 configured receive queues, rps_flow_cnt for each queue might be
 configured as 2048.
 
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index 8d67980..d0aeead 100644 (file)
@@ -4,14 +4,16 @@ Copyright (C) 2007-2010  STMicroelectronics Ltd
 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 
 This is the driver for the MAC 10/100/1000 on-chip Ethernet controllers
-(Synopsys IP blocks); it has been fully tested on STLinux platforms.
+(Synopsys IP blocks).
 
 Currently this network device driver is for all STM embedded MAC/GMAC
-(i.e. 7xxx/5xxx SoCs) and it's known working on other platforms i.e. ARM SPEAr.
+(i.e. 7xxx/5xxx SoCs), SPEAr (arm), Loongson1B (mips) and XLINX XC2V3000
+FF1152AMT0221 D1215994A VIRTEX FPGA board.
 
-DWC Ether MAC 10/100/1000 Universal version 3.41a and DWC Ether MAC 10/100
-Universal version 4.0 have been used for developing the first code
-implementation.
+DWC Ether MAC 10/100/1000 Universal version 3.60a (and older) and DWC Ether MAC 10/100
+Universal version 4.0 have been used for developing this driver.
+
+This driver supports both the platform bus and PCI.
 
 Please, for more information also visit: www.stlinux.com
 
@@ -277,5 +279,5 @@ In fact, these can generate an huge amount of debug messages.
 
 6) TODO:
  o XGMAC is not supported.
- o Review the timer optimisation code to use an embedded device that will be
-  available in new chip generations.
+ o Add the EEE - Energy Efficient Ethernet
+ o Add the PTP - precision time protocol
diff --git a/Documentation/sound/alsa/soc/machine.txt b/Documentation/sound/alsa/soc/machine.txt
index 3e2ec9c..d50c14d 100644 (file)
@@ -50,8 +50,7 @@ Machine DAI Configuration
 The machine DAI configuration glues all the codec and CPU DAIs together. It can
 also be used to set up the DAI system clock and for any machine related DAI
 initialisation e.g. the machine audio map can be connected to the codec audio
-map, unconnected codec pins can be set as such. Please see corgi.c, spitz.c
-for examples.
+map, unconnected codec pins can be set as such.
 
 struct snd_soc_dai_link is used to set up each DAI in your machine. e.g.
 
@@ -83,8 +82,7 @@ Machine Power Map
 The machine driver can optionally extend the codec power map and to become an
 audio power map of the audio subsystem. This allows for automatic power up/down
 of speaker/HP amplifiers, etc. Codec pins can be connected to the machines jack
-sockets in the machine init function. See soc/pxa/spitz.c and dapm.txt for
-details.
+sockets in the machine init function.
 
 
 Machine Controls
diff --git a/Documentation/usb/linux-cdc-acm.inf b/Documentation/usb/linux-cdc-acm.inf
index 37a02ce..f0ffc27 100644 (file)
@@ -90,10 +90,10 @@ ServiceBinary=%12%\USBSER.sys
 [SourceDisksFiles]
 [SourceDisksNames]
 [DeviceList]
-%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02
+%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02, USB\VID_1D6B&PID_0106&MI_00
 
 [DeviceList.NTamd64]
-%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02
+%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02, USB\VID_1D6B&PID_0106&MI_00
 
 
 ;------------------------------------------------------------------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index 209ad06..aace417 100644 (file)
@@ -511,8 +511,8 @@ M:  Joerg Roedel <joerg.roedel@amd.com>
 L:     iommu@lists.linux-foundation.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu.git
 S:     Supported
-F:     arch/x86/kernel/amd_iommu*.c
-F:     arch/x86/include/asm/amd_iommu*.h
+F:     drivers/iommu/amd_iommu*.[ch]
+F:     include/linux/amd-iommu.h
 
 AMD MICROCODE UPDATE SUPPORT
 M:     Andreas Herrmann <andreas.herrmann3@amd.com>
@@ -1054,35 +1054,18 @@ ARM/SAMSUNG ARM ARCHITECTURES
 M:     Ben Dooks <ben-linux@fluff.org>
 M:     Kukjin Kim <kgene.kim@samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 W:     http://www.fluff.org/ben/linux/
 S:     Maintained
 F:     arch/arm/plat-samsung/
 F:     arch/arm/plat-s3c24xx/
 F:     arch/arm/plat-s5p/
+F:     arch/arm/mach-s3c24*/
+F:     arch/arm/mach-s3c64xx/
 F:     drivers/*/*s3c2410*
 F:     drivers/*/*/*s3c2410*
-
-ARM/S3C2410 ARM ARCHITECTURE
-M:     Ben Dooks <ben-linux@fluff.org>
-L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.fluff.org/ben/linux/
-S:     Maintained
-F:     arch/arm/mach-s3c2410/
-
-ARM/S3C244x ARM ARCHITECTURE
-M:     Ben Dooks <ben-linux@fluff.org>
-L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.fluff.org/ben/linux/
-S:     Maintained
-F:     arch/arm/mach-s3c2440/
-F:     arch/arm/mach-s3c2443/
-
-ARM/S3C64xx ARM ARCHITECTURE
-M:     Ben Dooks <ben-linux@fluff.org>
-L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.fluff.org/ben/linux/
-S:     Maintained
-F:     arch/arm/mach-s3c64xx/
+F:     drivers/spi/spi-s3c*
+F:     sound/soc/samsung/*
 
 ARM/S5P EXYNOS ARM ARCHITECTURES
 M:     Kukjin Kim <kgene.kim@samsung.com>
@@ -3118,6 +3101,7 @@ F:        include/linux/hid*
 
 HIGH-RESOLUTION TIMERS, CLOCKEVENTS, DYNTICKS
 M:     Thomas Gleixner <tglx@linutronix.de>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 S:     Maintained
 F:     Documentation/timers/
 F:     kernel/hrtimer.c
@@ -3627,7 +3611,7 @@ F:        net/irda/
 IRQ SUBSYSTEM
 M:     Thomas Gleixner <tglx@linutronix.de>
 S:     Maintained
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git irq/core
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
 F:     kernel/irq/
 
 ISAPNP
@@ -4115,7 +4099,7 @@ F:        drivers/hwmon/lm90.c
 LOCKDEP AND LOCKSTAT
 M:     Peter Zijlstra <peterz@infradead.org>
 M:     Ingo Molnar <mingo@redhat.com>
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-lockdep.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git core/locking
 S:     Maintained
 F:     Documentation/lockdep*.txt
 F:     Documentation/lockstat.txt
@@ -4297,7 +4281,9 @@ T:        git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 S:     Maintained
 F:     Documentation/dvb/
 F:     Documentation/video4linux/
+F:     Documentation/DocBook/media/
 F:     drivers/media/
+F:     drivers/staging/media/
 F:     include/media/
 F:     include/linux/dvb/
 F:     include/linux/videodev*.h
@@ -4319,8 +4305,9 @@ F:        include/linux/mm.h
 F:     mm/
 
 MEMORY RESOURCE CONTROLLER
+M:     Johannes Weiner <hannes@cmpxchg.org>
+M:     Michal Hocko <mhocko@suse.cz>
 M:     Balbir Singh <bsingharora@gmail.com>
-M:     Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
 M:     KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
 L:     cgroups@vger.kernel.org
 L:     linux-mm@kvack.org
@@ -5110,6 +5097,7 @@ M:        Peter Zijlstra <a.p.zijlstra@chello.nl>
 M:     Paul Mackerras <paulus@samba.org>
 M:     Ingo Molnar <mingo@elte.hu>
 M:     Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
 S:     Supported
 F:     kernel/events/*
 F:     include/linux/perf_event.h
@@ -5189,6 +5177,7 @@ F:        drivers/scsi/pm8001/
 
 POSIX CLOCKS and TIMERS
 M:     Thomas Gleixner <tglx@linutronix.de>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 S:     Supported
 F:     fs/timerfd.c
 F:     include/linux/timer*
@@ -5393,6 +5382,7 @@ S:        Supported
 F:     drivers/scsi/qla4xxx/
 
 QLOGIC QLA3XXX NETWORK DRIVER
+M:     Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
 M:     Ron Mercer <ron.mercer@qlogic.com>
 M:     linux-driver@qlogic.com
 L:     netdev@vger.kernel.org
@@ -5704,6 +5694,7 @@ F:        drivers/dma/dw_dmac.c
 TIMEKEEPING, NTP
 M:     John Stultz <johnstul@us.ibm.com>
 M:     Thomas Gleixner <tglx@linutronix.de>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 S:     Supported
 F:     include/linux/clocksource.h
 F:     include/linux/time.h
@@ -5728,6 +5719,7 @@ F:        drivers/watchdog/sc1200wdt.c
 SCHEDULER
 M:     Ingo Molnar <mingo@elte.hu>
 M:     Peter Zijlstra <peterz@infradead.org>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
 S:     Maintained
 F:     kernel/sched*
 F:     include/linux/sched.h
@@ -5910,7 +5902,6 @@ F:        drivers/net/ethernet/emulex/benet/
 
 SFC NETWORK DRIVER
 M:     Solarflare linux maintainers <linux-net-drivers@solarflare.com>
-M:     Steve Hodgson <shodgson@solarflare.com>
 M:     Ben Hutchings <bhutchings@solarflare.com>
 L:     netdev@vger.kernel.org
 S:     Supported
@@ -6662,7 +6653,7 @@ TRACING
 M:     Steven Rostedt <rostedt@goodmis.org>
 M:     Frederic Weisbecker <fweisbec@gmail.com>
 M:     Ingo Molnar <mingo@redhat.com>
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git perf/core
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
 S:     Maintained
 F:     Documentation/trace/ftrace.txt
 F:     arch/*/*/*/ftrace.h
@@ -7412,7 +7403,7 @@ M:        Thomas Gleixner <tglx@linutronix.de>
 M:     Ingo Molnar <mingo@redhat.com>
 M:     "H. Peter Anvin" <hpa@zytor.com>
 M:     x86@kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
 S:     Maintained
 F:     Documentation/x86/
 F:     arch/x86/
diff --git a/Makefile b/Makefile
index 12aafc2..a43733d 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e084b7e..776d76b 100644 (file)
@@ -220,8 +220,9 @@ config NEED_MACH_MEMORY_H
          be avoided when possible.
 
 config PHYS_OFFSET
-       hex "Physical address of main memory"
+       hex "Physical address of main memory" if MMU
        depends on !ARM_PATCH_PHYS_VIRT && !NEED_MACH_MEMORY_H
+       default DRAM_BASE if !MMU
        help
          Please provide the physical address corresponding to the
          location of main memory in your system.
index a7e7775..945a34f 100644 (file)
@@ -48,12 +48,7 @@ CONFIG_MACH_SX1=y
 CONFIG_MACH_NOKIA770=y
 CONFIG_MACH_AMS_DELTA=y
 CONFIG_MACH_OMAP_GENERIC=y
-CONFIG_OMAP_ARM_216MHZ=y
-CONFIG_OMAP_ARM_195MHZ=y
-CONFIG_OMAP_ARM_192MHZ=y
 CONFIG_OMAP_ARM_182MHZ=y
-CONFIG_OMAP_ARM_168MHZ=y
-# CONFIG_OMAP_ARM_60MHZ is not set
 # CONFIG_ARM_THUMB is not set
 CONFIG_PCCARD=y
 CONFIG_OMAP_CF=y
index a5edf42..d1c3f3a 100644 (file)
@@ -30,14 +30,15 @@ enum unwind_reason_code {
 };
 
 struct unwind_idx {
-       unsigned long addr;
+       unsigned long addr_offset;
        unsigned long insn;
 };
 
 struct unwind_table {
        struct list_head list;
-       struct unwind_idx *start;
-       struct unwind_idx *stop;
+       const struct unwind_idx *start;
+       const struct unwind_idx *origin;
+       const struct unwind_idx *stop;
        unsigned long begin_addr;
        unsigned long end_addr;
 };
@@ -49,15 +50,6 @@ extern struct unwind_table *unwind_table_add(unsigned long start,
 extern void unwind_table_del(struct unwind_table *tab);
 extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk);
 
-#ifdef CONFIG_ARM_UNWIND
-extern int __init unwind_init(void);
-#else
-static inline int __init unwind_init(void)
-{
-       return 0;
-}
-#endif
-
 #endif /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_ARM_UNWIND
index c475379..88b0941 100644 (file)
@@ -353,15 +353,15 @@ validate_group(struct perf_event *event)
        fake_pmu.used_mask = fake_used_mask;
 
        if (!validate_event(&fake_pmu, leader))
-               return -ENOSPC;
+               return -EINVAL;
 
        list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
                if (!validate_event(&fake_pmu, sibling))
-                       return -ENOSPC;
+                       return -EINVAL;
        }
 
        if (!validate_event(&fake_pmu, event))
-               return -ENOSPC;
+               return -EINVAL;
 
        return 0;
 }
@@ -640,6 +640,9 @@ static struct platform_device_id armpmu_plat_device_ids[] = {
 
 static int __devinit armpmu_device_probe(struct platform_device *pdev)
 {
+       if (!cpu_pmu)
+               return -ENODEV;
+
        cpu_pmu->plat_device = pdev;
        return 0;
 }
index 3448a3f..8fc2c8f 100644 (file)
@@ -895,8 +895,6 @@ void __init setup_arch(char **cmdline_p)
 {
        struct machine_desc *mdesc;
 
-       unwind_init();
-
        setup_processor();
        mdesc = setup_machine_fdt(__atags_pointer);
        if (!mdesc)
@@ -904,6 +902,12 @@ void __init setup_arch(char **cmdline_p)
        machine_desc = mdesc;
        machine_name = mdesc->name;
 
+#ifdef CONFIG_ZONE_DMA
+       if (mdesc->dma_zone_size) {
+               extern unsigned long arm_dma_zone_size;
+               arm_dma_zone_size = mdesc->dma_zone_size;
+       }
+#endif
        if (mdesc->soft_reboot)
                reboot_setup("s");
 
@@ -934,12 +938,6 @@ void __init setup_arch(char **cmdline_p)
 
        tcm_init();
 
-#ifdef CONFIG_ZONE_DMA
-       if (mdesc->dma_zone_size) {
-               extern unsigned long arm_dma_zone_size;
-               arm_dma_zone_size = mdesc->dma_zone_size;
-       }
-#endif
 #ifdef CONFIG_MULTI_IRQ_HANDLER
        handle_arch_irq = mdesc->handle_irq;
 #endif
index e7e8365..00df012 100644 (file)
@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__aeabi_unwind_cpp_pr2);
 
 struct unwind_ctrl_block {
        unsigned long vrs[16];          /* virtual register set */
-       unsigned long *insn;            /* pointer to the current instructions word */
+       const unsigned long *insn;      /* pointer to the current instructions word */
        int entries;                    /* number of entries left to interpret */
        int byte;                       /* current byte number in the instructions word */
 };
@@ -83,8 +83,9 @@ enum regs {
        PC = 15
 };
 
-extern struct unwind_idx __start_unwind_idx[];
-extern struct unwind_idx __stop_unwind_idx[];
+extern const struct unwind_idx __start_unwind_idx[];
+static const struct unwind_idx *__origin_unwind_idx;
+extern const struct unwind_idx __stop_unwind_idx[];
 
 static DEFINE_SPINLOCK(unwind_lock);
 static LIST_HEAD(unwind_tables);
@@ -98,45 +99,99 @@ static LIST_HEAD(unwind_tables);
 })
 
 /*
- * Binary search in the unwind index. The entries entries are
+ * Binary search in the unwind index. The entries are
  * guaranteed to be sorted in ascending order by the linker.
+ *
+ * start = first entry
+ * origin = first entry with positive offset (or stop if there is no such entry)
+ * stop - 1 = last entry
  */
-static struct unwind_idx *search_index(unsigned long addr,
-                                      struct unwind_idx *first,
-                                      struct unwind_idx *last)
+static const struct unwind_idx *search_index(unsigned long addr,
+                                      const struct unwind_idx *start,
+                                      const struct unwind_idx *origin,
+                                      const struct unwind_idx *stop)
 {
-       pr_debug("%s(%08lx, %p, %p)\n", __func__, addr, first, last);
+       unsigned long addr_prel31;
+
+       pr_debug("%s(%08lx, %p, %p, %p)\n",
+                       __func__, addr, start, origin, stop);
+
+       /*
+        * only search in the section with the matching sign. This way the
+        * prel31 numbers can be compared as unsigned longs.
+        */
+       if (addr < (unsigned long)start)
+               /* negative offsets: [start; origin) */
+               stop = origin;
+       else
+               /* positive offsets: [origin; stop) */
+               start = origin;
+
+        /* prel31 for address relative to start */
+       addr_prel31 = (addr - (unsigned long)start) & 0x7fffffff;
 
-       if (addr < first->addr) {
+       while (start < stop - 1) {
+               const struct unwind_idx *mid = start + ((stop - start) >> 1);
+
+               /*
+                * As addr_prel31 is relative to start an offset is needed to
+                * make it relative to mid.
+                */
+               if (addr_prel31 - ((unsigned long)mid - (unsigned long)start) <
+                               mid->addr_offset)
+                       stop = mid;
+               else {
+                       /* keep addr_prel31 relative to start */
+                       addr_prel31 -= ((unsigned long)mid -
+                                       (unsigned long)start);
+                       start = mid;
+               }
+       }
+
+       if (likely(start->addr_offset <= addr_prel31))
+               return start;
+       else {
                pr_warning("unwind: Unknown symbol address %08lx\n", addr);
                return NULL;
-       } else if (addr >= last->addr)
-               return last;
+       }
+}
 
-       while (first < last - 1) {
-               struct unwind_idx *mid = first + ((last - first + 1) >> 1);
+static const struct unwind_idx *unwind_find_origin(
+               const struct unwind_idx *start, const struct unwind_idx *stop)
+{
+       pr_debug("%s(%p, %p)\n", __func__, start, stop);
+       while (start < stop) {
+               const struct unwind_idx *mid = start + ((stop - start) >> 1);
 
-               if (addr < mid->addr)
-                       last = mid;
+               if (mid->addr_offset >= 0x40000000)
+                       /* negative offset */
+                       start = mid + 1;
                else
-                       first = mid;
+                       /* positive offset */
+                       stop = mid;
        }
-
-       return first;
+       pr_debug("%s -> %p\n", __func__, stop);
+       return stop;
 }
 
-static struct unwind_idx *unwind_find_idx(unsigned long addr)
+static const struct unwind_idx *unwind_find_idx(unsigned long addr)
 {
-       struct unwind_idx *idx = NULL;
+       const struct unwind_idx *idx = NULL;
        unsigned long flags;
 
        pr_debug("%s(%08lx)\n", __func__, addr);
 
-       if (core_kernel_text(addr))
+       if (core_kernel_text(addr)) {
+               if (unlikely(!__origin_unwind_idx))
+                       __origin_unwind_idx =
+                               unwind_find_origin(__start_unwind_idx,
+                                               __stop_unwind_idx);
+
                /* main unwind table */
                idx = search_index(addr, __start_unwind_idx,
-                                  __stop_unwind_idx - 1);
-       else {
+                                  __origin_unwind_idx,
+                                  __stop_unwind_idx);
+       } else {
                /* module unwind tables */
                struct unwind_table *table;
 
@@ -145,7 +200,8 @@ static struct unwind_idx *unwind_find_idx(unsigned long addr)
                        if (addr >= table->begin_addr &&
                            addr < table->end_addr) {
                                idx = search_index(addr, table->start,
-                                                  table->stop - 1);
+                                                  table->origin,
+                                                  table->stop);
                                /* Move-to-front to exploit common traces */
                                list_move(&table->list, &unwind_tables);
                                break;
@@ -274,7 +330,7 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
 int unwind_frame(struct stackframe *frame)
 {
        unsigned long high, low;
-       struct unwind_idx *idx;
+       const struct unwind_idx *idx;
        struct unwind_ctrl_block ctrl;
 
        /* only go to a higher address on the stack */
@@ -399,7 +455,6 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
                                      unsigned long text_size)
 {
        unsigned long flags;
-       struct unwind_idx *idx;
        struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL);
 
        pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size,
@@ -408,15 +463,12 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
        if (!tab)
                return tab;
 
-       tab->start = (struct unwind_idx *)start;
-       tab->stop = (struct unwind_idx *)(start + size);
+       tab->start = (const struct unwind_idx *)start;
+       tab->stop = (const struct unwind_idx *)(start + size);
+       tab->origin = unwind_find_origin(tab->start, tab->stop);
        tab->begin_addr = text_addr;
        tab->end_addr = text_addr + text_size;
 
-       /* Convert the symbol addresses to absolute values */
-       for (idx = tab->start; idx < tab->stop; idx++)
-               idx->addr = prel31_to_addr(&idx->addr);
-
        spin_lock_irqsave(&unwind_lock, flags);
        list_add_tail(&tab->list, &unwind_tables);
        spin_unlock_irqrestore(&unwind_lock, flags);
@@ -437,16 +489,3 @@ void unwind_table_del(struct unwind_table *tab)
 
        kfree(tab);
 }
-
-int __init unwind_init(void)
-{
-       struct unwind_idx *idx;
-
-       /* Convert the symbol addresses to absolute values */
-       for (idx = __start_unwind_idx; idx < __stop_unwind_idx; idx++)
-               idx->addr = prel31_to_addr(&idx->addr);
-
-       pr_debug("unwind: ARM stack unwinding initialised\n");
-
-       return 0;
-}
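
For background on the unwinder change above (illustrative sketch, not part of the patch): rather than rewriting every index entry's prel31 field into an absolute address at init time, which writes to the table and breaks read-only/XIP placement, the lookup now binary-searches the index directly on the self-relative offsets. The sketch below shows the two ingredients in isolation, with made-up names and plain C types: decoding a prel31 field, and a "last entry not above the target" binary search. It is a standalone approximation, not the kernel routines.

#include <stdint.h>
#include <stddef.h>

/* Decode a prel31 field: a 31-bit signed offset relative to the field itself. */
static uintptr_t demo_prel31_to_addr(const uint32_t *field)
{
	uint32_t raw = *field & 0x7fffffffu;
	/* bit 30 is the sign bit of the 31-bit value */
	int32_t offset = (raw & 0x40000000u) ? (int32_t)(raw | 0x80000000u)
					     : (int32_t)raw;
	return (uintptr_t)field + offset;
}

/* Return the index of the last element <= addr, or -1 if addr precedes all. */
static long demo_find_last_not_above(const uintptr_t *starts, size_t n,
				     uintptr_t addr)
{
	size_t lo = 0, hi = n;		/* search window [lo, hi) */

	if (n == 0 || addr < starts[0])
		return -1;
	while (hi - lo > 1) {
		size_t mid = lo + (hi - lo) / 2;

		if (addr < starts[mid])
			hi = mid;
		else
			lo = mid;
	}
	return (long)lo;
}

With the index sorted by decoded start address, the second routine picks the entry covering addr; search_index() in the hunk does the same job but keeps the comparison in prel31 space so the table is never written.
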
index 66591fa..ad93068 100644 (file)
@@ -83,7 +83,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
  *  USB Device (Gadget)
  * -------------------------------------------------------------------- */
 
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
 static struct at91_udc_data udc_data;
 
 static struct resource udc_resources[] = {
index b84a9f6..0d20677 100644 (file)
@@ -195,9 +195,9 @@ static struct clk_lookup periph_clocks_lookups[] = {
        CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
        CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
        CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
-       CLKDEV_CON_DEV_ID("t3_clk", "atmel_tcb.1", &tc3_clk),
-       CLKDEV_CON_DEV_ID("t4_clk", "atmel_tcb.1", &tc4_clk),
-       CLKDEV_CON_DEV_ID("t5_clk", "atmel_tcb.1", &tc5_clk),
+       CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk),
+       CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk),
+       CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk),
        CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc_clk),
        /* more usart lookup table for DT entries */
        CLKDEV_CON_DEV_ID("usart", "fffff200.serial", &mck),
index 25e3464..629fa97 100644 (file)
@@ -84,7 +84,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
  *  USB Device (Gadget)
  * -------------------------------------------------------------------- */
 
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
 static struct at91_udc_data udc_data;
 
 static struct resource udc_resources[] = {
index ae78f4d..a178b58 100644 (file)
@@ -87,7 +87,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
  *  USB Device (Gadget)
  * -------------------------------------------------------------------- */
 
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
 static struct at91_udc_data udc_data;
 
 static struct resource udc_resources[] = {
index ad017eb..d5fbac9 100644 (file)
@@ -92,7 +92,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
  *  USB Device (Gadget)
  * -------------------------------------------------------------------- */
 
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
 static struct at91_udc_data udc_data;
 
 static struct resource udc_resources[] = {
index 8f48660..ec164a4 100644 (file)
@@ -19,7 +19,7 @@
 #define BOARD_HAVE_NAND_16BIT  (1 << 31)
 static inline int board_have_nand_16bit(void)
 {
-       return system_rev & BOARD_HAVE_NAND_16BIT;
+       return (system_rev & BOARD_HAVE_NAND_16BIT) ? 1 : 0;
 }
 
 #endif /* __ARCH_SYSTEM_REV_H__ */
index 1d7d249..6659a90 100644 (file)
@@ -753,7 +753,7 @@ static struct snd_platform_data da850_evm_snd_data = {
        .num_serializer = ARRAY_SIZE(da850_iis_serializer_direction),
        .tdm_slots      = 2,
        .serial_dir     = da850_iis_serializer_direction,
-       .asp_chan_q     = EVENTQ_1,
+       .asp_chan_q     = EVENTQ_0,
        .version        = MCASP_VERSION_2,
        .txnumevt       = 1,
        .rxnumevt       = 1,
index 1918ae7..46e1f41 100644 (file)
@@ -107,7 +107,7 @@ static struct mtd_partition davinci_nand_partitions[] = {
                /* UBL (a few copies) plus U-Boot */
                .name           = "bootloader",
                .offset         = 0,
-               .size           = 28 * NAND_BLOCK_SIZE,
+               .size           = 30 * NAND_BLOCK_SIZE,
                .mask_flags     = MTD_WRITEABLE, /* force read-only */
        }, {
                /* U-Boot environment */
index e574d7f..635bf77 100644 (file)
@@ -564,7 +564,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
        int val;
        u32 value;
 
-       if (!vpif_vsclkdis_reg || !cpld_client)
+       if (!vpif_vidclkctl_reg || !cpld_client)
                return -ENXIO;
 
        val = i2c_smbus_read_byte(cpld_client);
@@ -572,7 +572,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
                return val;
 
        spin_lock_irqsave(&vpif_reg_lock, flags);
-       value = __raw_readl(vpif_vsclkdis_reg);
+       value = __raw_readl(vpif_vidclkctl_reg);
        if (mux_mode) {
                val &= VPIF_INPUT_TWO_CHANNEL;
                value |= VIDCH1CLK;
@@ -580,7 +580,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
                val |= VPIF_INPUT_ONE_CHANNEL;
                value &= ~VIDCH1CLK;
        }
-       __raw_writel(value, vpif_vsclkdis_reg);
+       __raw_writel(value, vpif_vidclkctl_reg);
        spin_unlock_irqrestore(&vpif_reg_lock, flags);
 
        err = i2c_smbus_write_byte(cpld_client, val);
index 0b68ed5..af27c13 100644 (file)
@@ -161,7 +161,6 @@ static struct clk dsp_clk = {
        .name = "dsp",
        .parent = &pll1_sysclk1,
        .lpsc = DM646X_LPSC_C64X_CPU,
-       .flags = PSC_DSP,
        .usecount = 1,                  /* REVISIT how to disable? */
 };
 
index fa59c09..8bc3fc2 100644 (file)
 #define PTCMD          0x120
 #define PTSTAT         0x128
 #define PDSTAT         0x200
-#define PDCTL1         0x304
+#define PDCTL          0x300
 #define MDSTAT         0x800
 #define MDCTL          0xA00
 
 #define PSC_STATE_ENABLE       3
 
 #define MDSTAT_STATE_MASK      0x3f
+#define PDSTAT_STATE_MASK      0x1f
 #define MDCTL_FORCE            BIT(31)
+#define PDCTL_NEXT             BIT(1)
+#define PDCTL_EPCGOOD          BIT(8)
 
 #ifndef __ASSEMBLER__
 
index 1fb6bdf..d7e210f 100644 (file)
@@ -52,7 +52,7 @@ int __init davinci_psc_is_clk_active(unsigned int ctlr, unsigned int id)
 void davinci_psc_config(unsigned int domain, unsigned int ctlr,
                unsigned int id, bool enable, u32 flags)
 {
-       u32 epcpr, ptcmd, ptstat, pdstat, pdctl1, mdstat, mdctl;
+       u32 epcpr, ptcmd, ptstat, pdstat, pdctl, mdstat, mdctl;
        void __iomem *psc_base;
        struct davinci_soc_info *soc_info = &davinci_soc_info;
        u32 next_state = PSC_STATE_ENABLE;
@@ -79,11 +79,11 @@ void davinci_psc_config(unsigned int domain, unsigned int ctlr,
                mdctl |= MDCTL_FORCE;
        __raw_writel(mdctl, psc_base + MDCTL + 4 * id);
 
-       pdstat = __raw_readl(psc_base + PDSTAT);
-       if ((pdstat & 0x00000001) == 0) {
-               pdctl1 = __raw_readl(psc_base + PDCTL1);
-               pdctl1 |= 0x1;
-               __raw_writel(pdctl1, psc_base + PDCTL1);
+       pdstat = __raw_readl(psc_base + PDSTAT + 4 * domain);
+       if ((pdstat & PDSTAT_STATE_MASK) == 0) {
+               pdctl = __raw_readl(psc_base + PDCTL + 4 * domain);
+               pdctl |= PDCTL_NEXT;
+               __raw_writel(pdctl, psc_base + PDCTL + 4 * domain);
 
                ptcmd = 1 << domain;
                __raw_writel(ptcmd, psc_base + PTCMD);
@@ -92,9 +92,9 @@ void davinci_psc_config(unsigned int domain, unsigned int ctlr,
                        epcpr = __raw_readl(psc_base + EPCPR);
                } while ((((epcpr >> domain) & 1) == 0));
 
-               pdctl1 = __raw_readl(psc_base + PDCTL1);
-               pdctl1 |= 0x100;
-               __raw_writel(pdctl1, psc_base + PDCTL1);
+               pdctl = __raw_readl(psc_base + PDCTL + 4 * domain);
+               pdctl |= PDCTL_EPCGOOD;
+               __raw_writel(pdctl, psc_base + PDCTL + 4 * domain);
        } else {
                ptcmd = 1 << domain;
                __raw_writel(ptcmd, psc_base + PTCMD);
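
A note on the PSC hunks above (illustrative, helper name made up): PDSTAT and PDCTL are no longer treated as single registers but as arrays of 32-bit registers starting at 0x200 and 0x300, one slot per power domain, so domain n is addressed at base + array + 4 * n, which is the "+ 4 * domain" arithmetic in the new code.

#include <stdint.h>

#define DEMO_PDSTAT	0x200	/* power-domain status registers, one u32 per domain */
#define DEMO_PDCTL	0x300	/* power-domain control registers, one u32 per domain */

/* Byte offset of a per-domain register inside the PSC register block. */
static inline uint32_t demo_psc_pd_offset(uint32_t array_base, unsigned int domain)
{
	return array_base + 4u * domain;
}
/* demo_psc_pd_offset(DEMO_PDSTAT, 1) == 0x204; demo_psc_pd_offset(DEMO_PDCTL, 1) == 0x304 */
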
index 97343df..85b5527 100644 (file)
@@ -44,8 +44,6 @@ struct mct_clock_event_device {
        char name[10];
 };
 
-static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
-
 static void exynos4_mct_write(unsigned int value, void *addr)
 {
        void __iomem *stat_addr;
@@ -264,6 +262,9 @@ static void exynos4_clockevent_init(void)
 }
 
 #ifdef CONFIG_LOCAL_TIMERS
+
+static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
+
 /* Clock event handling */
 static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
 {
@@ -428,9 +429,13 @@ int __cpuinit local_timer_setup(struct clock_event_device *evt)
 
 void local_timer_stop(struct clock_event_device *evt)
 {
+       unsigned int cpu = smp_processor_id();
        evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
        if (mct_int_type == MCT_INT_SPI)
-               disable_irq(evt->irq);
+               if (cpu == 0)
+                       remove_irq(evt->irq, &mct_tick0_event_irq);
+               else
+                       remove_irq(evt->irq, &mct_tick1_event_irq);
        else
                disable_percpu_irq(IRQ_MCT_LOCALTIMER);
 }
@@ -443,6 +448,7 @@ static void __init exynos4_timer_resources(void)
 
        clk_rate = clk_get_rate(mct_clk);
 
+#ifdef CONFIG_LOCAL_TIMERS
        if (mct_int_type == MCT_INT_PPI) {
                int err;
 
@@ -452,6 +458,7 @@ static void __init exynos4_timer_resources(void)
                WARN(err, "MCT: can't request IRQ %d (%d)\n",
                     IRQ_MCT_LOCALTIMER, err);
        }
+#endif /* CONFIG_LOCAL_TIMERS */
 }
 
 static void __init exynos4_timer_init(void)
index 9cd860a..8deb012 100644 (file)
@@ -37,14 +37,15 @@ static void __init imx6q_map_io(void)
        imx6q_clock_map_io();
 }
 
-static void __init imx6q_gpio_add_irq_domain(struct device_node *np,
+static int __init imx6q_gpio_add_irq_domain(struct device_node *np,
                                struct device_node *interrupt_parent)
 {
-       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
-                                  32 * 7; /* imx6q gets 7 gpio ports */
+       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
 
+       gpio_irq_base -= 32;
        irq_domain_add_simple(np, gpio_irq_base);
-       gpio_irq_base += 32;
+
+       return 0;
 }
 
 static const struct of_device_id imx6q_irq_match[] __initconst = {
index 24030d0..0fb7a17 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/bootmem.h>
+#include <linux/module.h>
 #include <mach/irqs.h>
 #include <mach/iommu.h>
 
index 5c83760..24994bb 100644 (file)
@@ -362,7 +362,7 @@ static void __init mx51_babbage_init(void)
 {
        iomux_v3_cfg_t usbh1stp = MX51_PAD_USBH1_STP__USBH1_STP;
        iomux_v3_cfg_t power_key = NEW_PAD_CTRL(MX51_PAD_EIM_A27__GPIO2_21,
-               PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH | PAD_CTL_PUS_100K_UP);
+               PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH);
 
        imx51_soc_init();
 
index 6bea31a..64bbfce 100644 (file)
@@ -106,7 +106,7 @@ static inline void mx53_evk_fec_reset(void)
        gpio_set_value(MX53_EVK_FEC_PHY_RST, 1);
 }
 
-static struct fec_platform_data mx53_evk_fec_pdata = {
+static const struct fec_platform_data mx53_evk_fec_pdata __initconst = {
        .phy = PHY_INTERFACE_MODE_RMII,
 };
 
index 7678f77..237bdec 100644 (file)
@@ -242,7 +242,7 @@ static inline void mx53_loco_fec_reset(void)
        gpio_set_value(LOCO_FEC_PHY_RST, 1);
 }
 
-static struct fec_platform_data mx53_loco_fec_data = {
+static const struct fec_platform_data mx53_loco_fec_data __initconst = {
        .phy = PHY_INTERFACE_MODE_RMII,
 };
 
index 59c0845..d42132a 100644 (file)
@@ -104,7 +104,7 @@ static inline void mx53_smd_fec_reset(void)
        gpio_set_value(SMD_FEC_PHY_RST, 1);
 }
 
-static struct fec_platform_data mx53_smd_fec_data = {
+static const struct fec_platform_data mx53_smd_fec_data __initconst = {
        .phy = PHY_INTERFACE_MODE_RMII,
 };
 
index ccc6158..596edd9 100644 (file)
@@ -44,20 +44,22 @@ static const struct of_dev_auxdata imx51_auxdata_lookup[] __initconst = {
        { /* sentinel */ }
 };
 
-static void __init imx51_tzic_add_irq_domain(struct device_node *np,
+static int __init imx51_tzic_add_irq_domain(struct device_node *np,
                                struct device_node *interrupt_parent)
 {
        irq_domain_add_simple(np, 0);
+       return 0;
 }
 
-static void __init imx51_gpio_add_irq_domain(struct device_node *np,
+static int __init imx51_gpio_add_irq_domain(struct device_node *np,
                                struct device_node *interrupt_parent)
 {
-       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
-                                  32 * 4; /* imx51 gets 4 gpio ports */
+       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
 
+       gpio_irq_base -= 32;
        irq_domain_add_simple(np, gpio_irq_base);
-       gpio_irq_base += 32;
+
+       return 0;
 }
 
 static const struct of_device_id imx51_irq_match[] __initconst = {
index ccaa0b8..85bfd5f 100644 (file)
@@ -48,20 +48,22 @@ static const struct of_dev_auxdata imx53_auxdata_lookup[] __initconst = {
        { /* sentinel */ }
 };
 
-static void __init imx53_tzic_add_irq_domain(struct device_node *np,
+static int __init imx53_tzic_add_irq_domain(struct device_node *np,
                                struct device_node *interrupt_parent)
 {
        irq_domain_add_simple(np, 0);
+       return 0;
 }
 
-static void __init imx53_gpio_add_irq_domain(struct device_node *np,
+static int __init imx53_gpio_add_irq_domain(struct device_node *np,
                                struct device_node *interrupt_parent)
 {
-       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
-                                  32 * 7; /* imx53 gets 7 gpio ports */
+       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
 
+       gpio_irq_base -= 32;
        irq_domain_add_simple(np, gpio_irq_base);
-       gpio_irq_base += 32;
+
+       return 0;
 }
 
 static const struct of_device_id imx53_irq_match[] __initconst = {
index 75d8611..30c7990 100644 (file)
 #define MX28_INT_CAN1                  9
 #define MX28_INT_LRADC_TOUCH           10
 #define MX28_INT_HSADC                 13
-#define MX28_INT_IRADC_THRESH0         14
-#define MX28_INT_IRADC_THRESH1         15
+#define MX28_INT_LRADC_THRESH0         14
+#define MX28_INT_LRADC_THRESH1         15
 #define MX28_INT_LRADC_CH0             16
 #define MX28_INT_LRADC_CH1             17
 #define MX28_INT_LRADC_CH2             18
index 0d2d2b4..bde5f66 100644 (file)
@@ -30,6 +30,7 @@
  */
 #define cpu_is_mx23()          (                                       \
                machine_is_mx23evk() ||                                 \
+               machine_is_stmp378x() ||                                \
                0)
 #define cpu_is_mx28()          (                                       \
                machine_is_mx28evk() ||                                 \
index 3b1681e..6b00577 100644 (file)
@@ -361,6 +361,6 @@ static struct sys_timer m28evk_timer = {
 MACHINE_START(M28EVK, "DENX M28 EVK")
        .map_io         = mx28_map_io,
        .init_irq       = mx28_init_irq,
-       .init_machine   = m28evk_init,
        .timer          = &m28evk_timer,
+       .init_machine   = m28evk_init,
 MACHINE_END
index 177e531..6834dea 100644 (file)
@@ -115,6 +115,6 @@ static struct sys_timer stmp378x_dvb_timer = {
 MACHINE_START(STMP378X, "STMP378X")
        .map_io         = mx23_map_io,
        .init_irq       = mx23_init_irq,
-       .init_machine   = stmp378x_dvb_init,
        .timer          = &stmp378x_dvb_timer,
+       .init_machine   = stmp378x_dvb_init,
 MACHINE_END
index 0fcff47..9a7b08b 100644 (file)
@@ -66,11 +66,11 @@ static const iomux_cfg_t tx28_fec1_pads[] __initconst = {
        MX28_PAD_ENET0_CRS__ENET1_RX_EN,
 };
 
-static struct fec_platform_data tx28_fec0_data = {
+static const struct fec_platform_data tx28_fec0_data __initconst = {
        .phy = PHY_INTERFACE_MODE_RMII,
 };
 
-static struct fec_platform_data tx28_fec1_data = {
+static const struct fec_platform_data tx28_fec1_data __initconst = {
        .phy = PHY_INTERFACE_MODE_RMII,
 };
 
index 1297bb5..9ff90a7 100644 (file)
@@ -16,6 +16,8 @@
 
 #include <linux/kernel.h>
 #include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
 #include <linux/io.h>
 
 #include <asm/mach-types.h>  /* for machine_is_* */
@@ -927,16 +929,22 @@ int __init omap1_clk_init(void)
 
 void __init omap1_clk_late_init(void)
 {
-       if (ck_dpll1.rate >= OMAP1_DPLL1_SANE_VALUE)
+       unsigned long rate = ck_dpll1.rate;
+
+       if (rate >= OMAP1_DPLL1_SANE_VALUE)
                return;
 
+       /* System booting at unusable rate, force reprogramming of DPLL1 */
+       ck_dpll1_p->rate = 0;
+
        /* Find the highest supported frequency and enable it */
        if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) {
                pr_err("System frequencies not set, using default. Check your config.\n");
                omap_writew(0x2290, DPLL_CTL);
-               omap_writew(cpu_is_omap7xx() ? 0x3005 : 0x1005, ARM_CKCTL);
+               omap_writew(cpu_is_omap7xx() ? 0x2005 : 0x0005, ARM_CKCTL);
                ck_dpll1.rate = OMAP1_DPLL1_SANE_VALUE;
        }
        propagate_rate(&ck_dpll1);
        omap1_show_rates();
+       loops_per_jiffy = cpufreq_scale(loops_per_jiffy, rate, ck_dpll1.rate);
 }
index ba1aa07..c15c5c9 100644 (file)
@@ -193,7 +193,7 @@ static struct platform_device rx51_charger_device = {
 static void __init rx51_charger_init(void)
 {
        WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO,
-               GPIOF_OUT_INIT_LOW, "isp1704_reset"));
+               GPIOF_OUT_INIT_HIGH, "isp1704_reset"));
 
        platform_device_register(&rx51_charger_device);
 }
index 292eee3..28fcb27 100644 (file)
@@ -145,6 +145,9 @@ static int omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
                pdata->reg_size = 4;
                pdata->has_ccr = true;
        }
+       pdata->set_clk_src = omap2_mcbsp_set_clk_src;
+       if (id == 1)
+               pdata->mux_signal = omap2_mcbsp1_mux_rx_clk;
 
        if (oh->class->rev == MCBSP_CONFIG_TYPE3) {
                if (id == 2)
@@ -174,9 +177,6 @@ static int omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
                                        name, oh->name);
                return PTR_ERR(pdev);
        }
-       pdata->set_clk_src = omap2_mcbsp_set_clk_src;
-       if (id == 1)
-               pdata->mux_signal = omap2_mcbsp1_mux_rx_clk;
        omap_mcbsp_count++;
        return 0;
 }
index cb53160..26ebb57 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/suspend.h>
 #include <linux/slab.h>
+#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
index ef555c0..a12b689 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <asm/sizes.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <linux/of.h>
index 5e6b420..3341fd1 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <linux/kernel.h>
 #include <linux/string.h>
+#include <linux/export.h>
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/gpio.h>
index 7a3bc32..51c00f2 100644 (file)
@@ -70,7 +70,7 @@ void __init s3c6400_init_irq(void)
        s3c64xx_init_irq(~0 & ~(0xf << 5), ~0);
 }
 
-struct sysdev_class s3c6400_sysclass = {
+static struct sysdev_class s3c6400_sysclass = {
        .name   = "s3c6400-core",
 };
 
index 83d2afb..2cf8002 100644 (file)
@@ -20,7 +20,7 @@
 #include <plat/fb.h>
 #include <plat/gpio-cfg.h>
 
-extern void s3c64xx_fb_gpio_setup_24bpp(void)
+void s3c64xx_fb_gpio_setup_24bpp(void)
 {
        s3c_gpio_cfgrange_nopull(S3C64XX_GPI(0), 16, S3C_GPIO_SFN(2));
        s3c_gpio_cfgrange_nopull(S3C64XX_GPJ(0), 12, S3C_GPIO_SFN(2));
index a9106c3..8662ef6 100644 (file)
@@ -273,6 +273,7 @@ static struct samsung_bl_gpio_info smdkv210_bl_gpio_info = {
 
 static struct platform_pwm_backlight_data smdkv210_bl_data = {
        .pwm_id = 3,
+       .pwm_period_ns = 1000,
 };
 
 static void __init smdkv210_map_io(void)
index 5a616f6..f7951aa 100644 (file)
@@ -1,5 +1,5 @@
-ifeq ($(CONFIG_ARCH_SA1100),y)
-   zreladdr-$(CONFIG_SA1111)           += 0xc0208000
+ifeq ($(CONFIG_SA1111),y)
+   zreladdr-y  += 0xc0208000
 else
    zreladdr-y  += 0xc0008000
 endif
index b862e9f..7119b87 100644 (file)
@@ -607,6 +607,7 @@ struct sys_timer ag5evm_timer = {
 
 MACHINE_START(AG5EVM, "ag5evm")
        .map_io         = ag5evm_map_io,
+       .nr_irqs        = NR_IRQS_LEGACY,
        .init_irq       = sh73a0_init_irq,
        .handle_irq     = shmobile_handle_irq_gic,
        .init_machine   = ag5evm_init,
index bd9a784..f44150b 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/input/sh_keysc.h>
 #include <linux/gpio_keys.h>
 #include <linux/leds.h>
+#include <linux/platform_data/leds-renesas-tpu.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/sh_mmcif.h>
 #include <linux/mfd/tmio.h>
@@ -56,7 +57,7 @@ static struct resource smsc9220_resources[] = {
                .flags          = IORESOURCE_MEM,
        },
        [1] = {
-               .start          = gic_spi(33), /* PINTA2 @ PORT144 */
+               .start          = SH73A0_PINT0_IRQ(2), /* PINTA2 */
                .flags          = IORESOURCE_IRQ,
        },
 };
@@ -157,10 +158,6 @@ static struct platform_device gpio_keys_device = {
 #define GPIO_LED(n, g) { .name = n, .gpio = g }
 
 static struct gpio_led gpio_leds[] = {
-       GPIO_LED("V2513", GPIO_PORT153), /* PORT153 [TPU1T02] -> V2513 */
-       GPIO_LED("V2514", GPIO_PORT199), /* PORT199 [TPU4TO1] -> V2514 */
-       GPIO_LED("V2515", GPIO_PORT197), /* PORT197 [TPU2TO1] -> V2515 */
-       GPIO_LED("KEYLED", GPIO_PORT163), /* PORT163 [TPU3TO0] -> KEYLED */
        GPIO_LED("G", GPIO_PORT20), /* PORT20 [GPO0] -> LED7 -> "G" */
        GPIO_LED("H", GPIO_PORT21), /* PORT21 [GPO1] -> LED8 -> "H" */
        GPIO_LED("J", GPIO_PORT22), /* PORT22 [GPO2] -> LED9 -> "J" */
@@ -179,6 +176,119 @@ static struct platform_device gpio_leds_device = {
        },
 };
 
+/* TPU LED */
+static struct led_renesas_tpu_config led_renesas_tpu12_pdata = {
+       .name           = "V2513",
+       .pin_gpio_fn    = GPIO_FN_TPU1TO2,
+       .pin_gpio       = GPIO_PORT153,
+       .channel_offset = 0x90,
+       .timer_bit = 2,
+       .max_brightness = 1000,
+};
+
+static struct resource tpu12_resources[] = {
+       [0] = {
+               .name   = "TPU12",
+               .start  = 0xe6610090,
+               .end    = 0xe66100b5,
+               .flags  = IORESOURCE_MEM,
+       },
+};
+
+static struct platform_device leds_tpu12_device = {
+       .name = "leds-renesas-tpu",
+       .id = 12,
+       .dev = {
+               .platform_data  = &led_renesas_tpu12_pdata,
+       },
+       .num_resources  = ARRAY_SIZE(tpu12_resources),
+       .resource       = tpu12_resources,
+};
+
+static struct led_renesas_tpu_config led_renesas_tpu41_pdata = {
+       .name           = "V2514",
+       .pin_gpio_fn    = GPIO_FN_TPU4TO1,
+       .pin_gpio       = GPIO_PORT199,
+       .channel_offset = 0x50,
+       .timer_bit = 1,
+       .max_brightness = 1000,
+};
+
+static struct resource tpu41_resources[] = {
+       [0] = {
+               .name   = "TPU41",
+               .start  = 0xe6640050,
+               .end    = 0xe6640075,
+               .flags  = IORESOURCE_MEM,
+       },
+};
+
+static struct platform_device leds_tpu41_device = {
+       .name = "leds-renesas-tpu",
+       .id = 41,
+       .dev = {
+               .platform_data  = &led_renesas_tpu41_pdata,
+       },
+       .num_resources  = ARRAY_SIZE(tpu41_resources),
+       .resource       = tpu41_resources,
+};
+
+static struct led_renesas_tpu_config led_renesas_tpu21_pdata = {
+       .name           = "V2515",
+       .pin_gpio_fn    = GPIO_FN_TPU2TO1,
+       .pin_gpio       = GPIO_PORT197,
+       .channel_offset = 0x50,
+       .timer_bit = 1,
+       .max_brightness = 1000,
+};
+
+static struct resource tpu21_resources[] = {
+       [0] = {
+               .name   = "TPU21",
+               .start  = 0xe6620050,
+               .end    = 0xe6620075,
+               .flags  = IORESOURCE_MEM,
+       },
+};
+
+static struct platform_device leds_tpu21_device = {
+       .name = "leds-renesas-tpu",
+       .id = 21,
+       .dev = {
+               .platform_data  = &led_renesas_tpu21_pdata,
+       },
+       .num_resources  = ARRAY_SIZE(tpu21_resources),
+       .resource       = tpu21_resources,
+};
+
+static struct led_renesas_tpu_config led_renesas_tpu30_pdata = {
+       .name           = "KEYLED",
+       .pin_gpio_fn    = GPIO_FN_TPU3TO0,
+       .pin_gpio       = GPIO_PORT163,
+       .channel_offset = 0x10,
+       .timer_bit = 0,
+       .max_brightness = 1000,
+};
+
+static struct resource tpu30_resources[] = {
+       [0] = {
+               .name   = "TPU30",
+               .start  = 0xe6630010,
+               .end    = 0xe6630035,
+               .flags  = IORESOURCE_MEM,
+       },
+};
+
+static struct platform_device leds_tpu30_device = {
+       .name = "leds-renesas-tpu",
+       .id = 30,
+       .dev = {
+               .platform_data  = &led_renesas_tpu30_pdata,
+       },
+       .num_resources  = ARRAY_SIZE(tpu30_resources),
+       .resource       = tpu30_resources,
+};
+
 /* MMCIF */
 static struct resource mmcif_resources[] = {
        [0] = {
@@ -291,6 +401,10 @@ static struct platform_device *kota2_devices[] __initdata = {
        &keysc_device,
        &gpio_keys_device,
        &gpio_leds_device,
+       &leds_tpu12_device,
+       &leds_tpu41_device,
+       &leds_tpu21_device,
+       &leds_tpu30_device,
        &mmcif_device,
        &sdhi0_device,
        &sdhi1_device,
@@ -317,18 +431,6 @@ static void __init kota2_map_io(void)
        shmobile_setup_console();
 }
 
-#define PINTER0A       0xe69000a0
-#define PINTCR0A       0xe69000b0
-
-void __init kota2_init_irq(void)
-{
-       sh73a0_init_irq();
-
-       /* setup PINT: enable PINTA2 as active low */
-       __raw_writel(1 << 29, PINTER0A);
-       __raw_writew(2 << 10, PINTCR0A);
-}
-
 static void __init kota2_init(void)
 {
        sh73a0_pinmux_init();
@@ -447,7 +549,8 @@ struct sys_timer kota2_timer = {
 
 MACHINE_START(KOTA2, "kota2")
        .map_io         = kota2_map_io,
-       .init_irq       = kota2_init_irq,
+       .nr_irqs        = NR_IRQS_LEGACY,
+       .init_irq       = sh73a0_init_irq,
        .handle_irq     = shmobile_handle_irq_gic,
        .init_machine   = kota2_init,
        .timer          = &kota2_timer,
index 61a846b..1370a89 100644 (file)
@@ -113,6 +113,12 @@ static struct clk main_clk = {
        .ops            = &main_clk_ops,
 };
 
+/* Divide Main clock by two */
+static struct clk main_div2_clk = {
+       .ops            = &div2_clk_ops,
+       .parent         = &main_clk,
+};
+
 /* PLL0, PLL1, PLL2, PLL3 */
 static unsigned long pll_recalc(struct clk *clk)
 {
@@ -181,6 +187,7 @@ static struct clk *main_clks[] = {
        &extal1_div2_clk,
        &extal2_div2_clk,
        &main_clk,
+       &main_div2_clk,
        &pll0_clk,
        &pll1_clk,
        &pll2_clk,
@@ -243,7 +250,7 @@ static struct clk div6_clks[DIV6_NR] = {
        [DIV6_VCK1] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR1, 0),
        [DIV6_VCK2] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR2, 0),
        [DIV6_VCK3] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR3, 0),
-       [DIV6_ZB1] = SH_CLK_DIV6(&pll1_div2_clk, ZBCKCR, 0),
+       [DIV6_ZB1] = SH_CLK_DIV6(&pll1_div2_clk, ZBCKCR, CLK_ENABLE_ON_INIT),
        [DIV6_FLCTL] = SH_CLK_DIV6(&pll1_div2_clk, FLCKCR, 0),
        [DIV6_SDHI0] = SH_CLK_DIV6(&pll1_div2_clk, SD0CKCR, 0),
        [DIV6_SDHI1] = SH_CLK_DIV6(&pll1_div2_clk, SD1CKCR, 0),
@@ -268,6 +275,7 @@ enum { MSTP001,
        MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
        MSTP331, MSTP329, MSTP325, MSTP323, MSTP318,
        MSTP314, MSTP313, MSTP312, MSTP311,
+       MSTP303, MSTP302, MSTP301, MSTP300,
        MSTP411, MSTP410, MSTP403,
        MSTP_NR };
 
@@ -301,6 +309,10 @@ static struct clk mstp_clks[MSTP_NR] = {
        [MSTP313] = MSTP(&div6_clks[DIV6_SDHI1], SMSTPCR3, 13, 0), /* SDHI1 */
        [MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */
        [MSTP311] = MSTP(&div6_clks[DIV6_SDHI2], SMSTPCR3, 11, 0), /* SDHI2 */
+       [MSTP303] = MSTP(&main_div2_clk, SMSTPCR3, 3, 0), /* TPU1 */
+       [MSTP302] = MSTP(&main_div2_clk, SMSTPCR3, 2, 0), /* TPU2 */
+       [MSTP301] = MSTP(&main_div2_clk, SMSTPCR3, 1, 0), /* TPU3 */
+       [MSTP300] = MSTP(&main_div2_clk, SMSTPCR3, 0, 0), /* TPU4 */
        [MSTP411] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 11, 0), /* IIC3 */
        [MSTP410] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 10, 0), /* IIC4 */
        [MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */
@@ -350,6 +362,10 @@ static struct clk_lookup lookups[] = {
        CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
        CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */
        CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP311]), /* SDHI2 */
+       CLKDEV_DEV_ID("leds-renesas-tpu.12", &mstp_clks[MSTP303]), /* TPU1 */
+       CLKDEV_DEV_ID("leds-renesas-tpu.21", &mstp_clks[MSTP302]), /* TPU2 */
+       CLKDEV_DEV_ID("leds-renesas-tpu.30", &mstp_clks[MSTP301]), /* TPU3 */
+       CLKDEV_DEV_ID("leds-renesas-tpu.41", &mstp_clks[MSTP300]), /* TPU4 */
        CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* I2C3 */
        CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* I2C4 */
        CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */
index 74aac96..adbff70 100644 (file)
@@ -17,6 +17,7 @@
  * the CPU clock speed on the fly.
  */
 
+#include <linux/module.h>
 #include <linux/cpufreq.h>
 #include <linux/clk.h>
 #include <linux/err.h>
index 42d74ea..845de59 100644 (file)
@@ -32,6 +32,9 @@
 #define MX3_PWMSAR                0x0C    /* PWM Sample Register */
 #define MX3_PWMPR                 0x10    /* PWM Period Register */
 #define MX3_PWMCR_PRESCALER(x)    (((x - 1) & 0xFFF) << 4)
+#define MX3_PWMCR_DOZEEN                (1 << 24)
+#define MX3_PWMCR_WAITEN                (1 << 23)
+#define MX3_PWMCR_DBGEN                        (1 << 22)
 #define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16)
 #define MX3_PWMCR_CLKSRC_IPG      (1 << 16)
 #define MX3_PWMCR_EN              (1 << 0)
@@ -77,7 +80,9 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
                writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR);
                writel(period_cycles, pwm->mmio_base + MX3_PWMPR);
 
-               cr = MX3_PWMCR_PRESCALER(prescale) | MX3_PWMCR_EN;
+               cr = MX3_PWMCR_PRESCALER(prescale) |
+                       MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
+                       MX3_PWMCR_DBGEN | MX3_PWMCR_EN;
 
                if (cpu_is_mx25())
                        cr |= MX3_PWMCR_CLKSRC_IPG;
index e657305..a976c02 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/pwm_backlight.h>
-#include <linux/slab.h>
 
 #include <plat/devs.h>
 #include <plat/gpio-cfg.h>
index 43f984e..303192f 100644 (file)
 #define __NR_clock_adjtime     342
 #define __NR_syncfs            343
 #define __NR_setns             344
+#define __NR_process_vm_readv  345
+#define __NR_process_vm_writev 346
 
 #ifdef __KERNEL__
 
-#define NR_syscalls            345
+#define NR_syscalls            347
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
index c468f2e..ce827b3 100644 (file)
@@ -365,4 +365,6 @@ ENTRY(sys_call_table)
        .long sys_clock_adjtime
        .long sys_syncfs
        .long sys_setns
+       .long sys_process_vm_readv      /* 345 */
+       .long sys_process_vm_writev
 
index 4f2971b..315fc0b 100644 (file)
@@ -623,7 +623,7 @@ static int mipspmu_event_init(struct perf_event *event)
        if (!atomic_inc_not_zero(&active_events)) {
                if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
                        atomic_dec(&active_events);
-                       return -ENOSPC;
+                       return -EINVAL;
                }
 
                mutex_lock(&pmu_reserve_mutex);
@@ -732,15 +732,15 @@ static int validate_group(struct perf_event *event)
        memset(&fake_cpuc, 0, sizeof(fake_cpuc));
 
        if (!validate_event(&fake_cpuc, leader))
-               return -ENOSPC;
+               return -EINVAL;
 
        list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
                if (!validate_event(&fake_cpuc, sibling))
-                       return -ENOSPC;
+                       return -EINVAL;
        }
 
        if (!validate_event(&fake_cpuc, event))
-               return -ENOSPC;
+               return -EINVAL;
 
        return 0;
 }
index 524d23b..4f289ff 100644 (file)
@@ -599,10 +599,10 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
        skey = page_get_storage_key(address);
        bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
        /* Clear page changed & referenced bit in the storage key */
-       if (bits) {
-               skey ^= bits;
-               page_set_storage_key(address, skey, 1);
-       }
+       if (bits & _PAGE_CHANGED)
+               page_set_storage_key(address, skey ^ bits, 1);
+       else if (bits)
+               page_reset_referenced(address);
        /* Transfer page changed & referenced bit to guest bits in pgste */
        pgste_val(pgste) |= bits << 48;         /* RCP_GR_BIT & RCP_GC_BIT */
        /* Get host changed & referenced bits from pgste */
index 450931a..573bc29 100644 (file)
@@ -296,13 +296,6 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
                     ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))))
                        /* Invalid psw mask. */
                        return -EINVAL;
-               if (addr == (addr_t) &dummy->regs.psw.addr)
-                       /*
-                        * The debugger changed the instruction address,
-                        * reset system call restart, see signal.c:do_signal
-                        */
-                       task_thread_info(child)->system_call = 0;
-
                *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
 
        } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
@@ -614,11 +607,6 @@ static int __poke_user_compat(struct task_struct *child,
                        /* Transfer 31 bit amode bit to psw mask. */
                        regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
                                (__u64)(tmp & PSW32_ADDR_AMODE);
-                       /*
-                        * The debugger changed the instruction address,
-                        * reset system call restart, see signal.c:do_signal
-                        */
-                       task_thread_info(child)->system_call = 0;
                } else {
                        /* gpr 0-15 */
                        *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
@@ -905,6 +893,14 @@ static int s390_last_break_get(struct task_struct *target,
        return 0;
 }
 
+static int s390_last_break_set(struct task_struct *target,
+                              const struct user_regset *regset,
+                              unsigned int pos, unsigned int count,
+                              const void *kbuf, const void __user *ubuf)
+{
+       return 0;
+}
+
 #endif
 
 static int s390_system_call_get(struct task_struct *target,
@@ -951,6 +947,7 @@ static const struct user_regset s390_regsets[] = {
                .size = sizeof(long),
                .align = sizeof(long),
                .get = s390_last_break_get,
+               .set = s390_last_break_set,
        },
 #endif
        [REGSET_SYSTEM_CALL] = {
@@ -1116,6 +1113,14 @@ static int s390_compat_last_break_get(struct task_struct *target,
        return 0;
 }
 
+static int s390_compat_last_break_set(struct task_struct *target,
+                                     const struct user_regset *regset,
+                                     unsigned int pos, unsigned int count,
+                                     const void *kbuf, const void __user *ubuf)
+{
+       return 0;
+}
+
 static const struct user_regset s390_compat_regsets[] = {
        [REGSET_GENERAL] = {
                .core_note_type = NT_PRSTATUS,
@@ -1139,6 +1144,7 @@ static const struct user_regset s390_compat_regsets[] = {
                .size = sizeof(long),
                .align = sizeof(long),
                .get = s390_compat_last_break_get,
+               .set = s390_compat_last_break_set,
        },
        [REGSET_SYSTEM_CALL] = {
                .core_note_type = NT_S390_SYSTEM_CALL,
index e58a462..e54c4ff 100644 (file)
@@ -579,7 +579,7 @@ static unsigned long __init find_crash_base(unsigned long crash_size,
                *msg = "first memory chunk must be at least crashkernel size";
                return 0;
        }
-       if (is_kdump_kernel() && (crash_size == OLDMEM_SIZE))
+       if (OLDMEM_BASE && crash_size == OLDMEM_SIZE)
                return OLDMEM_BASE;
 
        for (i = MEMORY_CHUNKS - 1; i >= 0; i--) {
index 05a85bc..7f6f9f3 100644 (file)
@@ -460,9 +460,9 @@ void do_signal(struct pt_regs *regs)
                                                     regs->svc_code >> 16);
                                break;
                        }
-                       /* No longer in a system call */
-                       clear_thread_flag(TIF_SYSCALL);
                }
+               /* No longer in a system call */
+               clear_thread_flag(TIF_SYSCALL);
 
                if ((is_compat_task() ?
                     handle_signal32(signr, &ka, &info, oldset, regs) :
@@ -486,6 +486,7 @@ void do_signal(struct pt_regs *regs)
        }
 
        /* No handlers present - check for system call restart */
+       clear_thread_flag(TIF_SYSCALL);
        if (current_thread_info()->system_call) {
                regs->svc_code = current_thread_info()->system_call;
                switch (regs->gprs[2]) {
@@ -500,9 +501,6 @@ void do_signal(struct pt_regs *regs)
                        regs->gprs[2] = regs->orig_gpr2;
                        set_thread_flag(TIF_SYSCALL);
                        break;
-               default:
-                       clear_thread_flag(TIF_SYSCALL);
-                       break;
                }
        }
 
index 6efc18b..bd58b72 100644 (file)
@@ -88,7 +88,7 @@ static ssize_t hwsampler_write(struct file *file, char const __user *buf,
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&val, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
 
        if (oprofile_started)
index ec8c84c..895e337 100644 (file)
@@ -50,9 +50,9 @@ static struct platform_device heartbeat_device = {
 #define GBECONT                0xffc10100
 #define GBECONT_RMII1  BIT(17)
 #define GBECONT_RMII0  BIT(16)
-static void sh7757_eth_set_mdio_gate(unsigned long addr)
+static void sh7757_eth_set_mdio_gate(void *addr)
 {
-       if ((addr & 0x00000fff) < 0x0800)
+       if (((unsigned long)addr & 0x00000fff) < 0x0800)
                writel(readl(GBECONT) | GBECONT_RMII0, GBECONT);
        else
                writel(readl(GBECONT) | GBECONT_RMII1, GBECONT);
@@ -116,9 +116,9 @@ static struct platform_device sh7757_eth1_device = {
        },
 };
 
-static void sh7757_eth_giga_set_mdio_gate(unsigned long addr)
+static void sh7757_eth_giga_set_mdio_gate(void *addr)
 {
-       if ((addr & 0x00000fff) < 0x0800) {
+       if (((unsigned long)addr & 0x00000fff) < 0x0800) {
                gpio_set_value(GPIO_PTT4, 1);
                writel(readl(GBECONT) & ~GBECONT_RMII0, GBECONT);
        } else {
@@ -210,8 +210,12 @@ static struct resource sh_mmcif_resources[] = {
 };
 
 static struct sh_mmcif_dma sh7757lcr_mmcif_dma = {
-       .chan_priv_tx   = SHDMA_SLAVE_MMCIF_TX,
-       .chan_priv_rx   = SHDMA_SLAVE_MMCIF_RX,
+       .chan_priv_tx   = {
+               .slave_id = SHDMA_SLAVE_MMCIF_TX,
+       },
+       .chan_priv_rx   = {
+               .slave_id = SHDMA_SLAVE_MMCIF_RX,
+       }
 };
 
 static struct sh_mmcif_plat_data sh_mmcif_plat = {
index 7429b47..381edcd 100644 (file)
@@ -1181,13 +1181,11 @@ static int __devinit ds_probe(struct vio_dev *vdev,
 
        dp->rcv_buf_len = 4096;
 
-       dp->ds_states = kzalloc(sizeof(ds_states_template),
-                               GFP_KERNEL);
+       dp->ds_states = kmemdup(ds_states_template,
+                               sizeof(ds_states_template), GFP_KERNEL);
        if (!dp->ds_states)
                goto out_free_rcv_buf;
 
-       memcpy(dp->ds_states, ds_states_template,
-              sizeof(ds_states_template));
        dp->num_ds_states = ARRAY_SIZE(ds_states_template);
 
        for (i = 0; i < dp->num_ds_states; i++)
index 4661480..741df91 100644 (file)
@@ -58,12 +58,10 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
        void *new_val;
        int err;
 
-       new_val = kmalloc(len, GFP_KERNEL);
+       new_val = kmemdup(val, len, GFP_KERNEL);
        if (!new_val)
                return -ENOMEM;
 
-       memcpy(new_val, val, len);
-
        err = -ENODEV;
 
        mutex_lock(&of_set_property_mutex);
index 5175ac2..8a7f817 100644 (file)
@@ -302,8 +302,7 @@ void __init btfixup(void)
                                case 'i':       /* INT */
                                        if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
                                                set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
-                                       else if ((insn & 0x80002000) == 0x80002000 &&
-                                                (insn & 0x01800000) != 0x01800000) /* %LO */
+                                       else if ((insn & 0x80002000) == 0x80002000) /* %LO */
                                                set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
                                        else {
                                                prom_printf(insn_i, p, addr, insn);
index 94e9a51..f80f8ce 100644 (file)
@@ -74,16 +74,6 @@ enum {
  */
 void tile_irq_activate(unsigned int irq, int tile_irq_type);
 
-/*
- * For onboard, non-PCI (e.g. TILE_IRQ_PERCPU) devices, drivers know
- * how to use enable/disable_percpu_irq() to manage interrupts on each
- * core.  We can't use the generic enable/disable_irq() because they
- * use a single reference count per irq, rather than per cpu per irq.
- */
-void enable_percpu_irq(unsigned int irq);
-void disable_percpu_irq(unsigned int irq);
-
-
 void setup_irq_regs(void);
 
 #endif /* _ASM_TILE_IRQ_H */
index aa0134d..02e6280 100644 (file)
@@ -152,14 +152,13 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
  * Remove an irq from the disabled mask.  If we're in an interrupt
  * context, defer enabling the HW interrupt until we leave.
  */
-void enable_percpu_irq(unsigned int irq)
+static void tile_irq_chip_enable(struct irq_data *d)
 {
-       get_cpu_var(irq_disable_mask) &= ~(1UL << irq);
+       get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
        if (__get_cpu_var(irq_depth) == 0)
-               unmask_irqs(1UL << irq);
+               unmask_irqs(1UL << d->irq);
        put_cpu_var(irq_disable_mask);
 }
-EXPORT_SYMBOL(enable_percpu_irq);
 
 /*
  * Add an irq to the disabled mask.  We disable the HW interrupt
@@ -167,13 +166,12 @@ EXPORT_SYMBOL(enable_percpu_irq);
  * in an interrupt context, the return path is careful to avoid
  * unmasking a newly disabled interrupt.
  */
-void disable_percpu_irq(unsigned int irq)
+static void tile_irq_chip_disable(struct irq_data *d)
 {
-       get_cpu_var(irq_disable_mask) |= (1UL << irq);
-       mask_irqs(1UL << irq);
+       get_cpu_var(irq_disable_mask) |= (1UL << d->irq);
+       mask_irqs(1UL << d->irq);
        put_cpu_var(irq_disable_mask);
 }
-EXPORT_SYMBOL(disable_percpu_irq);
 
 /* Mask an interrupt. */
 static void tile_irq_chip_mask(struct irq_data *d)
@@ -209,6 +207,8 @@ static void tile_irq_chip_eoi(struct irq_data *d)
 
 static struct irq_chip tile_irq_chip = {
        .name = "tile_irq_chip",
+       .irq_enable = tile_irq_chip_enable,
+       .irq_disable = tile_irq_chip_disable,
        .irq_ack = tile_irq_chip_ack,
        .irq_eoi = tile_irq_chip_eoi,
        .irq_mask = tile_irq_chip_mask,
index 658f2ce..b3ed19f 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/vmalloc.h>
+#include <linux/export.h>
 #include <asm/tlbflush.h>
 #include <asm/homecache.h>
 
index 2a8014c..9d610d3 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/uaccess.h>
+#include <linux/export.h>
 
 #include <asm/processor.h>
 #include <asm/sections.h>
index b671a86..6029082 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/cpu.h>
 #include <linux/slab.h>
 #include <linux/smp.h>
+#include <linux/stat.h>
 #include <hv/hypervisor.h>
 
 /* Return a string queried from the hypervisor, truncated to page size. */
index a87d2a8..2a81d32 100644 (file)
@@ -39,6 +39,9 @@ EXPORT_SYMBOL(finv_user_asm);
 EXPORT_SYMBOL(current_text_addr);
 EXPORT_SYMBOL(dump_stack);
 
+/* arch/tile/kernel/head.S */
+EXPORT_SYMBOL(empty_zero_page);
+
 /* arch/tile/lib/, various memcpy files */
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(__copy_to_user_inatomic);
index cbe6f4f..1cc6ae4 100644 (file)
@@ -449,9 +449,12 @@ void homecache_free_pages(unsigned long addr, unsigned int order)
        VM_BUG_ON(!virt_addr_valid((void *)addr));
        page = virt_to_page((void *)addr);
        if (put_page_testzero(page)) {
-               int pages = (1 << order);
                homecache_change_page_home(page, order, initial_page_home());
-               while (pages--)
-                       __free_page(page++);
+               if (order == 0) {
+                       free_hot_cold_page(page, 0);
+               } else {
+                       init_page_count(page);
+                       __free_pages(page, order);
+               }
        }
 }
index cb9a104..efb4294 100644 (file)
@@ -390,7 +390,7 @@ config X86_INTEL_CE
          This option compiles in support for the CE4100 SOC for settop
          boxes and media devices.
 
-config X86_INTEL_MID
+config X86_WANT_INTEL_MID
        bool "Intel MID platform support"
        depends on X86_32
        depends on X86_EXTENDED_PLATFORM
@@ -399,7 +399,10 @@ config X86_INTEL_MID
          systems which do not have the PCI legacy interfaces (Moorestown,
          Medfield). If you are building for a PC class system say N here.
 
-if X86_INTEL_MID
+if X86_WANT_INTEL_MID
+
+config X86_INTEL_MID
+       bool
 
 config X86_MRST
        bool "Moorestown MID platform"
@@ -411,6 +414,7 @@ config X86_MRST
        select SPI
        select INTEL_SCU_IPC
        select X86_PLATFORM_DEVICES
+       select X86_INTEL_MID
        ---help---
          Moorestown is Intel's Low Power Intel Architecture (LPIA) based Moblin
          Internet Device(MID) platform. Moorestown consists of two chips:
index 4420993..925b605 100644 (file)
@@ -3,11 +3,15 @@
 
 #include <linux/notifier.h>
 
-#define IPCMSG_VRTC    0xFA     /* Set vRTC device */
-
-/* Command id associated with message IPCMSG_VRTC */
-#define IPC_CMD_VRTC_SETTIME      1 /* Set time */
-#define IPC_CMD_VRTC_SETALARM     2 /* Set alarm */
+#define IPCMSG_WARM_RESET      0xF0
+#define IPCMSG_COLD_RESET      0xF1
+#define IPCMSG_SOFT_RESET      0xF2
+#define IPCMSG_COLD_BOOT       0xF3
+
+#define IPCMSG_VRTC            0xFA     /* Set vRTC device */
+       /* Command id associated with message IPCMSG_VRTC */
+       #define IPC_CMD_VRTC_SETTIME      1 /* Set time */
+       #define IPC_CMD_VRTC_SETALARM     2 /* Set alarm */
 
 /* Read single register */
 int intel_scu_ipc_ioread8(u16 addr, u8 *data);
index e628312..93f7909 100644 (file)
@@ -31,11 +31,20 @@ enum mrst_cpu_type {
 };
 
 extern enum mrst_cpu_type __mrst_cpu_chip;
+
+#ifdef CONFIG_X86_INTEL_MID
+
 static inline enum mrst_cpu_type mrst_identify_cpu(void)
 {
        return __mrst_cpu_chip;
 }
 
+#else /* !CONFIG_X86_INTEL_MID */
+
+#define mrst_identify_cpu()    (0)
+
+#endif /* !CONFIG_X86_INTEL_MID */
+
 enum mrst_timer_options {
        MRST_TIMER_DEFAULT,
        MRST_TIMER_APBT_ONLY,
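
The hunk above uses the usual compile-out stub idiom: with CONFIG_X86_INTEL_MID disabled, mrst_identify_cpu() collapses to a constant 0, so callers such as the RTC registration change later in this merge need no #ifdef of their own. A small self-contained C illustration of the same idiom, using made-up names (CONFIG_MY_PLATFORM, probe_platform) rather than the kernel's:

#include <stdio.h>

/* Toggle to see both builds of the same caller. */
#define CONFIG_MY_PLATFORM 1

#ifdef CONFIG_MY_PLATFORM
static inline int probe_platform(void) { return 42; }   /* real implementation */
#else
#define probe_platform() (0)                             /* compiles away */
#endif

int main(void)
{
	/* The caller is identical either way; the stub keeps it #ifdef-free. */
	if (probe_platform())
		printf("platform present: %d\n", probe_platform());
	else
		printf("platform absent, skipping setup\n");
	return 0;
}
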
index 084ef95..95203d4 100644 (file)
@@ -169,7 +169,14 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
        return native_write_msr_safe(msr, low, high);
 }
 
-/* rdmsr with exception handling */
+/*
+ * rdmsr with exception handling.
+ *
+ * Please note that the exception handling works only after we've
+ * switched to the "smart" #GP handler in trap_init() which knows about
+ * exception tables - using this macro earlier than that causes machine
+ * hangs on boxes which do not implement the @msr in the first argument.
+ */
 #define rdmsr_safe(msr, p1, p2)                                        \
 ({                                                             \
        int __err;                                              \
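
As a usage sketch only (kernel context, not a standalone program), the guarded read the new comment describes looks like the AMD init code later in this merge; per the comment it is only safe once trap_init() has installed the exception-table-aware #GP handler:

u32 level, dummy;

/* Guarded read of an MSR that some CPUs do not implement. */
if (rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &level, &dummy))
	pr_warn("MSR_AMD64_PATCH_LEVEL not implemented on this CPU\n");
else
	pr_info("microcode patch level: 0x%x\n", level);
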
index c2ff2a1..2d2f01c 100644 (file)
@@ -401,6 +401,7 @@ extern unsigned long arch_align_stack(unsigned long sp);
 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 
 void default_idle(void);
+bool set_pm_idle_to_default(void);
 
 void stop_this_cpu(void *dummy);
 
index fa7b917..431793e 100644 (file)
@@ -32,6 +32,22 @@ extern int no_timer_check;
  *  (mathieu.desnoyers@polymtl.ca)
  *
  *                     -johnstul@us.ibm.com "math is hard, lets go shopping!"
+ *
+ * In:
+ *
+ * ns = cycles * cyc2ns_scale / SC
+ *
+ * Although we may still have enough bits to store the value of ns,
+ * in some cases, we may not have enough bits to store cycles * cyc2ns_scale,
+ * leading to an incorrect result.
+ *
+ * To avoid this, we can decompose 'cycles' into quotient and remainder
+ * of division by SC.  Then,
+ *
+ * ns = (quot * SC + rem) * cyc2ns_scale / SC
+ *    = quot * cyc2ns_scale + (rem * cyc2ns_scale) / SC
+ *
+ *                     - sqazi@google.com
  */
 
 DECLARE_PER_CPU(unsigned long, cyc2ns);
@@ -41,9 +57,14 @@ DECLARE_PER_CPU(unsigned long long, cyc2ns_offset);
 
 static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
 {
+       unsigned long long quot;
+       unsigned long long rem;
        int cpu = smp_processor_id();
        unsigned long long ns = per_cpu(cyc2ns_offset, cpu);
-       ns += cyc * per_cpu(cyc2ns, cpu) >> CYC2NS_SCALE_FACTOR;
+       quot = (cyc >> CYC2NS_SCALE_FACTOR);
+       rem = cyc & ((1ULL << CYC2NS_SCALE_FACTOR) - 1);
+       ns += quot * per_cpu(cyc2ns, cpu) +
+               ((rem * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR);
        return ns;
 }
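
The decomposition described in the comment can be checked outside the kernel. A minimal, self-contained C illustration of why splitting cycles into quotient and remainder keeps the 64-bit intermediate product from overflowing; the scale factor and multiplier below are illustrative stand-ins, not the kernel's per-CPU data:

#include <stdio.h>
#include <stdint.h>

#define CYC2NS_SCALE_FACTOR 10            /* SC = 2^10, illustrative */

int main(void)
{
	uint64_t cyc   = 0x0fffffffffffffffULL;  /* large cycle count */
	uint64_t scale = 3000;                    /* stand-in for per-CPU cyc2ns */

	/* Naive form: the 64-bit product cyc * scale wraps before the shift. */
	uint64_t naive = (cyc * scale) >> CYC2NS_SCALE_FACTOR;

	/* Decomposed form from the comment: quot * scale + (rem * scale) / SC. */
	uint64_t quot = cyc >> CYC2NS_SCALE_FACTOR;
	uint64_t rem  = cyc & ((1ULL << CYC2NS_SCALE_FACTOR) - 1);
	uint64_t safe = quot * scale + ((rem * scale) >> CYC2NS_SCALE_FACTOR);

	printf("naive: %llu\nsafe:  %llu\n",
	       (unsigned long long)naive, (unsigned long long)safe);
	return 0;
}

With these inputs the naive product wraps around 2^64, so the two results differ; the decomposed form still yields the exact value.
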
 
index 10474fb..cf1d736 100644 (file)
@@ -57,6 +57,7 @@
 
 #define UV1_HUB_PART_NUMBER    0x88a5
 #define UV2_HUB_PART_NUMBER    0x8eb8
+#define UV2_HUB_PART_NUMBER_X  0x1111
 
 /* Compat: if this #define is present, UV headers support UV2 */
 #define UV2_HUB_IS_SUPPORTED   1
index 62ae300..9d59bba 100644 (file)
@@ -93,6 +93,8 @@ static int __init early_get_pnodeid(void)
 
        if (node_id.s.part_number == UV2_HUB_PART_NUMBER)
                uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
+       if (node_id.s.part_number == UV2_HUB_PART_NUMBER_X)
+               uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
 
        uv_hub_info->hub_revision = uv_min_hub_revision_id;
        pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1);
index c7e46cb..0bab2b1 100644 (file)
@@ -442,8 +442,6 @@ static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c)
 
 static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 {
-       u32 dummy;
-
        early_init_amd_mc(c);
 
        /*
@@ -473,12 +471,12 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
                        set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
        }
 #endif
-
-       rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
 }
 
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 {
+       u32 dummy;
+
 #ifdef CONFIG_SMP
        unsigned long long value;
 
@@ -657,6 +655,8 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                        checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
                }
        }
+
+       rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
 }
 
 #ifdef CONFIG_X86_32
index a71efcd..97b2635 100644 (file)
@@ -547,6 +547,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
 
                if (tmp != mask_lo) {
                        printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
+                       add_taint(TAINT_FIRMWARE_WORKAROUND);
                        mask_lo = tmp;
                }
        }
@@ -693,6 +694,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
 
        /* Disable MTRRs, and set the default type to uncached */
        mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
+       wbinvd();
 }
 
 static void post_set(void) __releases(set_atomicity_lock)
index 6408910..2bda212 100644 (file)
@@ -312,12 +312,8 @@ int x86_setup_perfctr(struct perf_event *event)
                        return -EOPNOTSUPP;
        }
 
-       /*
-        * Do not allow config1 (extended registers) to propagate,
-        * there's no sane user-space generalization yet:
-        */
        if (attr->type == PERF_TYPE_RAW)
-               return 0;
+               return x86_pmu_extra_regs(event->attr.config, event);
 
        if (attr->type == PERF_TYPE_HW_CACHE)
                return set_ext_hw_attr(hwc, event);
@@ -588,7 +584,7 @@ done:
                                x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
                }
        }
-       return num ? -ENOSPC : 0;
+       return num ? -EINVAL : 0;
 }
 
 /*
@@ -607,7 +603,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
 
        if (is_x86_event(leader)) {
                if (n >= max_count)
-                       return -ENOSPC;
+                       return -EINVAL;
                cpuc->event_list[n] = leader;
                n++;
        }
@@ -620,7 +616,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
                        continue;
 
                if (n >= max_count)
-                       return -ENOSPC;
+                       return -EINVAL;
 
                cpuc->event_list[n] = event;
                n++;
@@ -1316,7 +1312,7 @@ static int validate_event(struct perf_event *event)
        c = x86_pmu.get_event_constraints(fake_cpuc, event);
 
        if (!c || !c->weight)
-               ret = -ENOSPC;
+               ret = -EINVAL;
 
        if (x86_pmu.put_event_constraints)
                x86_pmu.put_event_constraints(fake_cpuc, event);
@@ -1341,7 +1337,7 @@ static int validate_group(struct perf_event *event)
 {
        struct perf_event *leader = event->group_leader;
        struct cpu_hw_events *fake_cpuc;
-       int ret = -ENOSPC, n;
+       int ret = -EINVAL, n;
 
        fake_cpuc = allocate_fake_cpuc();
        if (IS_ERR(fake_cpuc))
index ab6343d..3b8a2d3 100644 (file)
@@ -199,8 +199,7 @@ static int force_ibs_eilvt_setup(void)
                goto out;
        }
 
-       pr_err(FW_BUG "using offset %d for IBS interrupts\n", offset);
-       pr_err(FW_BUG "workaround enabled for IBS LVT offset\n");
+       pr_info("IBS: LVT offset %d assigned\n", offset);
 
        return 0;
 out:
@@ -265,19 +264,23 @@ perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *h
 static __init int amd_ibs_init(void)
 {
        u32 caps;
-       int ret;
+       int ret = -EINVAL;
 
        caps = __get_ibs_caps();
        if (!caps)
                return -ENODEV; /* ibs not supported by the cpu */
 
-       if (!ibs_eilvt_valid()) {
-               ret = force_ibs_eilvt_setup();
-               if (ret) {
-                       pr_err("Failed to setup IBS, %d\n", ret);
-                       return ret;
-               }
-       }
+       /*
+        * Force LVT offset assignment for family 10h: The offsets are
+        * not assigned by the BIOS for this family, so the OS is
+        * responsible for doing it. If the OS assignment fails, fall
+        * back to BIOS settings and try to setup this.
+        */
+       if (boot_cpu_data.x86 == 0x10)
+               force_ibs_eilvt_setup();
+
+       if (!ibs_eilvt_valid())
+               goto out;
 
        get_online_cpus();
        ibs_caps = caps;
@@ -287,7 +290,11 @@ static __init int amd_ibs_init(void)
        smp_call_function(setup_APIC_ibs, NULL, 1);
        put_online_cpus();
 
-       return perf_event_ibs_init();
+       ret = perf_event_ibs_init();
+out:
+       if (ret)
+               pr_err("Failed to setup IBS, %d\n", ret);
+       return ret;
 }
 
 /* Since we need the pci subsystem to init ibs we can't do this earlier: */
index 2be5ebe..8d601b1 100644 (file)
@@ -1545,6 +1545,13 @@ static void intel_clovertown_quirks(void)
        x86_pmu.pebs_constraints = NULL;
 }
 
+static void intel_sandybridge_quirks(void)
+{
+       printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
+       x86_pmu.pebs = 0;
+       x86_pmu.pebs_constraints = NULL;
+}
+
 __init int intel_pmu_init(void)
 {
        union cpuid10_edx edx;
@@ -1694,6 +1701,7 @@ __init int intel_pmu_init(void)
                break;
 
        case 42: /* SandyBridge */
+               x86_pmu.quirks = intel_sandybridge_quirks;
        case 45: /* SandyBridge, "Romely-EP" */
                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
index c0d238f..73da6b6 100644 (file)
@@ -493,6 +493,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
        unsigned long from = cpuc->lbr_entries[0].from;
        unsigned long old_to, to = cpuc->lbr_entries[0].to;
        unsigned long ip = regs->ip;
+       int is_64bit = 0;
 
        /*
         * We don't need to fixup if the PEBS assist is fault like
@@ -544,7 +545,10 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
                } else
                        kaddr = (void *)to;
 
-               kernel_insn_init(&insn, kaddr);
+#ifdef CONFIG_X86_64
+               is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
+#endif
+               insn_init(&insn, kaddr, is_64bit);
                insn_get_length(&insn);
                to += insn.length;
        } while (to < ip);
index 492bf13..ef484d9 100644 (file)
@@ -1268,7 +1268,7 @@ reserve:
        }
 
 done:
-       return num ? -ENOSPC : 0;
+       return num ? -EINVAL : 0;
 }
 
 static __initconst const struct x86_pmu p4_pmu = {
index 3b97a80..c99f9ed 100644 (file)
@@ -116,16 +116,16 @@ void show_registers(struct pt_regs *regs)
                for (i = 0; i < code_len; i++, ip++) {
                        if (ip < (u8 *)PAGE_OFFSET ||
                                        probe_kernel_address(ip, c)) {
-                               printk(" Bad EIP value.");
+                               printk(KERN_CONT " Bad EIP value.");
                                break;
                        }
                        if (ip == (u8 *)regs->ip)
-                               printk("<%02x> ", c);
+                               printk(KERN_CONT "<%02x> ", c);
                        else
-                               printk("%02x ", c);
+                               printk(KERN_CONT "%02x ", c);
                }
        }
-       printk("\n");
+       printk(KERN_CONT "\n");
 }
 
 int is_valid_bugaddr(unsigned long ip)
index 19853ad..6d728d9 100644 (file)
@@ -284,16 +284,16 @@ void show_registers(struct pt_regs *regs)
                for (i = 0; i < code_len; i++, ip++) {
                        if (ip < (u8 *)PAGE_OFFSET ||
                                        probe_kernel_address(ip, c)) {
-                               printk(" Bad RIP value.");
+                               printk(KERN_CONT " Bad RIP value.");
                                break;
                        }
                        if (ip == (u8 *)regs->ip)
-                               printk("<%02x> ", c);
+                               printk(KERN_CONT "<%02x> ", c);
                        else
-                               printk("%02x ", c);
+                               printk(KERN_CONT "%02x ", c);
                }
        }
-       printk("\n");
+       printk(KERN_CONT "\n");
 }
 
 int is_valid_bugaddr(unsigned long ip)
index b946a9e..1bb0bf4 100644 (file)
@@ -1049,6 +1049,14 @@ int hpet_rtc_timer_init(void)
 }
 EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);
 
+static void hpet_disable_rtc_channel(void)
+{
+       unsigned long cfg;
+       cfg = hpet_readl(HPET_T1_CFG);
+       cfg &= ~HPET_TN_ENABLE;
+       hpet_writel(cfg, HPET_T1_CFG);
+}
+
 /*
  * The functions below are called from rtc driver.
  * Return 0 if HPET is not being used.
@@ -1060,6 +1068,9 @@ int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
                return 0;
 
        hpet_rtc_flags &= ~bit_mask;
+       if (unlikely(!hpet_rtc_flags))
+               hpet_disable_rtc_channel();
+
        return 1;
 }
 EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);
@@ -1125,15 +1136,11 @@ EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);
 
 static void hpet_rtc_timer_reinit(void)
 {
-       unsigned int cfg, delta;
+       unsigned int delta;
        int lost_ints = -1;
 
-       if (unlikely(!hpet_rtc_flags)) {
-               cfg = hpet_readl(HPET_T1_CFG);
-               cfg &= ~HPET_TN_ENABLE;
-               hpet_writel(cfg, HPET_T1_CFG);
-               return;
-       }
+       if (unlikely(!hpet_rtc_flags))
+               hpet_disable_rtc_channel();
 
        if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
                delta = hpet_default_delta;
index acf8fbf..69bca46 100644 (file)
@@ -38,6 +38,9 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
        u64 curbase = (u64)task_stack_page(current);
 
+       if (user_mode_vm(regs))
+               return;
+
        WARN_ONCE(regs->sp >= curbase &&
                  regs->sp <= curbase + THREAD_SIZE &&
                  regs->sp <  curbase + sizeof(struct thread_info) +
index f2d2a66..9d46f5e 100644 (file)
@@ -256,7 +256,7 @@ static int __init microcode_dev_init(void)
        return 0;
 }
 
-static void microcode_dev_exit(void)
+static void __exit microcode_dev_exit(void)
 {
        misc_deregister(&microcode_dev);
 }
@@ -519,10 +519,8 @@ static int __init microcode_init(void)
 
        microcode_pdev = platform_device_register_simple("microcode", -1,
                                                         NULL, 0);
-       if (IS_ERR(microcode_pdev)) {
-               microcode_dev_exit();
+       if (IS_ERR(microcode_pdev))
                return PTR_ERR(microcode_pdev);
-       }
 
        get_online_cpus();
        mutex_lock(&microcode_mutex);
@@ -532,14 +530,12 @@ static int __init microcode_init(void)
        mutex_unlock(&microcode_mutex);
        put_online_cpus();
 
-       if (error) {
-               platform_device_unregister(microcode_pdev);
-               return error;
-       }
+       if (error)
+               goto out_pdev;
 
        error = microcode_dev_init();
        if (error)
-               return error;
+               goto out_sysdev_driver;
 
        register_syscore_ops(&mc_syscore_ops);
        register_hotcpu_notifier(&mc_cpu_notifier);
@@ -548,6 +544,20 @@ static int __init microcode_init(void)
                " <tigran@aivazian.fsnet.co.uk>, Peter Oruba\n");
 
        return 0;
+
+out_sysdev_driver:
+       get_online_cpus();
+       mutex_lock(&microcode_mutex);
+
+       sysdev_driver_unregister(&cpu_sysdev_class, &mc_sysdev_driver);
+
+       mutex_unlock(&microcode_mutex);
+       put_online_cpus();
+
+out_pdev:
+       platform_device_unregister(microcode_pdev);
+       return error;
+
 }
 module_init(microcode_init);
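
The reworked init path above follows the common goto-unwind shape: each label tears down one resource, and a failure jumps to the label for the last thing that succeeded so everything earlier is released in reverse order. A self-contained C sketch of the same shape; setup_a, setup_b and teardown_a are illustrative names, not kernel interfaces:

#include <stdio.h>

static int setup_a(void)  { puts("setup a");  return 0; }
static int setup_b(void)  { puts("setup b");  return -1; }  /* simulated failure */
static void teardown_a(void) { puts("teardown a"); }

static int init_like(void)
{
	int error;

	error = setup_a();
	if (error)
		return error;

	error = setup_b();
	if (error)
		goto out_a;          /* unwind only what already succeeded */

	return 0;

out_a:
	teardown_a();
	return error;
}

int main(void)
{
	return init_like() ? 1 : 0;
}
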
 
index 9103b89..0741b06 100644 (file)
@@ -95,8 +95,8 @@ static void __init MP_bus_info(struct mpc_bus *m)
        }
 #endif
 
+       set_bit(m->busid, mp_bus_not_pci);
        if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
-               set_bit(m->busid, mp_bus_not_pci);
 #if defined(CONFIG_EISA) || defined(CONFIG_MCA)
                mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
 #endif
index b9b3b1a..ee5d4fb 100644 (file)
@@ -403,6 +403,14 @@ void default_idle(void)
 EXPORT_SYMBOL(default_idle);
 #endif
 
+bool set_pm_idle_to_default(void)
+{
+       bool ret = !!pm_idle;
+
+       pm_idle = default_idle;
+
+       return ret;
+}
 void stop_this_cpu(void *dummy)
 {
        local_irq_disable();
index b78643d..03920a1 100644 (file)
@@ -553,4 +553,17 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC,
                        quirk_amd_nb_node);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK,
                        quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F0,
+                       quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F1,
+                       quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F2,
+                       quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3,
+                       quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4,
+                       quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
+                       quirk_amd_nb_node);
+
 #endif
index e334be1..37a458b 100644 (file)
@@ -124,7 +124,7 @@ __setup("reboot=", reboot_setup);
  */
 
 /*
- * Some machines require the "reboot=b"  commandline option,
+ * Some machines require the "reboot=b" or "reboot=k"  commandline options,
  * this quirk makes that automatic.
  */
 static int __init set_bios_reboot(const struct dmi_system_id *d)
@@ -136,6 +136,15 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
        return 0;
 }
 
+static int __init set_kbd_reboot(const struct dmi_system_id *d)
+{
+       if (reboot_type != BOOT_KBD) {
+               reboot_type = BOOT_KBD;
+               printk(KERN_INFO "%s series board detected. Selecting KBD-method for reboot.\n", d->ident);
+       }
+       return 0;
+}
+
 static struct dmi_system_id __initdata reboot_dmi_table[] = {
        {       /* Handle problems with rebooting on Dell E520's */
                .callback = set_bios_reboot,
@@ -295,7 +304,7 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
                },
        },
        { /* Handle reboot issue on Acer Aspire one */
-               .callback = set_bios_reboot,
+               .callback = set_kbd_reboot,
                .ident = "Acer Aspire One A110",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -443,6 +452,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"),
                },
        },
+       {       /* Handle problems with rebooting on the OptiPlex 990. */
+               .callback = set_pci_reboot,
+               .ident = "Dell OptiPlex 990",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
+               },
+       },
        { }
 };
 
index 348ce01..af6db6e 100644 (file)
@@ -12,6 +12,7 @@
 #include <asm/vsyscall.h>
 #include <asm/x86_init.h>
 #include <asm/time.h>
+#include <asm/mrst.h>
 
 #ifdef CONFIG_X86_32
 /*
@@ -242,6 +243,10 @@ static __init int add_rtc_cmos(void)
        if (of_have_populated_dt())
                return 0;
 
+       /* Intel MID platforms don't have ioport rtc */
+       if (mrst_identify_cpu())
+               return -ENODEV;
+
        platform_device_register(&rtc_device);
        dev_info(&rtc_device.dev,
                 "registered platform RTC device (no PNP device found)\n");
index ea30585..dd74e46 100644 (file)
@@ -201,6 +201,8 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
        do {
                VM_BUG_ON(compound_head(page) != head);
                pages[*nr] = page;
+               if (PageTail(page))
+                       get_huge_page_tail(page);
                (*nr)++;
                page++;
                refs++;
index b499626..f4f29b1 100644 (file)
@@ -45,6 +45,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        BUG_ON(!pte_none(*(kmap_pte-idx)));
        set_pte(kmap_pte-idx, mk_pte(page, prot));
+       arch_flush_lazy_mmu_mode();
 
        return (void *)vaddr;
 }
@@ -88,6 +89,7 @@ void __kunmap_atomic(void *kvaddr)
                 */
                kpte_clear_flush(kmap_pte-idx, vaddr);
                kmap_atomic_idx_pop();
+               arch_flush_lazy_mmu_mode();
        }
 #ifdef CONFIG_DEBUG_HIGHMEM
        else {
index bfab3fa..7b65f75 100644 (file)
@@ -568,8 +568,8 @@ cond_branch:                        f_offset = addrs[i + filter[i].jf] - addrs[i];
                                        break;
                                }
                                if (filter[i].jt != 0) {
-                                       if (filter[i].jf)
-                                               t_offset += is_near(f_offset) ? 2 : 6;
+                                       if (filter[i].jf && f_offset)
+                                               t_offset += is_near(f_offset) ? 2 : 5;
                                        EMIT_COND_JMP(t_op, t_offset);
                                        if (filter[i].jf)
                                                EMIT_JMP(f_offset);
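
The adjustment above appears to account for the encoded length of the unconditional EMIT_JMP that can follow the conditional jump: on x86 a short jmp (EB rel8) takes 2 bytes and a near jmp (E9 rel32) takes 5, while 6 bytes is the length of a near conditional jump, and no adjustment is needed when no false-branch jump is emitted at all. A stand-alone C sketch of that size bookkeeping; is_near and jmp_len here are local re-implementations for illustration, not the JIT's macros:

#include <stdio.h>

static int is_near(int offset)
{
	return offset > -128 && offset < 128;   /* fits a rel8 displacement */
}

/* Bytes taken by an unconditional jmp reaching 'offset' bytes away. */
static int jmp_len(int offset)
{
	return is_near(offset) ? 2 : 5;         /* EB rel8 vs. E9 rel32 */
}

int main(void)
{
	/* A conditional jump that must hop over a following unconditional
	 * jump adds that jump's encoded length to its own displacement. */
	int t_offset = 40, f_offset = 200;

	if (f_offset)                           /* nothing emitted when 0 */
		t_offset += jmp_len(f_offset);

	printf("conditional jump displacement: %d\n", t_offset);
	return 0;
}
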
index cdfe4c5..f148cf6 100644 (file)
@@ -21,6 +21,7 @@ extern int op_nmi_timer_init(struct oprofile_operations *ops);
 extern void op_nmi_exit(void);
 extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth);
 
+static int nmi_timer;
 
 int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
@@ -31,8 +32,9 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 #ifdef CONFIG_X86_LOCAL_APIC
        ret = op_nmi_init(ops);
 #endif
+       nmi_timer = (ret != 0);
 #ifdef CONFIG_X86_IO_APIC
-       if (ret < 0)
+       if (nmi_timer)
                ret = op_nmi_timer_init(ops);
 #endif
        ops->backtrace = x86_backtrace;
@@ -44,6 +46,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 void oprofile_arch_exit(void)
 {
 #ifdef CONFIG_X86_LOCAL_APIC
-       op_nmi_exit();
+       if (!nmi_timer)
+               op_nmi_exit();
 #endif
 }
index e36bf71..40e4469 100644 (file)
  */
 
 static unsigned long efi_rt_eflags;
-static pgd_t efi_bak_pg_dir_pointer[2];
 
 void efi_call_phys_prelog(void)
 {
-       unsigned long cr4;
-       unsigned long temp;
        struct desc_ptr gdt_descr;
 
        local_irq_save(efi_rt_eflags);
 
-       /*
-        * If I don't have PAE, I should just duplicate two entries in page
-        * directory. If I have PAE, I just need to duplicate one entry in
-        * page directory.
-        */
-       cr4 = read_cr4_safe();
-
-       if (cr4 & X86_CR4_PAE) {
-               efi_bak_pg_dir_pointer[0].pgd =
-                   swapper_pg_dir[pgd_index(0)].pgd;
-               swapper_pg_dir[0].pgd =
-                   swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
-       } else {
-               efi_bak_pg_dir_pointer[0].pgd =
-                   swapper_pg_dir[pgd_index(0)].pgd;
-               efi_bak_pg_dir_pointer[1].pgd =
-                   swapper_pg_dir[pgd_index(0x400000)].pgd;
-               swapper_pg_dir[pgd_index(0)].pgd =
-                   swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
-               temp = PAGE_OFFSET + 0x400000;
-               swapper_pg_dir[pgd_index(0x400000)].pgd =
-                   swapper_pg_dir[pgd_index(temp)].pgd;
-       }
-
-       /*
-        * After the lock is released, the original page table is restored.
-        */
+       load_cr3(initial_page_table);
        __flush_tlb_all();
 
        gdt_descr.address = __pa(get_cpu_gdt_table(0));
@@ -85,28 +56,13 @@ void efi_call_phys_prelog(void)
 
 void efi_call_phys_epilog(void)
 {
-       unsigned long cr4;
        struct desc_ptr gdt_descr;
 
        gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
        gdt_descr.size = GDT_SIZE - 1;
        load_gdt(&gdt_descr);
 
-       cr4 = read_cr4_safe();
-
-       if (cr4 & X86_CR4_PAE) {
-               swapper_pg_dir[pgd_index(0)].pgd =
-                   efi_bak_pg_dir_pointer[0].pgd;
-       } else {
-               swapper_pg_dir[pgd_index(0)].pgd =
-                   efi_bak_pg_dir_pointer[0].pgd;
-               swapper_pg_dir[pgd_index(0x400000)].pgd =
-                   efi_bak_pg_dir_pointer[1].pgd;
-       }
-
-       /*
-        * After the lock is released, the original page table is restored.
-        */
+       load_cr3(swapper_pg_dir);
        __flush_tlb_all();
 
        local_irq_restore(efi_rt_eflags);
index b1489a0..ad4ec1c 100644 (file)
@@ -76,6 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
 int sfi_mrtc_num;
 
+static void mrst_power_off(void)
+{
+       if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
+               intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
+}
+
+static void mrst_reboot(void)
+{
+       if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
+               intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
+       else
+               intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
+}
+
 /* parse all the mtimer info to a static mtimer array */
 static int __init sfi_parse_mtmr(struct sfi_table_header *table)
 {
@@ -265,17 +279,6 @@ static int mrst_i8042_detect(void)
        return 0;
 }
 
-/* Reboot and power off are handled by the SCU on a MID device */
-static void mrst_power_off(void)
-{
-       intel_scu_ipc_simple_command(0xf1, 1);
-}
-
-static void mrst_reboot(void)
-{
-       intel_scu_ipc_simple_command(0xf1, 0);
-}
-
 /*
  * Moorestown does not have external NMI source nor port 0x61 to report
  * NMI status. The possible NMI sources are from pmu as a result of NMI
@@ -484,6 +487,46 @@ static void __init *max7315_platform_data(void *info)
        return max7315;
 }
 
+static void *tca6416_platform_data(void *info)
+{
+       static struct pca953x_platform_data tca6416;
+       struct i2c_board_info *i2c_info = info;
+       int gpio_base, intr;
+       char base_pin_name[SFI_NAME_LEN + 1];
+       char intr_pin_name[SFI_NAME_LEN + 1];
+
+       strcpy(i2c_info->type, "tca6416");
+       strcpy(base_pin_name, "tca6416_base");
+       strcpy(intr_pin_name, "tca6416_int");
+
+       gpio_base = get_gpio_by_name(base_pin_name);
+       intr = get_gpio_by_name(intr_pin_name);
+
+       if (gpio_base == -1)
+               return NULL;
+       tca6416.gpio_base = gpio_base;
+       if (intr != -1) {
+               i2c_info->irq = intr + MRST_IRQ_OFFSET;
+               tca6416.irq_base = gpio_base + MRST_IRQ_OFFSET;
+       } else {
+               i2c_info->irq = -1;
+               tca6416.irq_base = -1;
+       }
+       return &tca6416;
+}
+
+static void *mpu3050_platform_data(void *info)
+{
+       struct i2c_board_info *i2c_info = info;
+       int intr = get_gpio_by_name("mpu3050_int");
+
+       if (intr == -1)
+               return NULL;
+
+       i2c_info->irq = intr + MRST_IRQ_OFFSET;
+       return NULL;
+}
+
 static void __init *emc1403_platform_data(void *info)
 {
        static short intr2nd_pdata;
@@ -646,12 +689,15 @@ static void *msic_ocd_platform_data(void *info)
 static const struct devs_id __initconst device_ids[] = {
        {"bma023", SFI_DEV_TYPE_I2C, 1, &no_platform_data},
        {"pmic_gpio", SFI_DEV_TYPE_SPI, 1, &pmic_gpio_platform_data},
+       {"pmic_gpio", SFI_DEV_TYPE_IPC, 1, &pmic_gpio_platform_data},
        {"spi_max3111", SFI_DEV_TYPE_SPI, 0, &max3111_platform_data},
        {"i2c_max7315", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
        {"i2c_max7315_2", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
+       {"tca6416", SFI_DEV_TYPE_I2C, 1, &tca6416_platform_data},
        {"emc1403", SFI_DEV_TYPE_I2C, 1, &emc1403_platform_data},
        {"i2c_accel", SFI_DEV_TYPE_I2C, 0, &lis331dl_platform_data},
        {"pmic_audio", SFI_DEV_TYPE_IPC, 1, &no_platform_data},
+       {"mpu3050", SFI_DEV_TYPE_I2C, 1, &mpu3050_platform_data},
 
        /* MSIC subdevices */
        {"msic_battery", SFI_DEV_TYPE_IPC, 1, &msic_battery_platform_data},
index 38d0af4..b2c7179 100644 (file)
@@ -173,9 +173,21 @@ static unsigned long __init xen_get_max_pages(void)
        domid_t domid = DOMID_SELF;
        int ret;
 
-       ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
-       if (ret > 0)
-               max_pages = ret;
+       /*
+        * For the initial domain we use the maximum reservation as
+        * the maximum page.
+        *
+        * For guest domains the current maximum reservation reflects
+        * the current maximum rather than the static maximum. In this
+        * case the e820 map provided to us will cover the static
+        * maximum region.
+        */
+       if (xen_initial_domain()) {
+               ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
+               if (ret > 0)
+                       max_pages = ret;
+       }
+
        return min(max_pages, MAX_DOMAIN_PAGES);
 }
 
@@ -410,6 +422,6 @@ void __init xen_arch_setup(void)
 #endif
        disable_cpuidle();
        boot_option_idle_override = IDLE_HALT;
-
+       WARN_ON(set_pm_idle_to_default());
        fiddle_vdso();
 }
index ea70e6c..15de223 100644 (file)
@@ -366,7 +366,14 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
                if (drain_all)
                        blk_throtl_drain(q);
 
-               __blk_run_queue(q);
+               /*
+                * This function might be called on a queue which failed
+                * driver init after queue creation.  Some drivers
+                * (e.g. fd) get unhappy in such cases.  Kick queue iff
+                * dispatch queue has something on it.
+                */
+               if (!list_empty(&q->queue_head))
+                       __blk_run_queue(q);
 
                if (drain_all)
                        nr_rqs = q->rq.count[0] + q->rq.count[1];
@@ -467,6 +474,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        q->backing_dev_info.state = 0;
        q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
        q->backing_dev_info.name = "block";
+       q->node = node_id;
 
        err = bdi_init(&q->backing_dev_info);
        if (err) {
@@ -551,7 +559,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
        if (!uninit_q)
                return NULL;
 
-       q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
+       q = blk_init_allocated_queue(uninit_q, rfn, lock);
        if (!q)
                blk_cleanup_queue(uninit_q);
 
@@ -562,19 +570,10 @@ EXPORT_SYMBOL(blk_init_queue_node);
 struct request_queue *
 blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
                         spinlock_t *lock)
-{
-       return blk_init_allocated_queue_node(q, rfn, lock, -1);
-}
-EXPORT_SYMBOL(blk_init_allocated_queue);
-
-struct request_queue *
-blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
-                             spinlock_t *lock, int node_id)
 {
        if (!q)
                return NULL;
 
-       q->node = node_id;
        if (blk_init_free_list(q))
                return NULL;
 
@@ -604,7 +603,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
 
        return NULL;
 }
-EXPORT_SYMBOL(blk_init_allocated_queue_node);
+EXPORT_SYMBOL(blk_init_allocated_queue);
 
 int blk_get_queue(struct request_queue *q)
 {
index 16ace89..4c12869 100644 (file)
@@ -3184,7 +3184,7 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
                }
        }
 
-       if (ret)
+       if (ret && ret != -EEXIST)
                printk(KERN_ERR "cfq: cic link failed!\n");
 
        return ret;
@@ -3200,6 +3200,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 {
        struct io_context *ioc = NULL;
        struct cfq_io_context *cic;
+       int ret;
 
        might_sleep_if(gfp_mask & __GFP_WAIT);
 
@@ -3207,6 +3208,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
        if (!ioc)
                return NULL;
 
+retry:
        cic = cfq_cic_lookup(cfqd, ioc);
        if (cic)
                goto out;
@@ -3215,7 +3217,12 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
        if (cic == NULL)
                goto err;
 
-       if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
+       ret = cfq_cic_link(cfqd, ioc, cic, gfp_mask);
+       if (ret == -EEXIST) {
+               /* someone has linked cic to ioc already */
+               cfq_cic_free(cic);
+               goto retry;
+       } else if (ret)
                goto err_free;
 
 out:
@@ -4036,6 +4043,11 @@ static void *cfq_init_queue(struct request_queue *q)
 
        if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
                kfree(cfqg);
+
+               spin_lock(&cic_index_lock);
+               ida_remove(&cic_index_ida, cfqd->cic_index);
+               spin_unlock(&cic_index_lock);
+
                kfree(cfqd);
                return NULL;
        }
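
The cfq_get_io_context() change above is a lookup-or-create loop: when linking the freshly allocated cic races with another task and returns -EEXIST, the local allocation is freed and the lookup is retried rather than failing. A small self-contained C sketch of that retry pattern; the table and helpers are illustrative, not the cfq data structures:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static void *table[16];                          /* toy shared lookup table */

static void *lookup(int key) { return table[key]; }

static int link_entry(int key, void *obj)
{
	if (table[key])
		return -EEXIST;                  /* somebody else linked first */
	table[key] = obj;
	return 0;
}

static void *get_entry(int key)
{
	void *obj;
	int ret;

retry:
	obj = lookup(key);
	if (obj)
		return obj;

	obj = malloc(32);
	if (!obj)
		return NULL;

	ret = link_entry(key, obj);
	if (ret == -EEXIST) {                    /* lost the race: drop ours, retry */
		free(obj);
		goto retry;
	} else if (ret) {
		free(obj);
		return NULL;
	}
	return obj;
}

int main(void)
{
	void *e = get_entry(3);

	printf("entry at %p\n", e);
	free(e);
	return 0;
}
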
index 6bdedd7..cf047c4 100644 (file)
@@ -820,7 +820,7 @@ config PATA_PLATFORM
 
 config PATA_OF_PLATFORM
        tristate "OpenFirmware platform device PATA support"
-       depends on PATA_PLATFORM && OF
+       depends on PATA_PLATFORM && OF && OF_IRQ
        help
          This option enables support for generic directly connected ATA
          devices commonly found on embedded systems with OpenFirmware
index d8b3d89..919daa7 100644 (file)
@@ -1743,8 +1743,10 @@ void device_shutdown(void)
                 */
                list_del_init(&dev->kobj.entry);
                spin_unlock(&devices_kset->list_lock);
-               /* Disable all device's runtime power management */
-               pm_runtime_disable(dev);
+
+               /* Don't allow any more runtime suspends */
+               pm_runtime_get_noresume(dev);
+               pm_runtime_barrier(dev);
 
                if (dev->bus && dev->bus->shutdown) {
                        dev_dbg(dev, "shutdown\n");
index 30a3085..fda56bd 100644 (file)
@@ -18,6 +18,9 @@ void bcma_bus_unregister(struct bcma_bus *bus);
 int __init bcma_bus_early_register(struct bcma_bus *bus,
                                   struct bcma_device *core_cc,
                                   struct bcma_device *core_mips);
+#ifdef CONFIG_PM
+int bcma_bus_resume(struct bcma_bus *bus);
+#endif
 
 /* scan.c */
 int bcma_bus_scan(struct bcma_bus *bus);
index 1b51d8b..443b83a 100644 (file)
@@ -21,48 +21,58 @@ static void bcma_host_pci_switch_core(struct bcma_device *core)
        pr_debug("Switched to core: 0x%X\n", core->id.id);
 }
 
-static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset)
+/* Provides access to the requested core. Returns base offset that has to be
+ * used. It makes use of fixed windows when possible. */
+static u16 bcma_host_pci_provide_access_to_core(struct bcma_device *core)
 {
+       switch (core->id.id) {
+       case BCMA_CORE_CHIPCOMMON:
+               return 3 * BCMA_CORE_SIZE;
+       case BCMA_CORE_PCIE:
+               return 2 * BCMA_CORE_SIZE;
+       }
+
        if (core->bus->mapped_core != core)
                bcma_host_pci_switch_core(core);
+       return 0;
+}
+
+static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset)
+{
+       offset += bcma_host_pci_provide_access_to_core(core);
        return ioread8(core->bus->mmio + offset);
 }
 
 static u16 bcma_host_pci_read16(struct bcma_device *core, u16 offset)
 {
-       if (core->bus->mapped_core != core)
-               bcma_host_pci_switch_core(core);
+       offset += bcma_host_pci_provide_access_to_core(core);
        return ioread16(core->bus->mmio + offset);
 }
 
 static u32 bcma_host_pci_read32(struct bcma_device *core, u16 offset)
 {
-       if (core->bus->mapped_core != core)
-               bcma_host_pci_switch_core(core);
+       offset += bcma_host_pci_provide_access_to_core(core);
        return ioread32(core->bus->mmio + offset);
 }
 
 static void bcma_host_pci_write8(struct bcma_device *core, u16 offset,
                                 u8 value)
 {
-       if (core->bus->mapped_core != core)
-               bcma_host_pci_switch_core(core);
+       offset += bcma_host_pci_provide_access_to_core(core);
        iowrite8(value, core->bus->mmio + offset);
 }
 
 static void bcma_host_pci_write16(struct bcma_device *core, u16 offset,
                                 u16 value)
 {
-       if (core->bus->mapped_core != core)
-               bcma_host_pci_switch_core(core);
+       offset += bcma_host_pci_provide_access_to_core(core);
        iowrite16(value, core->bus->mmio + offset);
 }
 
 static void bcma_host_pci_write32(struct bcma_device *core, u16 offset,
                                 u32 value)
 {
-       if (core->bus->mapped_core != core)
-               bcma_host_pci_switch_core(core);
+       offset += bcma_host_pci_provide_access_to_core(core);
        iowrite32(value, core->bus->mmio + offset);
 }
 
@@ -224,6 +234,41 @@ static void bcma_host_pci_remove(struct pci_dev *dev)
        pci_set_drvdata(dev, NULL);
 }
 
+#ifdef CONFIG_PM
+static int bcma_host_pci_suspend(struct pci_dev *dev, pm_message_t state)
+{
+       /* Host specific */
+       pci_save_state(dev);
+       pci_disable_device(dev);
+       pci_set_power_state(dev, pci_choose_state(dev, state));
+
+       return 0;
+}
+
+static int bcma_host_pci_resume(struct pci_dev *dev)
+{
+       struct bcma_bus *bus = pci_get_drvdata(dev);
+       int err;
+
+       /* Host specific */
+       pci_set_power_state(dev, 0);
+       err = pci_enable_device(dev);
+       if (err)
+               return err;
+       pci_restore_state(dev);
+
+       /* Bus specific */
+       err = bcma_bus_resume(bus);
+       if (err)
+               return err;
+
+       return 0;
+}
+#else /* CONFIG_PM */
+# define bcma_host_pci_suspend NULL
+# define bcma_host_pci_resume  NULL
+#endif /* CONFIG_PM */
+
 static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
@@ -239,6 +284,8 @@ static struct pci_driver bcma_pci_bridge_driver = {
        .id_table = bcma_pci_bridge_tbl,
        .probe = bcma_host_pci_probe,
        .remove = bcma_host_pci_remove,
+       .suspend = bcma_host_pci_suspend,
+       .resume = bcma_host_pci_resume,
 };
 
 int __init bcma_host_pci_init(void)
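
The accessor rework earlier in this file routes every MMIO access through bcma_host_pci_provide_access_to_core(), which hands back a fixed window offset for cores that always have one (chipcommon, PCIe) and only switches the sliding window otherwise. A stand-alone C sketch of that dispatch; the core IDs, window size and switch_window() below are simplified stand-ins, not the real register layout:

#include <stdio.h>
#include <stdint.h>

#define CORE_SIZE 0x1000
enum core_id { CORE_CHIPCOMMON, CORE_PCIE, CORE_80211 };

static enum core_id mapped_core = CORE_CHIPCOMMON;

static void switch_window(enum core_id id)
{
	mapped_core = id;                       /* pretend to reprogram the window */
	printf("switched sliding window to core %d\n", id);
}

/* Return the base offset to use for 'id'; switch the window only if needed. */
static uint32_t provide_access(enum core_id id)
{
	switch (id) {
	case CORE_CHIPCOMMON:
		return 3 * CORE_SIZE;           /* always visible at a fixed window */
	case CORE_PCIE:
		return 2 * CORE_SIZE;
	default:
		break;
	}
	if (mapped_core != id)
		switch_window(id);
	return 0;                               /* sliding window starts at 0 */
}

int main(void)
{
	printf("chipcommon reg 0x40 -> offset 0x%x\n",
	       (unsigned)(provide_access(CORE_CHIPCOMMON) + 0x40));
	printf("80211 reg 0x40      -> offset 0x%x\n",
	       (unsigned)(provide_access(CORE_80211) + 0x40));
	return 0;
}
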
index 70c84b9..10f92b3 100644 (file)
@@ -240,6 +240,22 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
        return 0;
 }
 
+#ifdef CONFIG_PM
+int bcma_bus_resume(struct bcma_bus *bus)
+{
+       struct bcma_device *core;
+
+       /* Init CC core */
+       core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON);
+       if (core) {
+               bus->drv_cc.setup_done = false;
+               bcma_core_chipcommon_init(&bus->drv_cc);
+       }
+
+       return 0;
+}
+#endif
+
 int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
 {
        drv->drv.name = drv->name;
index d729239..6f230fb 100644 (file)
@@ -129,6 +129,9 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
        u16 v;
        int i;
 
+       bus->sprom.revision = sprom[SSB_SPROMSIZE_WORDS_R4 - 1] &
+               SSB_SPROM_REVISION_REV;
+
        for (i = 0; i < 3; i++) {
                v = sprom[SPOFF(SSB_SPROM8_IL0MAC) + i];
                *(((__be16 *)bus->sprom.il0mac) + i) = cpu_to_be16(v);
@@ -136,12 +139,70 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
 
        bus->sprom.board_rev = sprom[SPOFF(SSB_SPROM8_BOARDREV)];
 
+       bus->sprom.txpid2g[0] = (sprom[SPOFF(SSB_SPROM4_TXPID2G01)] &
+            SSB_SPROM4_TXPID2G0) >> SSB_SPROM4_TXPID2G0_SHIFT;
+       bus->sprom.txpid2g[1] = (sprom[SPOFF(SSB_SPROM4_TXPID2G01)] &
+            SSB_SPROM4_TXPID2G1) >> SSB_SPROM4_TXPID2G1_SHIFT;
+       bus->sprom.txpid2g[2] = (sprom[SPOFF(SSB_SPROM4_TXPID2G23)] &
+            SSB_SPROM4_TXPID2G2) >> SSB_SPROM4_TXPID2G2_SHIFT;
+       bus->sprom.txpid2g[3] = (sprom[SPOFF(SSB_SPROM4_TXPID2G23)] &
+            SSB_SPROM4_TXPID2G3) >> SSB_SPROM4_TXPID2G3_SHIFT;
+
+       bus->sprom.txpid5gl[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL01)] &
+            SSB_SPROM4_TXPID5GL0) >> SSB_SPROM4_TXPID5GL0_SHIFT;
+       bus->sprom.txpid5gl[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL01)] &
+            SSB_SPROM4_TXPID5GL1) >> SSB_SPROM4_TXPID5GL1_SHIFT;
+       bus->sprom.txpid5gl[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL23)] &
+            SSB_SPROM4_TXPID5GL2) >> SSB_SPROM4_TXPID5GL2_SHIFT;
+       bus->sprom.txpid5gl[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL23)] &
+            SSB_SPROM4_TXPID5GL3) >> SSB_SPROM4_TXPID5GL3_SHIFT;
+
+       bus->sprom.txpid5g[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5G01)] &
+            SSB_SPROM4_TXPID5G0) >> SSB_SPROM4_TXPID5G0_SHIFT;
+       bus->sprom.txpid5g[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5G01)] &
+            SSB_SPROM4_TXPID5G1) >> SSB_SPROM4_TXPID5G1_SHIFT;
+       bus->sprom.txpid5g[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5G23)] &
+            SSB_SPROM4_TXPID5G2) >> SSB_SPROM4_TXPID5G2_SHIFT;
+       bus->sprom.txpid5g[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5G23)] &
+            SSB_SPROM4_TXPID5G3) >> SSB_SPROM4_TXPID5G3_SHIFT;
+
+       bus->sprom.txpid5gh[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH01)] &
+            SSB_SPROM4_TXPID5GH0) >> SSB_SPROM4_TXPID5GH0_SHIFT;
+       bus->sprom.txpid5gh[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH01)] &
+            SSB_SPROM4_TXPID5GH1) >> SSB_SPROM4_TXPID5GH1_SHIFT;
+       bus->sprom.txpid5gh[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH23)] &
+            SSB_SPROM4_TXPID5GH2) >> SSB_SPROM4_TXPID5GH2_SHIFT;
+       bus->sprom.txpid5gh[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH23)] &
+            SSB_SPROM4_TXPID5GH3) >> SSB_SPROM4_TXPID5GH3_SHIFT;
+
        bus->sprom.boardflags_lo = sprom[SPOFF(SSB_SPROM8_BFLLO)];
        bus->sprom.boardflags_hi = sprom[SPOFF(SSB_SPROM8_BFLHI)];
        bus->sprom.boardflags2_lo = sprom[SPOFF(SSB_SPROM8_BFL2LO)];
        bus->sprom.boardflags2_hi = sprom[SPOFF(SSB_SPROM8_BFL2HI)];
 
        bus->sprom.country_code = sprom[SPOFF(SSB_SPROM8_CCODE)];
+
+       bus->sprom.fem.ghz2.tssipos = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+               SSB_SROM8_FEM_TSSIPOS) >> SSB_SROM8_FEM_TSSIPOS_SHIFT;
+       bus->sprom.fem.ghz2.extpa_gain = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+               SSB_SROM8_FEM_EXTPA_GAIN) >> SSB_SROM8_FEM_EXTPA_GAIN_SHIFT;
+       bus->sprom.fem.ghz2.pdet_range = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+               SSB_SROM8_FEM_PDET_RANGE) >> SSB_SROM8_FEM_PDET_RANGE_SHIFT;
+       bus->sprom.fem.ghz2.tr_iso = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+               SSB_SROM8_FEM_TR_ISO) >> SSB_SROM8_FEM_TR_ISO_SHIFT;
+       bus->sprom.fem.ghz2.antswlut = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+               SSB_SROM8_FEM_ANTSWLUT) >> SSB_SROM8_FEM_ANTSWLUT_SHIFT;
+
+       bus->sprom.fem.ghz5.tssipos = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+               SSB_SROM8_FEM_TSSIPOS) >> SSB_SROM8_FEM_TSSIPOS_SHIFT;
+       bus->sprom.fem.ghz5.extpa_gain = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+               SSB_SROM8_FEM_EXTPA_GAIN) >> SSB_SROM8_FEM_EXTPA_GAIN_SHIFT;
+       bus->sprom.fem.ghz5.pdet_range = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+               SSB_SROM8_FEM_PDET_RANGE) >> SSB_SROM8_FEM_PDET_RANGE_SHIFT;
+       bus->sprom.fem.ghz5.tr_iso = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+               SSB_SROM8_FEM_TR_ISO) >> SSB_SROM8_FEM_TR_ISO_SHIFT;
+       bus->sprom.fem.ghz5.antswlut = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+               SSB_SROM8_FEM_ANTSWLUT) >> SSB_SROM8_FEM_ANTSWLUT_SHIFT;
 }
 
 int bcma_sprom_get(struct bcma_bus *bus)
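
Every SPROM field above is extracted with the same mask-and-shift pattern on a 16-bit word. A tiny self-contained C example of that extraction; the word, mask and shift below are made up, not real SSB_SPROM definitions:

#include <stdio.h>
#include <stdint.h>

#define FIELD_MASK  0x00F0      /* illustrative 4-bit field */
#define FIELD_SHIFT 4

int main(void)
{
	uint16_t word  = 0x1234;    /* pretend SPROM word */
	uint8_t  field = (word & FIELD_MASK) >> FIELD_SHIFT;

	printf("extracted field: 0x%x\n", (unsigned)field);   /* prints 0x3 */
	return 0;
}
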
index 8004ac3..587cce5 100644 (file)
@@ -2601,6 +2601,8 @@ static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
                        c->Request.Timeout = 0;
                        c->Request.CDB[0] = BMIC_WRITE;
                        c->Request.CDB[6] = BMIC_CACHE_FLUSH;
+                       c->Request.CDB[7] = (size >> 8) & 0xFF;
+                       c->Request.CDB[8] = size & 0xFF;
                        break;
                case TEST_UNIT_READY:
                        c->Request.CDBLen = 6;
@@ -4880,7 +4882,7 @@ static int cciss_request_irq(ctlr_info_t *h,
 {
        if (h->msix_vector || h->msi_vector) {
                if (!request_irq(h->intr[h->intr_mode], msixhandler,
-                               IRQF_DISABLED, h->devname, h))
+                               0, h->devname, h))
                        return 0;
                dev_err(&h->pdev->dev, "Unable to get msi irq %d"
                        " for %s\n", h->intr[h->intr_mode],
@@ -4889,7 +4891,7 @@ static int cciss_request_irq(ctlr_info_t *h,
        }
 
        if (!request_irq(h->intr[h->intr_mode], intxhandler,
-                       IRQF_DISABLED, h->devname, h))
+                       IRQF_SHARED, h->devname, h))
                return 0;
        dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
                h->intr[h->intr_mode], h->devname);
index 68b205a..1e888c9 100644 (file)
@@ -422,7 +422,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
 
                /*
                 * We use punch hole to reclaim the free space used by the
-                * image a.k.a. discard. However we do support discard if
+                * image a.k.a. discard. However we do not support discard if
                 * encryption is enabled, because it may give an attacker
                 * useful information.
                 */
@@ -797,7 +797,7 @@ static void loop_config_discard(struct loop_device *lo)
        }
 
        q->limits.discard_granularity = inode->i_sb->s_blocksize;
-       q->limits.discard_alignment = inode->i_sb->s_blocksize;
+       q->limits.discard_alignment = 0;
        q->limits.max_discard_sectors = UINT_MAX >> 9;
        q->limits.discard_zeroes_data = 1;
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
index 65cc424..148ab94 100644 (file)
@@ -183,10 +183,6 @@ static LIST_HEAD(rbd_client_list);      /* clients */
 
 static int __rbd_init_snaps_header(struct rbd_device *rbd_dev);
 static void rbd_dev_release(struct device *dev);
-static ssize_t rbd_snap_rollback(struct device *dev,
-                                struct device_attribute *attr,
-                                const char *buf,
-                                size_t size);
 static ssize_t rbd_snap_add(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf,
@@ -461,6 +457,10 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
        u32 snap_count = le32_to_cpu(ondisk->snap_count);
        int ret = -ENOMEM;
 
+       if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT))) {
+               return -ENXIO;
+       }
+
        init_rwsem(&header->snap_rwsem);
        header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
        header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
@@ -1355,32 +1355,6 @@ fail:
        return ret;
 }
 
-/*
- * Request sync osd rollback
- */
-static int rbd_req_sync_rollback_obj(struct rbd_device *dev,
-                                    u64 snapid,
-                                    const char *obj)
-{
-       struct ceph_osd_req_op *ops;
-       int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_ROLLBACK, 0);
-       if (ret < 0)
-               return ret;
-
-       ops[0].snap.snapid = snapid;
-
-       ret = rbd_req_sync_op(dev, NULL,
-                              CEPH_NOSNAP,
-                              0,
-                              CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
-                              ops,
-                              1, obj, 0, 0, NULL, NULL, NULL);
-
-       rbd_destroy_ops(ops);
-
-       return ret;
-}
-
 /*
  * Request sync osd read
  */
@@ -1610,8 +1584,13 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
                        goto out_dh;
 
                rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL);
-               if (rc < 0)
+               if (rc < 0) {
+                       if (rc == -ENXIO) {
+                               pr_warning("unrecognized header format"
+                                          " for image %s", rbd_dev->obj);
+                       }
                        goto out_dh;
+               }
 
                if (snap_count != header->total_snaps) {
                        snap_count = header->total_snaps;
@@ -1882,7 +1861,6 @@ static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
 static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add);
-static DEVICE_ATTR(rollback_snap, S_IWUSR, NULL, rbd_snap_rollback);
 
 static struct attribute *rbd_attrs[] = {
        &dev_attr_size.attr,
@@ -1893,7 +1871,6 @@ static struct attribute *rbd_attrs[] = {
        &dev_attr_current_snap.attr,
        &dev_attr_refresh.attr,
        &dev_attr_create_snap.attr,
-       &dev_attr_rollback_snap.attr,
        NULL
 };
 
@@ -2424,64 +2401,6 @@ err_unlock:
        return ret;
 }
 
-static ssize_t rbd_snap_rollback(struct device *dev,
-                                struct device_attribute *attr,
-                                const char *buf,
-                                size_t count)
-{
-       struct rbd_device *rbd_dev = dev_to_rbd(dev);
-       int ret;
-       u64 snapid;
-       u64 cur_ofs;
-       char *seg_name = NULL;
-       char *snap_name = kmalloc(count + 1, GFP_KERNEL);
-       ret = -ENOMEM;
-       if (!snap_name)
-               return ret;
-
-       /* parse snaps add command */
-       snprintf(snap_name, count, "%s", buf);
-       seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
-       if (!seg_name)
-               goto done;
-
-       mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
-       ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL);
-       if (ret < 0)
-               goto done_unlock;
-
-       dout("snapid=%lld\n", snapid);
-
-       cur_ofs = 0;
-       while (cur_ofs < rbd_dev->header.image_size) {
-               cur_ofs += rbd_get_segment(&rbd_dev->header,
-                                          rbd_dev->obj,
-                                          cur_ofs, (u64)-1,
-                                          seg_name, NULL);
-               dout("seg_name=%s\n", seg_name);
-
-               ret = rbd_req_sync_rollback_obj(rbd_dev, snapid, seg_name);
-               if (ret < 0)
-                       pr_warning("could not roll back obj %s err=%d\n",
-                                  seg_name, ret);
-       }
-
-       ret = __rbd_update_snaps(rbd_dev);
-       if (ret < 0)
-               goto done_unlock;
-
-       ret = count;
-
-done_unlock:
-       mutex_unlock(&ctl_mutex);
-done:
-       kfree(seg_name);
-       kfree(snap_name);
-
-       return ret;
-}
-
 static struct bus_attribute rbd_bus_attrs[] = {
        __ATTR(add, S_IWUSR, NULL, rbd_add),
        __ATTR(remove, S_IWUSR, NULL, rbd_remove),
index ae3e167..89ddab1 100644 (file)
@@ -16,6 +16,8 @@
  * handle GCR disks
  */
 
+#undef DEBUG
+
 #include <linux/stddef.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
 
-static DEFINE_MUTEX(swim3_mutex);
-static struct request_queue *swim3_queue;
-static struct gendisk *disks[2];
-static struct request *fd_req;
-
 #define MAX_FLOPPIES   2
 
+static DEFINE_MUTEX(swim3_mutex);
+static struct gendisk *disks[MAX_FLOPPIES];
+
 enum swim_state {
        idle,
        locating,
@@ -177,7 +177,6 @@ struct swim3 {
 
 struct floppy_state {
        enum swim_state state;
-       spinlock_t lock;
        struct swim3 __iomem *swim3;    /* hardware registers */
        struct dbdma_regs __iomem *dma; /* DMA controller registers */
        int     swim3_intr;     /* interrupt number for SWIM3 */
@@ -204,8 +203,20 @@ struct floppy_state {
        int     wanted;
        struct macio_dev *mdev;
        char    dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
+       int     index;
+       struct request *cur_req;
 };
 
+#define swim3_err(fmt, arg...) dev_err(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#define swim3_warn(fmt, arg...)        dev_warn(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#define swim3_info(fmt, arg...)        dev_info(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+
+#ifdef DEBUG
+#define swim3_dbg(fmt, arg...) dev_dbg(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#else
+#define swim3_dbg(fmt, arg...) do { } while(0)
+#endif
+
 static struct floppy_state floppy_states[MAX_FLOPPIES];
 static int floppy_count = 0;
 static DEFINE_SPINLOCK(swim3_lock);
@@ -224,17 +235,8 @@ static unsigned short write_postamble[] = {
        0, 0, 0, 0, 0, 0
 };
 
-static void swim3_select(struct floppy_state *fs, int sel);
-static void swim3_action(struct floppy_state *fs, int action);
-static int swim3_readbit(struct floppy_state *fs, int bit);
-static void do_fd_request(struct request_queue * q);
-static void start_request(struct floppy_state *fs);
-static void set_timeout(struct floppy_state *fs, int nticks,
-                       void (*proc)(unsigned long));
-static void scan_track(struct floppy_state *fs);
 static void seek_track(struct floppy_state *fs, int n);
 static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count);
-static void setup_transfer(struct floppy_state *fs);
 static void act(struct floppy_state *fs);
 static void scan_timeout(unsigned long data);
 static void seek_timeout(unsigned long data);
@@ -254,18 +256,21 @@ static unsigned int floppy_check_events(struct gendisk *disk,
                                        unsigned int clearing);
 static int floppy_revalidate(struct gendisk *disk);
 
-static bool swim3_end_request(int err, unsigned int nr_bytes)
+static bool swim3_end_request(struct floppy_state *fs, int err, unsigned int nr_bytes)
 {
-       if (__blk_end_request(fd_req, err, nr_bytes))
-               return true;
+       struct request *req = fs->cur_req;
+       int rc;
 
-       fd_req = NULL;
-       return false;
-}
+       swim3_dbg("  end request, err=%d nr_bytes=%d, cur_req=%p\n",
+                 err, nr_bytes, req);
 
-static bool swim3_end_request_cur(int err)
-{
-       return swim3_end_request(err, blk_rq_cur_bytes(fd_req));
+       if (err)
+               nr_bytes = blk_rq_cur_bytes(req);
+       rc = __blk_end_request(req, err, nr_bytes);
+       if (rc)
+               return true;
+       fs->cur_req = NULL;
+       return false;
 }
 
 static void swim3_select(struct floppy_state *fs, int sel)
@@ -303,50 +308,53 @@ static int swim3_readbit(struct floppy_state *fs, int bit)
        return (stat & DATA) == 0;
 }
 
-static void do_fd_request(struct request_queue * q)
-{
-       int i;
-
-       for(i=0; i<floppy_count; i++) {
-               struct floppy_state *fs = &floppy_states[i];
-               if (fs->mdev->media_bay &&
-                   check_media_bay(fs->mdev->media_bay) != MB_FD)
-                       continue;
-               start_request(fs);
-       }
-}
-
 static void start_request(struct floppy_state *fs)
 {
        struct request *req;
        unsigned long x;
 
+       swim3_dbg("start request, initial state=%d\n", fs->state);
+
        if (fs->state == idle && fs->wanted) {
                fs->state = available;
                wake_up(&fs->wait);
                return;
        }
        while (fs->state == idle) {
-               if (!fd_req) {
-                       fd_req = blk_fetch_request(swim3_queue);
-                       if (!fd_req)
+               swim3_dbg("start request, idle loop, cur_req=%p\n", fs->cur_req);
+               if (!fs->cur_req) {
+                       fs->cur_req = blk_fetch_request(disks[fs->index]->queue);
+                       swim3_dbg("  fetched request %p\n", fs->cur_req);
+                       if (!fs->cur_req)
                                break;
                }
-               req = fd_req;
-#if 0
-               printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
-                      req->rq_disk->disk_name, req->cmd,
-                      (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
-               printk("           errors=%d current_nr_sectors=%u\n",
-                      req->errors, blk_rq_cur_sectors(req));
+               req = fs->cur_req;
+
+               if (fs->mdev->media_bay &&
+                   check_media_bay(fs->mdev->media_bay) != MB_FD) {
+                       swim3_dbg("%s", "  media bay absent, dropping req\n");
+                       swim3_end_request(fs, -ENODEV, 0);
+                       continue;
+               }
+
+#if 0 /* This is really too verbose */
+               swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
+                         req->rq_disk->disk_name, req->cmd,
+                         (long)blk_rq_pos(req), blk_rq_sectors(req),
+                         req->buffer);
+               swim3_dbg("           errors=%d current_nr_sectors=%u\n",
+                         req->errors, blk_rq_cur_sectors(req));
 #endif
 
                if (blk_rq_pos(req) >= fs->total_secs) {
-                       swim3_end_request_cur(-EIO);
+                       swim3_dbg("  pos out of bounds (%ld, max is %ld)\n",
+                                 (long)blk_rq_pos(req), (long)fs->total_secs);
+                       swim3_end_request(fs, -EIO, 0);
                        continue;
                }
                if (fs->ejected) {
-                       swim3_end_request_cur(-EIO);
+                       swim3_dbg("%s", "  disk ejected\n");
+                       swim3_end_request(fs, -EIO, 0);
                        continue;
                }
 
@@ -354,7 +362,8 @@ static void start_request(struct floppy_state *fs)
                        if (fs->write_prot < 0)
                                fs->write_prot = swim3_readbit(fs, WRITE_PROT);
                        if (fs->write_prot) {
-                               swim3_end_request_cur(-EIO);
+                               swim3_dbg("%s", "  try to write, disk write protected\n");
+                               swim3_end_request(fs, -EIO, 0);
                                continue;
                        }
                }
@@ -369,7 +378,6 @@ static void start_request(struct floppy_state *fs)
                x = ((long)blk_rq_pos(req)) % fs->secpercyl;
                fs->head = x / fs->secpertrack;
                fs->req_sector = x % fs->secpertrack + 1;
-               fd_req = req;
                fs->state = do_transfer;
                fs->retries = 0;
 
@@ -377,12 +385,14 @@ static void start_request(struct floppy_state *fs)
        }
 }
 
+static void do_fd_request(struct request_queue * q)
+{
+       start_request(q->queuedata);
+}
+
 static void set_timeout(struct floppy_state *fs, int nticks,
                        void (*proc)(unsigned long))
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&fs->lock, flags);
        if (fs->timeout_pending)
                del_timer(&fs->timeout);
        fs->timeout.expires = jiffies + nticks;
@@ -390,7 +400,6 @@ static void set_timeout(struct floppy_state *fs, int nticks,
        fs->timeout.data = (unsigned long) fs;
        add_timer(&fs->timeout);
        fs->timeout_pending = 1;
-       spin_unlock_irqrestore(&fs->lock, flags);
 }
 
 static inline void scan_track(struct floppy_state *fs)
@@ -442,40 +451,45 @@ static inline void setup_transfer(struct floppy_state *fs)
        struct swim3 __iomem *sw = fs->swim3;
        struct dbdma_cmd *cp = fs->dma_cmd;
        struct dbdma_regs __iomem *dr = fs->dma;
+       struct request *req = fs->cur_req;
 
-       if (blk_rq_cur_sectors(fd_req) <= 0) {
-               printk(KERN_ERR "swim3: transfer 0 sectors?\n");
+       if (blk_rq_cur_sectors(req) <= 0) {
+               swim3_warn("%s", "Transfer 0 sectors ?\n");
                return;
        }
-       if (rq_data_dir(fd_req) == WRITE)
+       if (rq_data_dir(req) == WRITE)
                n = 1;
        else {
                n = fs->secpertrack - fs->req_sector + 1;
-               if (n > blk_rq_cur_sectors(fd_req))
-                       n = blk_rq_cur_sectors(fd_req);
+               if (n > blk_rq_cur_sectors(req))
+                       n = blk_rq_cur_sectors(req);
        }
+
+       swim3_dbg("  setup xfer at sect %d (of %d) head %d for %d\n",
+                 fs->req_sector, fs->secpertrack, fs->head, n);
+
        fs->scount = n;
        swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
        out_8(&sw->sector, fs->req_sector);
        out_8(&sw->nsect, n);
        out_8(&sw->gap3, 0);
        out_le32(&dr->cmdptr, virt_to_bus(cp));
-       if (rq_data_dir(fd_req) == WRITE) {
+       if (rq_data_dir(req) == WRITE) {
                /* Set up 3 dma commands: write preamble, data, postamble */
                init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
                ++cp;
-               init_dma(cp, OUTPUT_MORE, fd_req->buffer, 512);
+               init_dma(cp, OUTPUT_MORE, req->buffer, 512);
                ++cp;
                init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble));
        } else {
-               init_dma(cp, INPUT_LAST, fd_req->buffer, n * 512);
+               init_dma(cp, INPUT_LAST, req->buffer, n * 512);
        }
        ++cp;
        out_le16(&cp->command, DBDMA_STOP);
        out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
        in_8(&sw->error);
        out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
-       if (rq_data_dir(fd_req) == WRITE)
+       if (rq_data_dir(req) == WRITE)
                out_8(&sw->control_bis, WRITE_SECTORS);
        in_8(&sw->intr);
        out_le32(&dr->control, (RUN << 16) | RUN);
@@ -488,12 +502,16 @@ static inline void setup_transfer(struct floppy_state *fs)
 static void act(struct floppy_state *fs)
 {
        for (;;) {
+               swim3_dbg("  act loop, state=%d, req_cyl=%d, cur_cyl=%d\n",
+                         fs->state, fs->req_cyl, fs->cur_cyl);
+
                switch (fs->state) {
                case idle:
                        return;         /* XXX shouldn't get here */
 
                case locating:
                        if (swim3_readbit(fs, TRACK_ZERO)) {
+                               swim3_dbg("%s", "    locate track 0\n");
                                fs->cur_cyl = 0;
                                if (fs->req_cyl == 0)
                                        fs->state = do_transfer;
@@ -511,7 +529,7 @@ static void act(struct floppy_state *fs)
                                break;
                        }
                        if (fs->req_cyl == fs->cur_cyl) {
-                               printk("whoops, seeking 0\n");
+                               swim3_warn("%s", "Whoops, seeking 0\n");
                                fs->state = do_transfer;
                                break;
                        }
@@ -527,7 +545,9 @@ static void act(struct floppy_state *fs)
                case do_transfer:
                        if (fs->cur_cyl != fs->req_cyl) {
                                if (fs->retries > 5) {
-                                       swim3_end_request_cur(-EIO);
+                                       swim3_err("Wrong cylinder in transfer, want: %d got %d\n",
+                                                 fs->req_cyl, fs->cur_cyl);
+                                       swim3_end_request(fs, -EIO, 0);
                                        fs->state = idle;
                                        return;
                                }
@@ -542,7 +562,7 @@ static void act(struct floppy_state *fs)
                        return;
 
                default:
-                       printk(KERN_ERR"swim3: unknown state %d\n", fs->state);
+                       swim3_err("Unknown state %d\n", fs->state);
                        return;
                }
        }
@@ -552,59 +572,75 @@ static void scan_timeout(unsigned long data)
 {
        struct floppy_state *fs = (struct floppy_state *) data;
        struct swim3 __iomem *sw = fs->swim3;
+       unsigned long flags;
+
+       swim3_dbg("* scan timeout, state=%d\n", fs->state);
 
+       spin_lock_irqsave(&swim3_lock, flags);
        fs->timeout_pending = 0;
        out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
        out_8(&sw->select, RELAX);
        out_8(&sw->intr_enable, 0);
        fs->cur_cyl = -1;
        if (fs->retries > 5) {
-               swim3_end_request_cur(-EIO);
+               swim3_end_request(fs, -EIO, 0);
                fs->state = idle;
                start_request(fs);
        } else {
                fs->state = jogging;
                act(fs);
        }
+       spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static void seek_timeout(unsigned long data)
 {
        struct floppy_state *fs = (struct floppy_state *) data;
        struct swim3 __iomem *sw = fs->swim3;
+       unsigned long flags;
+
+       swim3_dbg("* seek timeout, state=%d\n", fs->state);
 
+       spin_lock_irqsave(&swim3_lock, flags);
        fs->timeout_pending = 0;
        out_8(&sw->control_bic, DO_SEEK);
        out_8(&sw->select, RELAX);
        out_8(&sw->intr_enable, 0);
-       printk(KERN_ERR "swim3: seek timeout\n");
-       swim3_end_request_cur(-EIO);
+       swim3_err("%s", "Seek timeout\n");
+       swim3_end_request(fs, -EIO, 0);
        fs->state = idle;
        start_request(fs);
+       spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static void settle_timeout(unsigned long data)
 {
        struct floppy_state *fs = (struct floppy_state *) data;
        struct swim3 __iomem *sw = fs->swim3;
+       unsigned long flags;
+
+       swim3_dbg("* settle timeout, state=%d\n", fs->state);
 
+       spin_lock_irqsave(&swim3_lock, flags);
        fs->timeout_pending = 0;
        if (swim3_readbit(fs, SEEK_COMPLETE)) {
                out_8(&sw->select, RELAX);
                fs->state = locating;
                act(fs);
-               return;
+               goto unlock;
        }
        out_8(&sw->select, RELAX);
        if (fs->settle_time < 2*HZ) {
                ++fs->settle_time;
                set_timeout(fs, 1, settle_timeout);
-               return;
+               goto unlock;
        }
-       printk(KERN_ERR "swim3: seek settle timeout\n");
-       swim3_end_request_cur(-EIO);
+       swim3_err("%s", "Seek settle timeout\n");
+       swim3_end_request(fs, -EIO, 0);
        fs->state = idle;
        start_request(fs);
+ unlock:
+       spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static void xfer_timeout(unsigned long data)
@@ -612,8 +648,12 @@ static void xfer_timeout(unsigned long data)
        struct floppy_state *fs = (struct floppy_state *) data;
        struct swim3 __iomem *sw = fs->swim3;
        struct dbdma_regs __iomem *dr = fs->dma;
+       unsigned long flags;
        int n;
 
+       swim3_dbg("* xfer timeout, state=%d\n", fs->state);
+
+       spin_lock_irqsave(&swim3_lock, flags);
        fs->timeout_pending = 0;
        out_le32(&dr->control, RUN << 16);
        /* We must wait a bit for dbdma to stop */
@@ -622,12 +662,13 @@ static void xfer_timeout(unsigned long data)
        out_8(&sw->intr_enable, 0);
        out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
        out_8(&sw->select, RELAX);
-       printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
-              (rq_data_dir(fd_req)==WRITE? "writ": "read"),
-              (long)blk_rq_pos(fd_req));
-       swim3_end_request_cur(-EIO);
+       swim3_err("Timeout %sing sector %ld\n",
+              (rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),
+              (long)blk_rq_pos(fs->cur_req));
+       swim3_end_request(fs, -EIO, 0);
        fs->state = idle;
        start_request(fs);
+       spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static irqreturn_t swim3_interrupt(int irq, void *dev_id)
@@ -638,12 +679,17 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
        int stat, resid;
        struct dbdma_regs __iomem *dr;
        struct dbdma_cmd *cp;
+       unsigned long flags;
+       struct request *req = fs->cur_req;
+
+       swim3_dbg("* interrupt, state=%d\n", fs->state);
 
+       spin_lock_irqsave(&swim3_lock, flags);
        intr = in_8(&sw->intr);
        err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
        if ((intr & ERROR_INTR) && fs->state != do_transfer)
-               printk(KERN_ERR "swim3_interrupt, state=%d, dir=%x, intr=%x, err=%x\n",
-                      fs->state, rq_data_dir(fd_req), intr, err);
+               swim3_err("Non-transfer error interrupt: state=%d, dir=%x, intr=%x, err=%x\n",
+                         fs->state, rq_data_dir(req), intr, err);
        switch (fs->state) {
        case locating:
                if (intr & SEEN_SECTOR) {
@@ -653,10 +699,10 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                        del_timer(&fs->timeout);
                        fs->timeout_pending = 0;
                        if (sw->ctrack == 0xff) {
-                               printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
+                               swim3_err("%s", "Seen sector but cyl=ff?\n");
                                fs->cur_cyl = -1;
                                if (fs->retries > 5) {
-                                       swim3_end_request_cur(-EIO);
+                                       swim3_end_request(fs, -EIO, 0);
                                        fs->state = idle;
                                        start_request(fs);
                                } else {
@@ -668,8 +714,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                        fs->cur_cyl = sw->ctrack;
                        fs->cur_sector = sw->csect;
                        if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
-                               printk(KERN_ERR "swim3: expected cyl %d, got %d\n",
-                                      fs->expect_cyl, fs->cur_cyl);
+                               swim3_err("Expected cyl %d, got %d\n",
+                                         fs->expect_cyl, fs->cur_cyl);
                        fs->state = do_transfer;
                        act(fs);
                }
@@ -704,7 +750,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                fs->timeout_pending = 0;
                dr = fs->dma;
                cp = fs->dma_cmd;
-               if (rq_data_dir(fd_req) == WRITE)
+               if (rq_data_dir(req) == WRITE)
                        ++cp;
                /*
                 * Check that the main data transfer has finished.
@@ -729,31 +775,32 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                if (intr & ERROR_INTR) {
                        n = fs->scount - 1 - resid / 512;
                        if (n > 0) {
-                               blk_update_request(fd_req, 0, n << 9);
+                               blk_update_request(req, 0, n << 9);
                                fs->req_sector += n;
                        }
                        if (fs->retries < 5) {
                                ++fs->retries;
                                act(fs);
                        } else {
-                               printk("swim3: error %sing block %ld (err=%x)\n",
-                                      rq_data_dir(fd_req) == WRITE? "writ": "read",
-                                      (long)blk_rq_pos(fd_req), err);
-                               swim3_end_request_cur(-EIO);
+                               swim3_err("Error %sing block %ld (err=%x)\n",
+                                      rq_data_dir(req) == WRITE? "writ": "read",
+                                      (long)blk_rq_pos(req), err);
+                               swim3_end_request(fs, -EIO, 0);
                                fs->state = idle;
                        }
                } else {
                        if ((stat & ACTIVE) == 0 || resid != 0) {
                                /* musta been an error */
-                               printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
-                               printk(KERN_ERR "  state=%d, dir=%x, intr=%x, err=%x\n",
-                                      fs->state, rq_data_dir(fd_req), intr, err);
-                               swim3_end_request_cur(-EIO);
+                               swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
+                               swim3_err("  state=%d, dir=%x, intr=%x, err=%x\n",
+                                         fs->state, rq_data_dir(req), intr, err);
+                               swim3_end_request(fs, -EIO, 0);
                                fs->state = idle;
                                start_request(fs);
                                break;
                        }
-                       if (swim3_end_request(0, fs->scount << 9)) {
+                       fs->retries = 0;
+                       if (swim3_end_request(fs, 0, fs->scount << 9)) {
                                fs->req_sector += fs->scount;
                                if (fs->req_sector > fs->secpertrack) {
                                        fs->req_sector -= fs->secpertrack;
@@ -770,8 +817,9 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                        start_request(fs);
                break;
        default:
-               printk(KERN_ERR "swim3: don't know what to do in state %d\n", fs->state);
+               swim3_err("Don't know what to do in state %d\n", fs->state);
        }
+       spin_unlock_irqrestore(&swim3_lock, flags);
        return IRQ_HANDLED;
 }
 
@@ -781,26 +829,31 @@ static void fd_dma_interrupt(int irq, void *dev_id)
 }
 */
 
+/* Called under the mutex to grab exclusive access to a drive */
 static int grab_drive(struct floppy_state *fs, enum swim_state state,
                      int interruptible)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&fs->lock, flags);
-       if (fs->state != idle) {
+       swim3_dbg("%s", "-> grab drive\n");
+
+       spin_lock_irqsave(&swim3_lock, flags);
+       if (fs->state != idle && fs->state != available) {
                ++fs->wanted;
                while (fs->state != available) {
+                       spin_unlock_irqrestore(&swim3_lock, flags);
                        if (interruptible && signal_pending(current)) {
                                --fs->wanted;
-                               spin_unlock_irqrestore(&fs->lock, flags);
                                return -EINTR;
                        }
                        interruptible_sleep_on(&fs->wait);
+                       spin_lock_irqsave(&swim3_lock, flags);
                }
                --fs->wanted;
        }
        fs->state = state;
-       spin_unlock_irqrestore(&fs->lock, flags);
+       spin_unlock_irqrestore(&swim3_lock, flags);
+
        return 0;
 }
 
@@ -808,10 +861,12 @@ static void release_drive(struct floppy_state *fs)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&fs->lock, flags);
+       swim3_dbg("%s", "-> release drive\n");
+
+       spin_lock_irqsave(&swim3_lock, flags);
        fs->state = idle;
        start_request(fs);
-       spin_unlock_irqrestore(&fs->lock, flags);
+       spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static int fd_eject(struct floppy_state *fs)
@@ -966,6 +1021,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
 {
        struct floppy_state *fs = disk->private_data;
        struct swim3 __iomem *sw = fs->swim3;
+
        mutex_lock(&swim3_mutex);
        if (fs->ref_count > 0 && --fs->ref_count == 0) {
                swim3_action(fs, MOTOR_OFF);
@@ -1031,30 +1087,48 @@ static const struct block_device_operations floppy_fops = {
        .revalidate_disk= floppy_revalidate,
 };
 
+static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
+{
+       struct floppy_state *fs = macio_get_drvdata(mdev);
+       struct swim3 __iomem *sw = fs->swim3;
+
+       if (!fs)
+               return;
+       if (mb_state != MB_FD)
+               return;
+
+       /* Clear state */
+       out_8(&sw->intr_enable, 0);
+       in_8(&sw->intr);
+       in_8(&sw->error);
+}
+
 static int swim3_add_device(struct macio_dev *mdev, int index)
 {
        struct device_node *swim = mdev->ofdev.dev.of_node;
        struct floppy_state *fs = &floppy_states[index];
        int rc = -EBUSY;
 
+       /* Do this first for message macros */
+       memset(fs, 0, sizeof(*fs));
+       fs->mdev = mdev;
+       fs->index = index;
+
        /* Check & Request resources */
        if (macio_resource_count(mdev) < 2) {
-               printk(KERN_WARNING "ifd%d: no address for %s\n",
-                      index, swim->full_name);
+               swim3_err("%s", "No address in device-tree\n");
                return -ENXIO;
        }
-       if (macio_irq_count(mdev) < 2) {
-               printk(KERN_WARNING "fd%d: no intrs for device %s\n",
-                       index, swim->full_name);
+       if (macio_irq_count(mdev) < 1) {
+               swim3_err("%s", "No interrupt in device-tree\n");
+               return -ENXIO;
        }
        if (macio_request_resource(mdev, 0, "swim3 (mmio)")) {
-               printk(KERN_ERR "fd%d: can't request mmio resource for %s\n",
-                      index, swim->full_name);
+               swim3_err("%s", "Can't request mmio resource\n");
                return -EBUSY;
        }
        if (macio_request_resource(mdev, 1, "swim3 (dma)")) {
-               printk(KERN_ERR "fd%d: can't request dma resource for %s\n",
-                      index, swim->full_name);
+               swim3_err("%s", "Can't request dma resource\n");
                macio_release_resource(mdev, 0);
                return -EBUSY;
        }
@@ -1063,22 +1137,18 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
        if (mdev->media_bay == NULL)
                pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);
        
-       memset(fs, 0, sizeof(*fs));
-       spin_lock_init(&fs->lock);
        fs->state = idle;
        fs->swim3 = (struct swim3 __iomem *)
                ioremap(macio_resource_start(mdev, 0), 0x200);
        if (fs->swim3 == NULL) {
-               printk("fd%d: couldn't map registers for %s\n",
-                      index, swim->full_name);
+               swim3_err("%s", "Couldn't map mmio registers\n");
                rc = -ENOMEM;
                goto out_release;
        }
        fs->dma = (struct dbdma_regs __iomem *)
                ioremap(macio_resource_start(mdev, 1), 0x200);
        if (fs->dma == NULL) {
-               printk("fd%d: couldn't map DMA for %s\n",
-                      index, swim->full_name);
+               swim3_err("%s", "Couldn't map dma registers\n");
                iounmap(fs->swim3);
                rc = -ENOMEM;
                goto out_release;
@@ -1090,31 +1160,25 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
        fs->secpercyl = 36;
        fs->secpertrack = 18;
        fs->total_secs = 2880;
-       fs->mdev = mdev;
        init_waitqueue_head(&fs->wait);
 
        fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
        memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
        st_le16(&fs->dma_cmd[1].command, DBDMA_STOP);
 
+       if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD)
+               swim3_mb_event(mdev, MB_FD);
+
        if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
-               printk(KERN_ERR "fd%d: couldn't request irq %d for %s\n",
-                      index, fs->swim3_intr, swim->full_name);
+               swim3_err("%s", "Couldn't request interrupt\n");
                pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
                goto out_unmap;
                return -EBUSY;
        }
-/*
-       if (request_irq(fs->dma_intr, fd_dma_interrupt, 0, "SWIM3-dma", fs)) {
-               printk(KERN_ERR "Couldn't get irq %d for SWIM3 DMA",
-                      fs->dma_intr);
-               return -EBUSY;
-       }
-*/
 
        init_timer(&fs->timeout);
 
-       printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count,
+       swim3_info("SWIM3 floppy controller %s\n",
                mdev->media_bay ? "in media bay" : "");
 
        return 0;
@@ -1132,41 +1196,42 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
 
 static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device_id *match)
 {
-       int i, rc;
        struct gendisk *disk;
+       int index, rc;
+
+       index = floppy_count++;
+       if (index >= MAX_FLOPPIES)
+               return -ENXIO;
 
        /* Add the drive */
-       rc = swim3_add_device(mdev, floppy_count);
+       rc = swim3_add_device(mdev, index);
        if (rc)
                return rc;
+       /* Now register that disk. Same comment about failure handling */
+       disk = disks[index] = alloc_disk(1);
+       if (disk == NULL)
+               return -ENOMEM;
+       disk->queue = blk_init_queue(do_fd_request, &swim3_lock);
+       if (disk->queue == NULL) {
+               put_disk(disk);
+               return -ENOMEM;
+       }
+       disk->queue->queuedata = &floppy_states[index];
 
-       /* Now create the queue if not there yet */
-       if (swim3_queue == NULL) {
+       if (index == 0) {
                /* If we failed, there isn't much we can do as the driver is still
                 * too dumb to remove the device, just bail out
                 */
                if (register_blkdev(FLOPPY_MAJOR, "fd"))
                        return 0;
-               swim3_queue = blk_init_queue(do_fd_request, &swim3_lock);
-               if (swim3_queue == NULL) {
-                       unregister_blkdev(FLOPPY_MAJOR, "fd");
-                       return 0;
-               }
        }
 
-       /* Now register that disk. Same comment about failure handling */
-       i = floppy_count++;
-       disk = disks[i] = alloc_disk(1);
-       if (disk == NULL)
-               return 0;
-
        disk->major = FLOPPY_MAJOR;
-       disk->first_minor = i;
+       disk->first_minor = index;
        disk->fops = &floppy_fops;
-       disk->private_data = &floppy_states[i];
-       disk->queue = swim3_queue;
+       disk->private_data = &floppy_states[index];
        disk->flags |= GENHD_FL_REMOVABLE;
-       sprintf(disk->disk_name, "fd%d", i);
+       sprintf(disk->disk_name, "fd%d", index);
        set_capacity(disk, 2880);
        add_disk(disk);
 
@@ -1194,6 +1259,9 @@ static struct macio_driver swim3_driver =
                .of_match_table = swim3_match,
        },
        .probe          = swim3_attach,
+#ifdef CONFIG_PMAC_MEDIABAY
+       .mediabay_event = swim3_mb_event,
+#endif
 #if 0
        .suspend        = swim3_suspend,
        .resume         = swim3_resume,
index 11b41fd..5ccf142 100644
@@ -188,7 +188,7 @@ config BT_MRVL
          The core driver to support Marvell Bluetooth devices.
 
          This driver is required if you want to support
-         Marvell Bluetooth devices, such as 8688/8787.
+         Marvell Bluetooth devices, such as 8688/8787/8797.
 
          Say Y here to compile Marvell Bluetooth driver
          into the kernel or say M to compile it as module.
@@ -201,8 +201,8 @@ config BT_MRVL_SDIO
          The driver for Marvell Bluetooth chipsets with SDIO interface.
 
          This driver is required if you want to use Marvell Bluetooth
-         devices with SDIO interface. Currently SD8688/SD8787 chipsets are
-         supported.
+         devices with SDIO interface. Currently SD8688/SD8787/SD8797
+         chipsets are supported.
 
          Say Y here to compile support for Marvell BT-over-SDIO driver
          into the kernel or say M to compile it as module.
index 9ef4816..27b74b0 100644
@@ -65,7 +65,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8688 = {
        .io_port_1 = 0x01,
        .io_port_2 = 0x02,
 };
-static const struct btmrvl_sdio_card_reg btmrvl_reg_8787 = {
+static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
        .cfg = 0x00,
        .host_int_mask = 0x02,
        .host_intstatus = 0x03,
@@ -92,7 +92,14 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
 static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
        .helper         = NULL,
        .firmware       = "mrvl/sd8787_uapsta.bin",
-       .reg            = &btmrvl_reg_8787,
+       .reg            = &btmrvl_reg_87xx,
+       .sd_blksz_fw_dl = 256,
+};
+
+static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
+       .helper         = NULL,
+       .firmware       = "mrvl/sd8797_uapsta.bin",
+       .reg            = &btmrvl_reg_87xx,
        .sd_blksz_fw_dl = 256,
 };
 
@@ -103,6 +110,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
        /* Marvell SD8787 Bluetooth device */
        { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
                        .driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
+       /* Marvell SD8797 Bluetooth device */
+       { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
+                       .driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
 
        { }     /* Terminating entry */
 };
@@ -1076,3 +1086,4 @@ MODULE_LICENSE("GPL v2");
 MODULE_FIRMWARE("sd8688_helper.bin");
 MODULE_FIRMWARE("sd8688.bin");
 MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
index 2bd87d4..ea5ad1c 100644
@@ -785,9 +785,8 @@ skip_waking:
                usb_mark_last_busy(data->udev);
        }
 
-       usb_free_urb(urb);
-
 done:
+       usb_free_urb(urb);
        return err;
 }
 
index 2e302a1..2ed6ab1 100644
@@ -41,6 +41,8 @@
 
 #define VERSION "1.3"
 
+static bool amp;
+
 struct vhci_data {
        struct hci_dev *hdev;
 
@@ -239,6 +241,9 @@ static int vhci_open(struct inode *inode, struct file *file)
        hdev->bus = HCI_VIRTUAL;
        hdev->driver_data = data;
 
+       if (amp)
+               hdev->dev_type = HCI_AMP;
+
        hdev->open     = vhci_open_dev;
        hdev->close    = vhci_close_dev;
        hdev->flush    = vhci_flush;
@@ -303,6 +308,9 @@ static void __exit vhci_exit(void)
 module_init(vhci_init);
 module_exit(vhci_exit);
 
+module_param(amp, bool, 0644);
+MODULE_PARM_DESC(amp, "Create AMP controller device");
+
 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
 MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION);
 MODULE_VERSION(VERSION);
index c2917ff..34767a6 100644
 #define IPMI_WDOG_SET_TIMER            0x24
 #define IPMI_WDOG_GET_TIMER            0x25
 
+#define IPMI_WDOG_TIMER_NOT_INIT_RESP  0x80
+
 /* These are here until the real ones get into the watchdog.h interface. */
 #ifndef WDIOC_GETTIMEOUT
 #define        WDIOC_GETTIMEOUT        _IOW(WATCHDOG_IOCTL_BASE, 20, int)
@@ -596,6 +598,7 @@ static int ipmi_heartbeat(void)
        struct kernel_ipmi_msg            msg;
        int                               rv;
        struct ipmi_system_interface_addr addr;
+       int                               timeout_retries = 0;
 
        if (ipmi_ignore_heartbeat)
                return 0;
@@ -616,6 +619,7 @@ static int ipmi_heartbeat(void)
 
        mutex_lock(&heartbeat_lock);
 
+restart:
        atomic_set(&heartbeat_tofree, 2);
 
        /*
@@ -653,7 +657,33 @@ static int ipmi_heartbeat(void)
        /* Wait for the heartbeat to be sent. */
        wait_for_completion(&heartbeat_wait);
 
-       if (heartbeat_recv_msg.msg.data[0] != 0) {
+       if (heartbeat_recv_msg.msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP)  {
+               timeout_retries++;
+               if (timeout_retries > 3) {
+                       printk(KERN_ERR PFX ": Unable to restore the IPMI"
+                              " watchdog's settings, giving up.\n");
+                       rv = -EIO;
+                       goto out_unlock;
+               }
+
+               /*
+                * The timer was not initialized, that means the BMC was
+                * probably reset and lost the watchdog information.  Attempt
+                * to restore the timer's info.  Note that we still hold
+                * the heartbeat lock, to keep a heartbeat from happening
+                * in this process, so must say no heartbeat to avoid a
+                * deadlock on this mutex.
+                */
+               rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+               if (rv) {
+                       printk(KERN_ERR PFX ": Unable to send the command to"
+                              " set the watchdog's settings, giving up.\n");
+                       goto out_unlock;
+               }
+
+               /* We might need a new heartbeat, so do it now */
+               goto restart;
+       } else if (heartbeat_recv_msg.msg.data[0] != 0) {
                /*
                 * Got an error in the heartbeat response.  It was already
                 * reported in ipmi_wdog_msg_handler, but we should return
@@ -662,6 +692,7 @@ static int ipmi_heartbeat(void)
                rv = -EINVAL;
        }
 
+out_unlock:
        mutex_unlock(&heartbeat_lock);
 
        return rv;
@@ -922,11 +953,15 @@ static struct miscdevice ipmi_wdog_miscdev = {
 static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
                                  void                 *handler_data)
 {
-       if (msg->msg.data[0] != 0) {
+       if (msg->msg.cmd == IPMI_WDOG_RESET_TIMER &&
+                       msg->msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP)
+               printk(KERN_INFO PFX "response: The IPMI controller appears"
+                      " to have been reset, will attempt to reinitialize"
+                      " the watchdog timer\n");
+       else if (msg->msg.data[0] != 0)
                printk(KERN_ERR PFX "response: Error %x on cmd %x\n",
                       msg->msg.data[0],
                       msg->msg.cmd);
-       }
 
        ipmi_free_recv_msg(msg);
 }
index c811cb1..2cce44a 100644
@@ -746,6 +746,37 @@ static void __exit ibft_exit(void)
        ibft_cleanup();
 }
 
+#ifdef CONFIG_ACPI
+static const struct {
+       char *sign;
+} ibft_signs[] = {
+       /*
+        * One spec says "IBFT", the other says "iBFT". We have to check
+        * for both.
+        */
+       { ACPI_SIG_IBFT },
+       { "iBFT" },
+};
+
+static void __init acpi_find_ibft_region(void)
+{
+       int i;
+       struct acpi_table_header *table = NULL;
+
+       if (acpi_disabled)
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++) {
+               acpi_get_table(ibft_signs[i].sign, 0, &table);
+               ibft_addr = (struct acpi_table_ibft *)table;
+       }
+}
+#else
+static void __init acpi_find_ibft_region(void)
+{
+}
+#endif
+
 /*
  * ibft_init() - creates sysfs tree entries for the iBFT data.
  */
@@ -753,9 +784,16 @@ static int __init ibft_init(void)
 {
        int rc = 0;
 
+       /*
+          As on UEFI systems the setup_arch()/find_ibft_region()
+          is called before ACPI tables are parsed and it only does
+          legacy finding.
+       */
+       if (!ibft_addr)
+               acpi_find_ibft_region();
+
        if (ibft_addr) {
-               printk(KERN_INFO "iBFT detected at 0x%llx.\n",
-                      (u64)isa_virt_to_bus(ibft_addr));
+               pr_info("iBFT detected.\n");
 
                rc = ibft_check_device();
                if (rc)
index bfe7232..4da4eb9 100644
@@ -45,13 +45,6 @@ EXPORT_SYMBOL_GPL(ibft_addr);
 static const struct {
        char *sign;
 } ibft_signs[] = {
-#ifdef CONFIG_ACPI
-       /*
-        * One spec says "IBFT", the other says "iBFT". We have to check
-        * for both.
-        */
-       { ACPI_SIG_IBFT },
-#endif
        { "iBFT" },
        { "BIFT" },     /* Broadcom iSCSI Offload */
 };
@@ -62,14 +55,6 @@ static const struct {
 #define VGA_MEM 0xA0000 /* VGA buffer */
 #define VGA_SIZE 0x20000 /* 128kB */
 
-#ifdef CONFIG_ACPI
-static int __init acpi_find_ibft(struct acpi_table_header *header)
-{
-       ibft_addr = (struct acpi_table_ibft *)header;
-       return 0;
-}
-#endif /* CONFIG_ACPI */
-
 static int __init find_ibft_in_mem(void)
 {
        unsigned long pos;
@@ -94,6 +79,7 @@ static int __init find_ibft_in_mem(void)
                                 * the table cannot be valid. */
                                if (pos + len <= (IBFT_END-1)) {
                                        ibft_addr = (struct acpi_table_ibft *)virt;
+                                       pr_info("iBFT found at 0x%lx.\n", pos);
                                        goto done;
                                }
                        }
@@ -108,20 +94,12 @@ done:
  */
 unsigned long __init find_ibft_region(unsigned long *sizep)
 {
-#ifdef CONFIG_ACPI
-       int i;
-#endif
        ibft_addr = NULL;
 
-#ifdef CONFIG_ACPI
-       for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++)
-               acpi_table_parse(ibft_signs[i].sign, acpi_find_ibft);
-#endif /* CONFIG_ACPI */
-
        /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
         * only use ACPI for this */
 
-       if (!ibft_addr && !efi_enabled)
+       if (!efi_enabled)
                find_ibft_in_mem();
 
        if (ibft_addr) {
index f10fc52..1eedb6f 100644
 #include <linux/module.h>
 #include <linux/sigma.h>
 
-/* Return: 0==OK, <0==error, =1 ==no more actions */
+static size_t sigma_action_size(struct sigma_action *sa)
+{
+       size_t payload = 0;
+
+       switch (sa->instr) {
+       case SIGMA_ACTION_WRITEXBYTES:
+       case SIGMA_ACTION_WRITESINGLE:
+       case SIGMA_ACTION_WRITESAFELOAD:
+               payload = sigma_action_len(sa);
+               break;
+       default:
+               break;
+       }
+
+       payload = ALIGN(payload, 2);
+
+       return payload + sizeof(struct sigma_action);
+}
+
+/*
+ * Returns a negative error value in case of an error, 0 if processing of
+ * the firmware should be stopped after this action, 1 otherwise.
+ */
 static int
-process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw)
+process_sigma_action(struct i2c_client *client, struct sigma_action *sa)
 {
-       struct sigma_action *sa = (void *)(ssfw->fw->data + ssfw->pos);
        size_t len = sigma_action_len(sa);
-       int ret = 0;
+       int ret;
 
        pr_debug("%s: instr:%i addr:%#x len:%zu\n", __func__,
                sa->instr, sa->addr, len);
@@ -29,44 +50,50 @@ process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw)
        case SIGMA_ACTION_WRITEXBYTES:
        case SIGMA_ACTION_WRITESINGLE:
        case SIGMA_ACTION_WRITESAFELOAD:
-               if (ssfw->fw->size < ssfw->pos + len)
-                       return -EINVAL;
                ret = i2c_master_send(client, (void *)&sa->addr, len);
                if (ret < 0)
                        return -EINVAL;
                break;
-
        case SIGMA_ACTION_DELAY:
-               ret = 0;
                udelay(len);
                len = 0;
                break;
-
        case SIGMA_ACTION_END:
-               return 1;
-
+               return 0;
        default:
                return -EINVAL;
        }
 
-       /* when arrive here ret=0 or sent data */
-       ssfw->pos += sigma_action_size(sa, len);
-       return ssfw->pos == ssfw->fw->size;
+       return 1;
 }
 
 static int
 process_sigma_actions(struct i2c_client *client, struct sigma_firmware *ssfw)
 {
-       pr_debug("%s: processing %p\n", __func__, ssfw);
+       struct sigma_action *sa;
+       size_t size;
+       int ret;
+
+       while (ssfw->pos + sizeof(*sa) <= ssfw->fw->size) {
+               sa = (struct sigma_action *)(ssfw->fw->data + ssfw->pos);
+
+               size = sigma_action_size(sa);
+               ssfw->pos += size;
+               if (ssfw->pos > ssfw->fw->size || size == 0)
+                       break;
+
+               ret = process_sigma_action(client, sa);
 
-       while (1) {
-               int ret = process_sigma_action(client, ssfw);
                pr_debug("%s: action returned %i\n", __func__, ret);
-               if (ret == 1)
-                       return 0;
-               else if (ret)
+
+               if (ret <= 0)
                        return ret;
        }
+
+       if (ssfw->pos != ssfw->fw->size)
+               return -EINVAL;
+
+       return 0;
 }
 
 int process_sigma_firmware(struct i2c_client *client, const char *name)
@@ -89,16 +116,24 @@ int process_sigma_firmware(struct i2c_client *client, const char *name)
 
        /* then verify the header */
        ret = -EINVAL;
-       if (fw->size < sizeof(*ssfw_head))
+
+       /*
+        * Reject too small or unreasonable large files. The upper limit has been
+        * chosen a bit arbitrarily, but it should be enough for all practical
+        * purposes and having the limit makes it easier to avoid integer
+        * overflows later in the loading process.
+        */
+       if (fw->size < sizeof(*ssfw_head) || fw->size >= 0x4000000)
                goto done;
 
        ssfw_head = (void *)fw->data;
        if (memcmp(ssfw_head->magic, SIGMA_MAGIC, ARRAY_SIZE(ssfw_head->magic)))
                goto done;
 
-       crc = crc32(0, fw->data, fw->size);
+       crc = crc32(0, fw->data + sizeof(*ssfw_head),
+                       fw->size - sizeof(*ssfw_head));
        pr_debug("%s: crc=%x\n", __func__, crc);
-       if (crc != ssfw_head->crc)
+       if (crc != le32_to_cpu(ssfw_head->crc))
                goto done;
 
        ssfw.pos = sizeof(*ssfw_head);
index dbcb0bc..4e018d6 100644
@@ -18,7 +18,7 @@ obj-$(CONFIG_ARCH_DAVINCI)    += gpio-davinci.o
 obj-$(CONFIG_GPIO_EP93XX)      += gpio-ep93xx.o
 obj-$(CONFIG_GPIO_IT8761E)     += gpio-it8761e.o
 obj-$(CONFIG_GPIO_JANZ_TTL)    += gpio-janz-ttl.o
-obj-$(CONFIG_MACH_KS8695)      += gpio-ks8695.o
+obj-$(CONFIG_ARCH_KS8695)      += gpio-ks8695.o
 obj-$(CONFIG_GPIO_LANGWELL)    += gpio-langwell.o
 obj-$(CONFIG_ARCH_LPC32XX)     += gpio-lpc32xx.o
 obj-$(CONFIG_GPIO_MAX730X)     += gpio-max730x.o
index 038f5eb..f8ce29e 100644
@@ -22,7 +22,6 @@
 #include <linux/mfd/da9052/da9052.h>
 #include <linux/mfd/da9052/reg.h>
 #include <linux/mfd/da9052/pdata.h>
-#include <linux/mfd/da9052/gpio.h>
 
 #define DA9052_INPUT                           1
 #define DA9052_OUTPUT_OPENDRAIN                2
@@ -43,6 +42,9 @@
 #define DA9052_GPIO_MASK_UPPER_NIBBLE          0xF0
 #define DA9052_GPIO_MASK_LOWER_NIBBLE          0x0F
 #define DA9052_GPIO_NIBBLE_SHIFT               4
+#define DA9052_IRQ_GPI0                        16
+#define DA9052_GPIO_ODD_SHIFT                  7
+#define DA9052_GPIO_EVEN_SHIFT                 3
 
 struct da9052_gpio {
        struct da9052 *da9052;
@@ -104,33 +106,26 @@ static int da9052_gpio_get(struct gpio_chip *gc, unsigned offset)
 static void da9052_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
 {
        struct da9052_gpio *gpio = to_da9052_gpio(gc);
-       unsigned char register_value = 0;
        int ret;
 
        if (da9052_gpio_port_odd(offset)) {
-               if (value) {
-                       register_value = DA9052_GPIO_ODD_PORT_MODE;
                        ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
                                                DA9052_GPIO_0_1_REG,
                                                DA9052_GPIO_ODD_PORT_MODE,
-                                               register_value);
+                                               value << DA9052_GPIO_ODD_SHIFT);
                        if (ret != 0)
                                dev_err(gpio->da9052->dev,
                                        "Failed to updated gpio odd reg,%d",
                                        ret);
-               }
        } else {
-               if (value) {
-                       register_value = DA9052_GPIO_EVEN_PORT_MODE;
                        ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
                                                DA9052_GPIO_0_1_REG,
                                                DA9052_GPIO_EVEN_PORT_MODE,
-                                               register_value);
+                                               value << DA9052_GPIO_EVEN_SHIFT);
                        if (ret != 0)
                                dev_err(gpio->da9052->dev,
                                        "Failed to updated gpio even reg,%d",
                                        ret);
-               }
        }
 }
 
@@ -201,9 +196,9 @@ static struct gpio_chip reference_gp __devinitdata = {
        .direction_input = da9052_gpio_direction_input,
        .direction_output = da9052_gpio_direction_output,
        .to_irq = da9052_gpio_to_irq,
-       .can_sleep = 1;
-       .ngpio = 16;
-       .base = -1;
+       .can_sleep = 1,
+       .ngpio = 16,
+       .base = -1,
 };
 
 static int __devinit da9052_gpio_probe(struct platform_device *pdev)
index ea8e738..461958f 100644
@@ -332,6 +332,34 @@ static void ioh_irq_mask(struct irq_data *d)
                  &chip->reg->regs[chip->ch].imask);
 }
 
+static void ioh_irq_disable(struct irq_data *d)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       struct ioh_gpio *chip = gc->private;
+       unsigned long flags;
+       u32 ien;
+
+       spin_lock_irqsave(&chip->spinlock, flags);
+       ien = ioread32(&chip->reg->regs[chip->ch].ien);
+       ien &= ~(1 << (d->irq - chip->irq_base));
+       iowrite32(ien, &chip->reg->regs[chip->ch].ien);
+       spin_unlock_irqrestore(&chip->spinlock, flags);
+}
+
+static void ioh_irq_enable(struct irq_data *d)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       struct ioh_gpio *chip = gc->private;
+       unsigned long flags;
+       u32 ien;
+
+       spin_lock_irqsave(&chip->spinlock, flags);
+       ien = ioread32(&chip->reg->regs[chip->ch].ien);
+       ien |= 1 << (d->irq - chip->irq_base);
+       iowrite32(ien, &chip->reg->regs[chip->ch].ien);
+       spin_unlock_irqrestore(&chip->spinlock, flags);
+}
+
 static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
 {
        struct ioh_gpio *chip = dev_id;
@@ -339,7 +367,7 @@ static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
        int i, j;
        int ret = IRQ_NONE;
 
-       for (i = 0; i < 8; i++) {
+       for (i = 0; i < 8; i++, chip++) {
                reg_val = ioread32(&chip->reg->regs[i].istatus);
                for (j = 0; j < num_ports[i]; j++) {
                        if (reg_val & BIT(j)) {
@@ -370,6 +398,8 @@ static __devinit void ioh_gpio_alloc_generic_chip(struct ioh_gpio *chip,
        ct->chip.irq_mask = ioh_irq_mask;
        ct->chip.irq_unmask = ioh_irq_unmask;
        ct->chip.irq_set_type = ioh_irq_type;
+       ct->chip.irq_disable = ioh_irq_disable;
+       ct->chip.irq_enable = ioh_irq_enable;
 
        irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
                               IRQ_NOREQUEST | IRQ_NOPROBE, 0);
index ec3fcf0..5cd04b6 100644
@@ -132,6 +132,15 @@ static int mpc8xxx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val
        return 0;
 }
 
+static int mpc5121_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+       /* GPIO 28..31 are input only on MPC5121 */
+       if (gpio >= 28)
+               return -EINVAL;
+
+       return mpc8xxx_gpio_dir_out(gc, gpio, val);
+}
+
 static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
 {
        struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
@@ -340,11 +349,10 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
        mm_gc->save_regs = mpc8xxx_gpio_save_regs;
        gc->ngpio = MPC8XXX_GPIO_PINS;
        gc->direction_input = mpc8xxx_gpio_dir_in;
-       gc->direction_output = mpc8xxx_gpio_dir_out;
-       if (of_device_is_compatible(np, "fsl,mpc8572-gpio"))
-               gc->get = mpc8572_gpio_get;
-       else
-               gc->get = mpc8xxx_gpio_get;
+       gc->direction_output = of_device_is_compatible(np, "fsl,mpc5121-gpio") ?
+               mpc5121_gpio_dir_out : mpc8xxx_gpio_dir_out;
+       gc->get = of_device_is_compatible(np, "fsl,mpc8572-gpio") ?
+               mpc8572_gpio_get : mpc8xxx_gpio_get;
        gc->set = mpc8xxx_gpio_set;
        gc->to_irq = mpc8xxx_gpio_to_irq;
 
index 093c90b..4102f63 100644
@@ -238,10 +238,6 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
        int ret, irq, i;
        static DECLARE_BITMAP(init_irq, NR_IRQS);
 
-       pdata = dev->dev.platform_data;
-       if (pdata == NULL)
-               return -ENODEV;
-
        chip = kzalloc(sizeof(*chip), GFP_KERNEL);
        if (chip == NULL)
                return -ENOMEM;
index 3969f75..d2619d7 100644
@@ -456,6 +456,30 @@ done:
 EXPORT_SYMBOL(drm_crtc_helper_set_mode);
 
 
+static int
+drm_crtc_helper_disable(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_connector *connector;
+       struct drm_encoder *encoder;
+
+       /* Decouple all encoders and their attached connectors from this crtc */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               if (encoder->crtc != crtc)
+                       continue;
+
+               list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+                       if (connector->encoder != encoder)
+                               continue;
+
+                       connector->encoder = NULL;
+               }
+       }
+
+       drm_helper_disable_unused_functions(dev);
+       return 0;
+}
+
 /**
  * drm_crtc_helper_set_config - set a new config from userspace
  * @crtc: CRTC to setup
@@ -510,8 +534,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                                (int)set->num_connectors, set->x, set->y);
        } else {
                DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
-               set->mode = NULL;
-               set->num_connectors = 0;
+               return drm_crtc_helper_disable(set->crtc);
        }
 
        dev = set->crtc->dev;
index d09a6e0..004b048 100644
@@ -62,6 +62,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
        const struct intel_device_info *info = INTEL_INFO(dev);
 
        seq_printf(m, "gen: %d\n", info->gen);
+       seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
 #define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
        B(is_mobile);
        B(is_i85x);
index a9533c5..a9ae374 100644
@@ -1454,6 +1454,14 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
 
        diff1 = now - dev_priv->last_time1;
 
+       /* Prevent division-by-zero if we are asking too fast.
+        * Also, we don't get interesting results if we are polling
+        * faster than once in 10ms, so just return the saved value
+        * in such cases.
+        */
+       if (diff1 <= 10)
+               return dev_priv->chipset_power;
+
        count1 = I915_READ(DMIEC);
        count2 = I915_READ(DDREC);
        count3 = I915_READ(CSIEC);
@@ -1484,6 +1492,8 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
        dev_priv->last_count1 = total_count;
        dev_priv->last_time1 = now;
 
+       dev_priv->chipset_power = ret;
+
        return ret;
 }
 
index 15bfa91..a1103fc 100644
@@ -58,15 +58,15 @@ module_param_named(powersave, i915_powersave, int, 0600);
 MODULE_PARM_DESC(powersave,
                "Enable powersavings, fbc, downclocking, etc. (default: true)");
 
-unsigned int i915_semaphores __read_mostly = 0;
+int i915_semaphores __read_mostly = -1;
 module_param_named(semaphores, i915_semaphores, int, 0600);
 MODULE_PARM_DESC(semaphores,
-               "Use semaphores for inter-ring sync (default: false)");
+               "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
 
-unsigned int i915_enable_rc6 __read_mostly = 0;
+int i915_enable_rc6 __read_mostly = -1;
 module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
 MODULE_PARM_DESC(i915_enable_rc6,
-               "Enable power-saving render C-state 6 (default: true)");
+               "Enable power-saving render C-state 6 (default: -1 (use per-chip default)");
 
 int i915_enable_fbc __read_mostly = -1;
 module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
@@ -328,7 +328,7 @@ void intel_detect_pch(struct drm_device *dev)
        }
 }
 
-static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 {
        int count;
 
@@ -344,6 +344,22 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
                udelay(10);
 }
 
+void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+{
+       int count;
+
+       count = 0;
+       while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
+               udelay(10);
+
+       I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
+       POSTING_READ(FORCEWAKE_MT);
+
+       count = 0;
+       while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
+               udelay(10);
+}
+
 /*
  * Generally this is called implicitly by the register read function. However,
  * if some sequence requires the GT to not power down then this function should
@@ -356,15 +372,21 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 
        /* Forcewake is atomic in case we get in here without the lock */
        if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
-               __gen6_gt_force_wake_get(dev_priv);
+               dev_priv->display.force_wake_get(dev_priv);
 }
 
-static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 {
        I915_WRITE_NOTRACE(FORCEWAKE, 0);
        POSTING_READ(FORCEWAKE);
 }
 
+void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+{
+       I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
+       POSTING_READ(FORCEWAKE_MT);
+}
+
 /*
  * see gen6_gt_force_wake_get()
  */
@@ -373,7 +395,7 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
        WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
        if (atomic_dec_and_test(&dev_priv->forcewake_count))
-               __gen6_gt_force_wake_put(dev_priv);
+               dev_priv->display.force_wake_put(dev_priv);
 }
 
 void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
@@ -903,8 +925,9 @@ MODULE_LICENSE("GPL and additional rights");
 /* We give fast paths for the really cool registers */
 #define NEEDS_FORCE_WAKE(dev_priv, reg) \
        (((dev_priv)->info->gen >= 6) && \
-       ((reg) < 0x40000) && \
-       ((reg) != FORCEWAKE))
+        ((reg) < 0x40000) &&            \
+        ((reg) != FORCEWAKE) &&         \
+        ((reg) != ECOBUS))
 
 #define __i915_read(x, y) \
 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
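
The hunks above route gen6_gt_force_wake_get()/put() through function pointers so that only the first caller (and the last) touches the hardware, and so the multi-threaded FORCEWAKE_MT variant can be swapped in at init time. A minimal, self-contained sketch of that refcount-plus-vtable shape (illustrative types, not the i915 structures):

#include <stdatomic.h>

struct gt {
	atomic_int forcewake_count;
	void (*force_wake_get)(struct gt *gt);   /* legacy or MT variant */
	void (*force_wake_put)(struct gt *gt);
};

/* Only the 0 -> 1 transition asks the hardware to stay awake. */
static void gt_force_wake_get(struct gt *gt)
{
	if (atomic_fetch_add(&gt->forcewake_count, 1) == 0)
		gt->force_wake_get(gt);
}

/* Only the 1 -> 0 transition drops the wake request. */
static void gt_force_wake_put(struct gt *gt)
{
	if (atomic_fetch_sub(&gt->forcewake_count, 1) == 1)
		gt->force_wake_put(gt);
}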
index 4a9c1b9..554bef7 100644 (file)
@@ -107,6 +107,7 @@ struct opregion_header;
 struct opregion_acpi;
 struct opregion_swsci;
 struct opregion_asle;
+struct drm_i915_private;
 
 struct intel_opregion {
        struct opregion_header *header;
@@ -221,6 +222,8 @@ struct drm_i915_display_funcs {
                          struct drm_i915_gem_object *obj);
        int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                            int x, int y);
+       void (*force_wake_get)(struct drm_i915_private *dev_priv);
+       void (*force_wake_put)(struct drm_i915_private *dev_priv);
        /* clock updates for mode set */
        /* cursor updates */
        /* render clock increase/decrease */
@@ -710,6 +713,7 @@ typedef struct drm_i915_private {
 
        u64 last_count1;
        unsigned long last_time1;
+       unsigned long chipset_power;
        u64 last_count2;
        struct timespec last_time2;
        unsigned long gfx_power;
@@ -998,11 +1002,11 @@ extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc __always_unused;
 extern int i915_panel_ignore_lid __read_mostly;
 extern unsigned int i915_powersave __read_mostly;
-extern unsigned int i915_semaphores __read_mostly;
+extern int i915_semaphores __read_mostly;
 extern unsigned int i915_lvds_downclock __read_mostly;
 extern int i915_panel_use_ssc __read_mostly;
 extern int i915_vbt_sdvo_panel_type __read_mostly;
-extern unsigned int i915_enable_rc6 __read_mostly;
+extern int i915_enable_rc6 __read_mostly;
 extern int i915_enable_fbc __read_mostly;
 extern bool i915_enable_hangcheck __read_mostly;
 
@@ -1308,6 +1312,11 @@ extern void gen6_set_rps(struct drm_device *dev, u8 val);
 extern void intel_detect_pch(struct drm_device *dev);
 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
 
+extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
+
 /* overlay */
 #ifdef CONFIG_DEBUG_FS
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@ -1352,8 +1361,9 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
 /* We give fast paths for the really cool registers */
 #define NEEDS_FORCE_WAKE(dev_priv, reg) \
        (((dev_priv)->info->gen >= 6) && \
-       ((reg) < 0x40000) && \
-       ((reg) != FORCEWAKE))
+        ((reg) < 0x40000) &&            \
+        ((reg) != FORCEWAKE) &&         \
+        ((reg) != ECOBUS))
 
 #define __i915_read(x, y) \
        u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
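
NEEDS_FORCE_WAKE (duplicated here and in i915_drv.c above) gates the generated register accessors: gen6+ registers below 0x40000 get an implicit forcewake around the access, except the registers used to drive forcewake itself, namely FORCEWAKE and, with this patch, ECOBUS, which the init probe in intel_display.c below reads while it already holds forcewake explicitly. A self-contained sketch of how such a gate is typically used (stub helpers, not the driver's macros):

#include <stdint.h>
#include <stdbool.h>

struct dev { int gen; };

static uint32_t raw_read(struct dev *d, uint32_t reg) { (void)d; (void)reg; return 0; }
static void force_wake_get(struct dev *d) { (void)d; }
static void force_wake_put(struct dev *d) { (void)d; }

#define REG_FORCEWAKE 0xA18C
#define REG_ECOBUS    0xA180

static bool needs_force_wake(struct dev *d, uint32_t reg)
{
	return d->gen >= 6 && reg < 0x40000 &&
	       reg != REG_FORCEWAKE && reg != REG_ECOBUS;
}

static uint32_t read_reg(struct dev *d, uint32_t reg)
{
	uint32_t val;

	if (needs_force_wake(d, reg)) {
		force_wake_get(d);
		val = raw_read(d, reg);
		force_wake_put(d);
	} else {
		val = raw_read(d, reg);
	}
	return val;
}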
index 3693e83..c681dc1 100644 (file)
@@ -32,6 +32,7 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
+#include <linux/dma_remapping.h>
 
 struct change_domains {
        uint32_t invalidate_domains;
@@ -746,6 +747,22 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
        return 0;
 }
 
+static bool
+intel_enable_semaphores(struct drm_device *dev)
+{
+       if (INTEL_INFO(dev)->gen < 6)
+               return 0;
+
+       if (i915_semaphores >= 0)
+               return i915_semaphores;
+
+       /* Enable semaphores on SNB when IO remapping is off */
+       if (INTEL_INFO(dev)->gen == 6)
+               return !intel_iommu_enabled;
+
+       return 1;
+}
+
 static int
 i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
                               struct intel_ring_buffer *to)
@@ -758,7 +775,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
                return 0;
 
        /* XXX gpu semaphores are implicated in various hard hangs on SNB */
-       if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
+       if (!intel_enable_semaphores(obj->base.dev))
                return i915_gem_object_wait_rendering(obj);
 
        idx = intel_ring_sync_index(from, to);
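
intel_enable_semaphores() resolves the reworked module parameter the same way the new -1 defaults in i915_drv.c do: a non-negative value is an explicit user override, while -1 means "use the per-chip default". A generic sketch of that tri-state resolution, with hypothetical names, shown only to spell out the convention:

/* param: -1 = auto, 0 = force off, 1 = force on */
static int resolve_tristate(int param, int hw_default)
{
	if (param >= 0)
		return param;      /* explicit user request wins */
	return hw_default;         /* otherwise take the per-chip choice */
}

/* e.g. resolve_tristate(i915_semaphores, gen6_default) */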
index b080cc8..a26d5b0 100644 (file)
 /* or SDVOB */
 #define HDMIB   0xe1140
 #define  PORT_ENABLE    (1 << 31)
-#define  TRANSCODER_A   (0)
-#define  TRANSCODER_B   (1 << 30)
-#define  TRANSCODER(pipe)      ((pipe) << 30)
-#define  TRANSCODER_MASK   (1 << 30)
+#define  TRANSCODER(pipe)       ((pipe) << 30)
+#define  TRANSCODER_CPT(pipe)   ((pipe) << 29)
+#define  TRANSCODER_MASK        (1 << 30)
+#define  TRANSCODER_MASK_CPT    (3 << 29)
 #define  COLOR_FORMAT_8bpc      (0)
 #define  COLOR_FORMAT_12bpc     (3 << 26)
 #define  SDVOB_HOTPLUG_ENABLE   (1 << 23)
 #define  EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B   (0x38<<22)
 #define  EDP_LINK_TRAIN_VOL_EMP_MASK_SNB       (0x3f<<22)
 
+/* IVB */
+#define EDP_LINK_TRAIN_400MV_0DB_IVB           (0x24 <<22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB         (0x2a <<22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB           (0x2f <<22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB           (0x30 <<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB         (0x36 <<22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB           (0x38 <<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB         (0x33 <<22)
+
+/* legacy values */
+#define EDP_LINK_TRAIN_500MV_0DB_IVB           (0x00 <<22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB          (0x20 <<22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB         (0x02 <<22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB        (0x22 <<22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB          (0x23 <<22)
+
+#define  EDP_LINK_TRAIN_VOL_EMP_MASK_IVB       (0x3f<<22)
+
 #define  FORCEWAKE                             0xA18C
 #define  FORCEWAKE_ACK                         0x130090
+#define  FORCEWAKE_MT                          0xa188 /* multi-threaded */
+#define  FORCEWAKE_MT_ACK                      0x130040
+#define  ECOBUS                                        0xa180
+#define    FORCEWAKE_MT_ENABLE                 (1<<5)
 
 #define  GT_FIFO_FREE_ENTRIES                  0x120008
 #define    GT_FIFO_NUM_RESERVED_ENTRIES                20
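
The FORCEWAKE_MT writes in the earlier hunks, (1<<16) | 1 to request forcewake and (1<<16) | 0 to release it, suggest a masked-register convention: the upper halfword names which low bits the write is allowed to change, so unrelated bits survive. That reading is an inference from how the values are formed here, not something the patch states. A small helper expressing it, with hypothetical names:

#include <stdint.h>

/* Bits 31:16 act as a per-bit write enable for bits 15:0. */
static inline uint32_t masked_bit_enable(uint32_t bit)
{
	return (bit << 16) | bit;
}

static inline uint32_t masked_bit_disable(uint32_t bit)
{
	return bit << 16;          /* mask set, value bit cleared */
}

/* masked_bit_enable(1)  == (1 << 16) | 1, i.e. request forcewake;
 * masked_bit_disable(1) == (1 << 16) | 0, i.e. drop the request.  */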
index e77a863..d809b03 100644 (file)
@@ -38,8 +38,8 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "drm_dp_helper.h"
-
 #include "drm_crtc_helper.h"
+#include <linux/dma_remapping.h>
 
 #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
 
@@ -4670,6 +4670,7 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
 /**
  * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
  * @crtc: CRTC structure
+ * @mode: requested mode
  *
  * A pipe may be connected to one or more outputs.  Based on the depth of the
  * attached framebuffer, choose a good color depth to use on the pipe.
@@ -4681,13 +4682,15 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
  *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
  *    Displays may support a restricted set as well, check EDID and clamp as
  *      appropriate.
+ *    DP may want to dither down to 6bpc to fit larger modes
  *
  * RETURNS:
  * Dithering requirement (i.e. false if display bpc and pipe bpc match,
  * true if they don't match).
  */
 static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
-                                        unsigned int *pipe_bpp)
+                                        unsigned int *pipe_bpp,
+                                        struct drm_display_mode *mode)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4758,6 +4761,11 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
                }
        }
 
+       if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+               DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
+               display_bpc = 6;
+       }
+
        /*
         * We could just drive the pipe at the highest bpc all the time and
         * enable dithering as needed, but that costs bandwidth.  So choose
@@ -5019,6 +5027,16 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
                        pipeconf &= ~PIPECONF_DOUBLE_WIDE;
        }
 
+       /* default to 8bpc */
+       pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
+       if (is_dp) {
+               if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+                       pipeconf |= PIPECONF_BPP_6 |
+                                   PIPECONF_DITHER_EN |
+                                   PIPECONF_DITHER_TYPE_SP;
+               }
+       }
+
        dpll |= DPLL_VCO_ENABLE;
 
        DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
@@ -5480,7 +5498,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
        /* determine panel color depth */
        temp = I915_READ(PIPECONF(pipe));
        temp &= ~PIPE_BPC_MASK;
-       dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
+       dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
        switch (pipe_bpp) {
        case 18:
                temp |= PIPE_6BPC;
@@ -7189,11 +7207,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        work->old_fb_obj = intel_fb->obj;
        INIT_WORK(&work->work, intel_unpin_work_fn);
 
+       ret = drm_vblank_get(dev, intel_crtc->pipe);
+       if (ret)
+               goto free_work;
+
        /* We borrow the event spin lock for protecting unpin_work */
        spin_lock_irqsave(&dev->event_lock, flags);
        if (intel_crtc->unpin_work) {
                spin_unlock_irqrestore(&dev->event_lock, flags);
                kfree(work);
+               drm_vblank_put(dev, intel_crtc->pipe);
 
                DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
                return -EBUSY;
@@ -7212,10 +7235,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
        crtc->fb = fb;
 
-       ret = drm_vblank_get(dev, intel_crtc->pipe);
-       if (ret)
-               goto cleanup_objs;
-
        work->pending_flip_obj = obj;
 
        work->enable_stall_check = true;
@@ -7238,7 +7257,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 cleanup_pending:
        atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
-cleanup_objs:
        drm_gem_object_unreference(&work->old_fb_obj->base);
        drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);
@@ -7247,6 +7265,8 @@ cleanup_objs:
        intel_crtc->unpin_work = NULL;
        spin_unlock_irqrestore(&dev->event_lock, flags);
 
+       drm_vblank_put(dev, intel_crtc->pipe);
+free_work:
        kfree(work);
 
        return ret;
@@ -7887,6 +7907,33 @@ void intel_init_emon(struct drm_device *dev)
        dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
 }
 
+static bool intel_enable_rc6(struct drm_device *dev)
+{
+       /*
+        * Respect the kernel parameter if it is set
+        */
+       if (i915_enable_rc6 >= 0)
+               return i915_enable_rc6;
+
+       /*
+        * Disable RC6 on Ironlake
+        */
+       if (INTEL_INFO(dev)->gen == 5)
+               return 0;
+
+       /*
+        * Enable rc6 on Sandybridge if DMA remapping is disabled
+        */
+       if (INTEL_INFO(dev)->gen == 6) {
+               DRM_DEBUG_DRIVER("Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n",
+                                intel_iommu_enabled ? "true" : "false",
+                                !intel_iommu_enabled ? "en" : "dis");
+               return !intel_iommu_enabled;
+       }
+       DRM_DEBUG_DRIVER("RC6 enabled\n");
+       return 1;
+}
+
 void gen6_enable_rps(struct drm_i915_private *dev_priv)
 {
        u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -7923,7 +7970,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
        I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
        I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
 
-       if (i915_enable_rc6)
+       if (intel_enable_rc6(dev_priv->dev))
                rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
                        GEN6_RC_CTL_RC6_ENABLE;
 
@@ -8372,7 +8419,7 @@ void ironlake_enable_rc6(struct drm_device *dev)
        /* rc6 disabled by default due to repeated reports of hanging during
         * boot and resume.
         */
-       if (!i915_enable_rc6)
+       if (!intel_enable_rc6(dev))
                return;
 
        mutex_lock(&dev->struct_mutex);
@@ -8491,6 +8538,28 @@ static void intel_init_display(struct drm_device *dev)
 
        /* For FIFO watermark updates */
        if (HAS_PCH_SPLIT(dev)) {
+               dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
+               dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
+
+               /* IVB configs may use multi-threaded forcewake */
+               if (IS_IVYBRIDGE(dev)) {
+                       u32     ecobus;
+
+                       mutex_lock(&dev->struct_mutex);
+                       __gen6_gt_force_wake_mt_get(dev_priv);
+                       ecobus = I915_READ(ECOBUS);
+                       __gen6_gt_force_wake_mt_put(dev_priv);
+                       mutex_unlock(&dev->struct_mutex);
+
+                       if (ecobus & FORCEWAKE_MT_ENABLE) {
+                               DRM_DEBUG_KMS("Using MT version of forcewake\n");
+                               dev_priv->display.force_wake_get =
+                                       __gen6_gt_force_wake_mt_get;
+                               dev_priv->display.force_wake_put =
+                                       __gen6_gt_force_wake_mt_put;
+                       }
+               }
+
                if (HAS_PCH_IBX(dev))
                        dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
                else if (HAS_PCH_CPT(dev))
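
The page-flip hunks above move drm_vblank_get() ahead of the unpin_work busy check and add matching drm_vblank_put() calls, so every early exit releases exactly what it acquired. A self-contained sketch of that acquire-early, unwind-in-reverse shape (stand-in helpers, not the driver's API):

#include <stdio.h>

static int  take_ref(const char *what)  { printf("get %s\n", what); return 0; }
static void drop_ref(const char *what)  { printf("put %s\n", what); }

static int queue_flip(int crtc_busy)
{
	int ret;

	ret = take_ref("vblank");        /* taken before anything else can fail */
	if (ret)
		goto free_work;

	if (crtc_busy) {                 /* this exit must drop the ref it took */
		ret = -16;               /* -EBUSY */
		goto put_vblank;
	}

	return 0;                        /* flip queued */

put_vblank:
	drop_ref("vblank");
free_work:
	/* kfree(work) in the driver */
	return ret;
}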
index 4d0358f..92b041b 100644 (file)
@@ -208,13 +208,15 @@ intel_dp_link_clock(uint8_t link_bw)
  */
 
 static int
-intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock)
+intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock, int check_bpp)
 {
        struct drm_crtc *crtc = intel_dp->base.base.crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int bpp = 24;
 
-       if (intel_crtc)
+       if (check_bpp)
+               bpp = check_bpp;
+       else if (intel_crtc)
                bpp = intel_crtc->bpp;
 
        return (pixel_clock * bpp + 9) / 10;
@@ -233,6 +235,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
        int max_lanes = intel_dp_max_lane_count(intel_dp);
+       int max_rate, mode_rate;
 
        if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
                if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
@@ -242,9 +245,17 @@ intel_dp_mode_valid(struct drm_connector *connector,
                        return MODE_PANEL;
        }
 
-       if (intel_dp_link_required(intel_dp, mode->clock)
-           > intel_dp_max_data_rate(max_link_clock, max_lanes))
-               return MODE_CLOCK_HIGH;
+       mode_rate = intel_dp_link_required(intel_dp, mode->clock, 0);
+       max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+
+       if (mode_rate > max_rate) {
+               mode_rate = intel_dp_link_required(intel_dp,
+                                                  mode->clock, 18);
+               if (mode_rate > max_rate)
+                       return MODE_CLOCK_HIGH;
+               else
+                       mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC;
+       }
 
        if (mode->clock < 10000)
                return MODE_CLOCK_LOW;
@@ -362,8 +373,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
         * clock divider.
         */
        if (is_cpu_edp(intel_dp)) {
-               if (IS_GEN6(dev))
-                       aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
+               if (IS_GEN6(dev) || IS_GEN7(dev))
+                       aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
                else
                        aux_clock_divider = 225; /* eDP input clock at 450Mhz */
        } else if (HAS_PCH_SPLIT(dev))
@@ -672,6 +683,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
        int lane_count, clock;
        int max_lane_count = intel_dp_max_lane_count(intel_dp);
        int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
+       int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 0;
        static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
 
        if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -689,7 +701,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
                for (clock = 0; clock <= max_clock; clock++) {
                        int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
 
-                       if (intel_dp_link_required(intel_dp, mode->clock)
+                       if (intel_dp_link_required(intel_dp, mode->clock, bpp)
                                        <= link_avail) {
                                intel_dp->link_bw = bws[clock];
                                intel_dp->lane_count = lane_count;
@@ -817,10 +829,11 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
        }
 
        /*
-        * There are three kinds of DP registers:
+        * There are four kinds of DP registers:
         *
         *      IBX PCH
-        *      CPU
+        *      SNB CPU
+        *      IVB CPU
         *      CPT PCH
         *
         * IBX PCH and CPU are the same for almost everything,
@@ -873,7 +886,25 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 
        /* Split out the IBX/CPU vs CPT settings */
 
-       if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+       if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+               if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+                       intel_dp->DP |= DP_SYNC_HS_HIGH;
+               if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+                       intel_dp->DP |= DP_SYNC_VS_HIGH;
+               intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+
+               if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+                       intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+               intel_dp->DP |= intel_crtc->pipe << 29;
+
+               /* don't miss out required setting for eDP */
+               intel_dp->DP |= DP_PLL_ENABLE;
+               if (adjusted_mode->clock < 200000)
+                       intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+               else
+                       intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+       } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
                intel_dp->DP |= intel_dp->color_range;
 
                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1375,34 +1406,59 @@ static char     *link_train_names[] = {
  * These are source-specific values; current Intel hardware supports
  * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
  */
-#define I830_DP_VOLTAGE_MAX        DP_TRAIN_VOLTAGE_SWING_800
-#define I830_DP_VOLTAGE_MAX_CPT            DP_TRAIN_VOLTAGE_SWING_1200
 
 static uint8_t
-intel_dp_pre_emphasis_max(uint8_t voltage_swing)
+intel_dp_voltage_max(struct intel_dp *intel_dp)
 {
-       switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
-       case DP_TRAIN_VOLTAGE_SWING_400:
-               return DP_TRAIN_PRE_EMPHASIS_6;
-       case DP_TRAIN_VOLTAGE_SWING_600:
-               return DP_TRAIN_PRE_EMPHASIS_6;
-       case DP_TRAIN_VOLTAGE_SWING_800:
-               return DP_TRAIN_PRE_EMPHASIS_3_5;
-       case DP_TRAIN_VOLTAGE_SWING_1200:
-       default:
-               return DP_TRAIN_PRE_EMPHASIS_0;
+       struct drm_device *dev = intel_dp->base.base.dev;
+
+       if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
+               return DP_TRAIN_VOLTAGE_SWING_800;
+       else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+               return DP_TRAIN_VOLTAGE_SWING_1200;
+       else
+               return DP_TRAIN_VOLTAGE_SWING_800;
+}
+
+static uint8_t
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
+{
+       struct drm_device *dev = intel_dp->base.base.dev;
+
+       if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+               switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+               case DP_TRAIN_VOLTAGE_SWING_400:
+                       return DP_TRAIN_PRE_EMPHASIS_6;
+               case DP_TRAIN_VOLTAGE_SWING_600:
+               case DP_TRAIN_VOLTAGE_SWING_800:
+                       return DP_TRAIN_PRE_EMPHASIS_3_5;
+               default:
+                       return DP_TRAIN_PRE_EMPHASIS_0;
+               }
+       } else {
+               switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+               case DP_TRAIN_VOLTAGE_SWING_400:
+                       return DP_TRAIN_PRE_EMPHASIS_6;
+               case DP_TRAIN_VOLTAGE_SWING_600:
+                       return DP_TRAIN_PRE_EMPHASIS_6;
+               case DP_TRAIN_VOLTAGE_SWING_800:
+                       return DP_TRAIN_PRE_EMPHASIS_3_5;
+               case DP_TRAIN_VOLTAGE_SWING_1200:
+               default:
+                       return DP_TRAIN_PRE_EMPHASIS_0;
+               }
        }
 }
 
 static void
 intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
-       struct drm_device *dev = intel_dp->base.base.dev;
        uint8_t v = 0;
        uint8_t p = 0;
        int lane;
        uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
-       int voltage_max;
+       uint8_t voltage_max;
+       uint8_t preemph_max;
 
        for (lane = 0; lane < intel_dp->lane_count; lane++) {
                uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
@@ -1414,15 +1470,13 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
                        p = this_p;
        }
 
-       if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
-               voltage_max = I830_DP_VOLTAGE_MAX_CPT;
-       else
-               voltage_max = I830_DP_VOLTAGE_MAX;
+       voltage_max = intel_dp_voltage_max(intel_dp);
        if (v >= voltage_max)
                v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
 
-       if (p >= intel_dp_pre_emphasis_max(v))
-               p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+       preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
+       if (p >= preemph_max)
+               p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
 
        for (lane = 0; lane < 4; lane++)
                intel_dp->train_set[lane] = v | p;
@@ -1494,6 +1548,37 @@ intel_gen6_edp_signal_levels(uint8_t train_set)
        }
 }
 
+/* Gen7's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen7_edp_signal_levels(uint8_t train_set)
+{
+       int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+                                        DP_TRAIN_PRE_EMPHASIS_MASK);
+       switch (signal_levels) {
+       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+               return EDP_LINK_TRAIN_400MV_0DB_IVB;
+       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+               return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
+       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+               return EDP_LINK_TRAIN_400MV_6DB_IVB;
+
+       case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+               return EDP_LINK_TRAIN_600MV_0DB_IVB;
+       case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+               return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
+
+       case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+               return EDP_LINK_TRAIN_800MV_0DB_IVB;
+       case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+               return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
+
+       default:
+               DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+                             " 0x%x\n", signal_levels);
+               return EDP_LINK_TRAIN_500MV_0DB_IVB;
+       }
+}
+
 static uint8_t
 intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
                      int lane)
@@ -1599,7 +1684,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
                                  DP_LINK_CONFIGURATION_SIZE);
 
        DP |= DP_PORT_EN;
-       if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+
+       if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
                DP &= ~DP_LINK_TRAIN_MASK_CPT;
        else
                DP &= ~DP_LINK_TRAIN_MASK;
@@ -1613,7 +1699,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
                uint8_t     link_status[DP_LINK_STATUS_SIZE];
                uint32_t    signal_levels;
 
-               if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+
+               if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+                       signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+                       DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+               } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
                        signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
                        DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
                } else {
@@ -1622,7 +1712,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
                        DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
                }
 
-               if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+               if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
                        reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
                else
                        reg = DP | DP_LINK_TRAIN_PAT_1;
@@ -1703,7 +1793,10 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
                        break;
                }
 
-               if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+               if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+                       signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+                       DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+               } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
                        signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
                        DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
                } else {
@@ -1711,7 +1804,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
                        DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
                }
 
-               if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+               if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
                        reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
                else
                        reg = DP | DP_LINK_TRAIN_PAT_2;
@@ -1752,7 +1845,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
                ++tries;
        }
 
-       if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+       if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
                reg = DP | DP_LINK_TRAIN_OFF_CPT;
        else
                reg = DP | DP_LINK_TRAIN_OFF;
@@ -1782,7 +1875,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
                udelay(100);
        }
 
-       if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) {
+       if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
                DP &= ~DP_LINK_TRAIN_MASK_CPT;
                I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
        } else {
@@ -1794,7 +1887,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
        msleep(17);
 
        if (is_edp(intel_dp)) {
-               if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+               if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
                        DP |= DP_LINK_TRAIN_OFF_CPT;
                else
                        DP |= DP_LINK_TRAIN_OFF;
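
To make the 6bpc fallback in intel_dp_mode_valid() concrete, here is a worked check built on intel_dp_link_required() as shown above. intel_dp_max_data_rate() is not part of this excerpt, so the (link_clock * lanes * 8) / 10 form below is an assumption about that existing helper, and the mode numbers are purely illustrative:

#include <stdio.h>

static int link_required(int pixel_clock_khz, int bpp)
{
	return (pixel_clock_khz * bpp + 9) / 10;   /* as in the patch */
}

static int max_data_rate(int link_clock_khz, int lanes)
{
	return (link_clock_khz * lanes * 8) / 10;  /* assumed 8b/10b payload rate */
}

int main(void)
{
	int max = max_data_rate(270000, 2);        /* 2 lanes at 2.7 GHz */

	printf("24bpp: %d vs %d\n", link_required(193000, 24), max);
	/* 463200 > 432000: rejected as MODE_CLOCK_HIGH at full depth      */
	printf("18bpp: %d vs %d\n", link_required(193000, 18), max);
	/* 347400 <= 432000: fits once INTEL_MODE_DP_FORCE_6BPC is applied */
	return 0;
}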
index bd9a604..a1b4343 100644 (file)
 /* drm_display_mode->private_flags */
 #define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
 #define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
+#define INTEL_MODE_DP_FORCE_6BPC (0x10)
 
 static inline void
 intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
index 42f165a..e441911 100644 (file)
@@ -715,6 +715,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
                },
        },
+       {
+               .callback = intel_no_lvds_dmi_callback,
+               .ident = "Asus AT5NM10T-I",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+                       DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
+               },
+       },
 
        { }     /* terminating entry */
 };
index 21f60b7..04d79fd 100644 (file)
@@ -178,13 +178,10 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
        if (HAS_PCH_SPLIT(dev)) {
                max >>= 16;
        } else {
-               if (IS_PINEVIEW(dev)) {
+               if (INTEL_INFO(dev)->gen < 4)
                        max >>= 17;
-               } else {
+               else
                        max >>= 16;
-                       if (INTEL_INFO(dev)->gen < 4)
-                               max &= ~1;
-               }
 
                if (is_backlight_combination_mode(dev))
                        max *= 0xff;
@@ -203,13 +200,12 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
                val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
        } else {
                val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
-               if (IS_PINEVIEW(dev))
+               if (INTEL_INFO(dev)->gen < 4)
                        val >>= 1;
 
                if (is_backlight_combination_mode(dev)) {
                        u8 lbpc;
 
-                       val &= ~1;
                        pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
                        val *= lbpc;
                }
@@ -246,11 +242,9 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
        }
 
        tmp = I915_READ(BLC_PWM_CTL);
-       if (IS_PINEVIEW(dev)) {
-               tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
+       if (INTEL_INFO(dev)->gen < 4)
                level <<= 1;
-       } else
-               tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+       tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
        I915_WRITE(BLC_PWM_CTL, tmp | level);
 }
 
index 3003fb2..f7b9268 100644 (file)
@@ -50,6 +50,7 @@
 #define IS_TMDS(c)     (c->output_flag & SDVO_TMDS_MASK)
 #define IS_LVDS(c)     (c->output_flag & SDVO_LVDS_MASK)
 #define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
+#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
 
 
 static const char *tv_format_names[] = {
@@ -1086,8 +1087,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
                }
                sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
        }
-       if (intel_crtc->pipe == 1)
-               sdvox |= SDVO_PIPE_B_SELECT;
+
+       if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
+               sdvox |= TRANSCODER_CPT(intel_crtc->pipe);
+       else
+               sdvox |= TRANSCODER(intel_crtc->pipe);
+
        if (intel_sdvo->has_hdmi_audio)
                sdvox |= SDVO_AUDIO_ENABLE;
 
@@ -1314,6 +1319,18 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
        return status;
 }
 
+static bool
+intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
+                                 struct edid *edid)
+{
+       bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+       bool connector_is_digital = !!IS_DIGITAL(sdvo);
+
+       DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
+                     connector_is_digital, monitor_is_digital);
+       return connector_is_digital == monitor_is_digital;
+}
+
 static enum drm_connector_status
 intel_sdvo_detect(struct drm_connector *connector, bool force)
 {
@@ -1358,10 +1375,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
                if (edid == NULL)
                        edid = intel_sdvo_get_analog_edid(connector);
                if (edid != NULL) {
-                       if (edid->input & DRM_EDID_INPUT_DIGITAL)
-                               ret = connector_status_disconnected;
-                       else
+                       if (intel_sdvo_connector_matches_edid(intel_sdvo_connector,
+                                                             edid))
                                ret = connector_status_connected;
+                       else
+                               ret = connector_status_disconnected;
+
                        connector->display_info.raw_edid = NULL;
                        kfree(edid);
                } else
@@ -1402,11 +1421,8 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
                edid = intel_sdvo_get_analog_edid(connector);
 
        if (edid != NULL) {
-               struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
-               bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
-               bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
-
-               if (connector_is_digital == monitor_is_digital) {
+               if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
+                                                     edid)) {
                        drm_mode_connector_update_edid_property(connector, edid);
                        drm_add_edid_modes(connector, edid);
                }
index ddbabef..b12fd2c 100644 (file)
@@ -369,3 +369,48 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
        spin_unlock_irqrestore(&dev->event_lock, flags);
        return 0;
 }
+
+int
+nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+                           struct drm_mode_create_dumb *args)
+{
+       struct nouveau_bo *bo;
+       int ret;
+
+       args->pitch = roundup(args->width * (args->bpp / 8), 256);
+       args->size = args->pitch * args->height;
+       args->size = roundup(args->size, PAGE_SIZE);
+
+       ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo);
+       if (ret)
+               return ret;
+
+       ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle);
+       drm_gem_object_unreference_unlocked(bo->gem);
+       return ret;
+}
+
+int
+nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
+                            uint32_t handle)
+{
+       return drm_gem_handle_delete(file_priv, handle);
+}
+
+int
+nouveau_display_dumb_map_offset(struct drm_file *file_priv,
+                               struct drm_device *dev,
+                               uint32_t handle, uint64_t *poffset)
+{
+       struct drm_gem_object *gem;
+
+       gem = drm_gem_object_lookup(dev, file_priv, handle);
+       if (gem) {
+               struct nouveau_bo *bo = gem->driver_private;
+               *poffset = bo->bo.addr_space_offset;
+               drm_gem_object_unreference_unlocked(gem);
+               return 0;
+       }
+
+       return -ENOENT;
+}
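
These hooks wire nouveau into the generic DRM dumb-buffer ioctls, so a modesetting client can allocate and map a CPU-visible scanout buffer without driver-specific code. A rough userspace sketch of that flow (error handling and cleanup omitted; assumes the kernel's DRM uapi headers and an accessible /dev/dri/card0):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	struct drm_mode_create_dumb create = { .width = 1024, .height = 768, .bpp = 32 };
	struct drm_mode_map_dumb map = { 0 };
	void *fb;

	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);  /* driver fills pitch, size, handle */
	map.handle = create.handle;
	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);        /* driver returns an mmap offset */

	fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, map.offset);
	memset(fb, 0x80, create.size);                   /* grey fill to show CPU access */
	return 0;
}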
index 9f7bb12..9791d13 100644 (file)
@@ -433,6 +433,10 @@ static struct drm_driver driver = {
        .gem_open_object = nouveau_gem_object_open,
        .gem_close_object = nouveau_gem_object_close,
 
+       .dumb_create = nouveau_display_dumb_create,
+       .dumb_map_offset = nouveau_display_dumb_map_offset,
+       .dumb_destroy = nouveau_display_dumb_destroy,
+
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
 #ifdef GIT_REVISION
index 29837da..4c0be3a 100644 (file)
@@ -1418,6 +1418,12 @@ int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                           struct drm_pending_vblank_event *event);
 int nouveau_finish_page_flip(struct nouveau_channel *,
                             struct nouveau_page_flip_state *);
+int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
+                               struct drm_mode_create_dumb *args);
+int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
+                                   uint32_t handle, uint64_t *offset);
+int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
+                                uint32_t handle);
 
 /* nv10_gpio.c */
 int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
index 02222c5..960c0ae 100644 (file)
@@ -680,7 +680,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
                return ret;
        }
 
-       ret = drm_mm_init(&chan->ramin_heap, base, size);
+       ret = drm_mm_init(&chan->ramin_heap, base, size - base);
        if (ret) {
                NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
                nouveau_gpuobj_ref(NULL, &chan->ramin);
index b75258a..c8a463b 100644 (file)
@@ -67,7 +67,10 @@ nouveau_sgdma_clear(struct ttm_backend *be)
                        pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
                                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                }
+               nvbe->unmap_pages = false;
        }
+
+       nvbe->pages = NULL;
 }
 
 static void
index d23ca00..06de250 100644 (file)
@@ -616,7 +616,7 @@ nv50_display_unk10_handler(struct drm_device *dev)
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv50_display *disp = nv50_display(dev);
        u32 unk30 = nv_rd32(dev, 0x610030), mc;
-       int i, crtc, or, type = OUTPUT_ANY;
+       int i, crtc, or = 0, type = OUTPUT_ANY;
 
        NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
        disp->irq.dcb = NULL;
@@ -708,7 +708,7 @@ nv50_display_unk20_handler(struct drm_device *dev)
        struct nv50_display *disp = nv50_display(dev);
        u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0;
        struct dcb_entry *dcb;
-       int i, crtc, or, type = OUTPUT_ANY;
+       int i, crtc, or = 0, type = OUTPUT_ANY;
 
        NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
        dcb = disp->irq.dcb;
index a74e501..ecfafd7 100644 (file)
@@ -381,6 +381,8 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
        u8  tpnr[GPC_MAX];
        int i, gpc, tpc;
 
+       nv_wr32(dev, TP_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */
+
        /*
         *      TP      ROP UNKVAL(magic_not_rop_nr)
         * 450: 4/0/0/0 2        3
index 23d63b4..cb006a7 100644 (file)
@@ -780,7 +780,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
                        continue;
 
                if (nv_partner != nv_encoder &&
-                   nv_partner->dcb->or == nv_encoder->or) {
+                   nv_partner->dcb->or == nv_encoder->dcb->or) {
                        if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
                                return;
                        break;
index 87631fe..2b97262 100644 (file)
@@ -1107,9 +1107,40 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
                return -EINVAL;
        }
 
-       if (tiling_flags & RADEON_TILING_MACRO)
+       if (tiling_flags & RADEON_TILING_MACRO) {
+               if (rdev->family >= CHIP_CAYMAN)
+                       tmp = rdev->config.cayman.tile_config;
+               else
+                       tmp = rdev->config.evergreen.tile_config;
+
+               switch ((tmp & 0xf0) >> 4) {
+               case 0: /* 4 banks */
+                       fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
+                       break;
+               case 1: /* 8 banks */
+               default:
+                       fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
+                       break;
+               case 2: /* 16 banks */
+                       fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
+                       break;
+               }
+
+               switch ((tmp & 0xf000) >> 12) {
+               case 0: /* 1KB rows */
+               default:
+                       fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB);
+                       break;
+               case 1: /* 2KB rows */
+                       fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB);
+                       break;
+               case 2: /* 4KB rows */
+                       fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB);
+                       break;
+               }
+
                fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
-       else if (tiling_flags & RADEON_TILING_MICRO)
+       } else if (tiling_flags & RADEON_TILING_MICRO)
                fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
 
        switch (radeon_crtc->crtc_id) {
index 1d603a3..5e00d16 100644 (file)
@@ -82,6 +82,7 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
        u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
+       int i;
 
        /* Lock the graphics update lock */
        tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
@@ -99,7 +100,11 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
               (u32)crtc_base);
 
        /* Wait for update_pending to go high. */
-       while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING));
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
+                       break;
+               udelay(1);
+       }
        DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
 
        /* Unlock the lock, so double-buffering can take place inside vblank */
index 38e1bda..cd4590a 100644 (file)
@@ -38,6 +38,7 @@ struct evergreen_cs_track {
        u32                     group_size;
        u32                     nbanks;
        u32                     npipes;
+       u32                     row_size;
        /* value we track */
        u32                     nsamples;
        u32                     cb_color_base_last[12];
@@ -77,6 +78,44 @@ struct evergreen_cs_track {
        struct radeon_bo        *db_s_write_bo;
 };
 
+static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
+{
+       if (tiling_flags & RADEON_TILING_MACRO)
+               return ARRAY_2D_TILED_THIN1;
+       else if (tiling_flags & RADEON_TILING_MICRO)
+               return ARRAY_1D_TILED_THIN1;
+       else
+               return ARRAY_LINEAR_GENERAL;
+}
+
+static u32 evergreen_cs_get_num_banks(u32 nbanks)
+{
+       switch (nbanks) {
+       case 2:
+               return ADDR_SURF_2_BANK;
+       case 4:
+               return ADDR_SURF_4_BANK;
+       case 8:
+       default:
+               return ADDR_SURF_8_BANK;
+       case 16:
+               return ADDR_SURF_16_BANK;
+       }
+}
+
+static u32 evergreen_cs_get_tile_split(u32 row_size)
+{
+       switch (row_size) {
+       case 1:
+       default:
+               return ADDR_SURF_TILE_SPLIT_1KB;
+       case 2:
+               return ADDR_SURF_TILE_SPLIT_2KB;
+       case 4:
+               return ADDR_SURF_TILE_SPLIT_4KB;
+       }
+}
+
 static void evergreen_cs_track_init(struct evergreen_cs_track *track)
 {
        int i;
@@ -490,12 +529,11 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                        }
                        ib[idx] &= ~Z_ARRAY_MODE(0xf);
                        track->db_z_info &= ~Z_ARRAY_MODE(0xf);
+                       ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+                       track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
                        if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
-                               ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-                               track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-                       } else {
-                               ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
-                               track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+                               ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+                               ib[idx] |= DB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
                        }
                }
                break;
@@ -618,13 +656,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                                                "0x%04X\n", reg);
                                return -EINVAL;
                        }
-                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
-                               ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-                               track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-                       } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
-                               ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
-                               track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
-                       }
+                       ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+                       track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
                }
                break;
        case CB_COLOR8_INFO:
@@ -640,13 +673,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                                                "0x%04X\n", reg);
                                return -EINVAL;
                        }
-                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
-                               ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-                               track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-                       } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
-                               ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
-                               track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
-                       }
+                       ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+                       track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
                }
                break;
        case CB_COLOR0_PITCH:
@@ -701,6 +729,16 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
        case CB_COLOR9_ATTRIB:
        case CB_COLOR10_ATTRIB:
        case CB_COLOR11_ATTRIB:
+               r = evergreen_cs_packet_next_reloc(p, &reloc);
+               if (r) {
+                       dev_warn(p->dev, "bad SET_CONTEXT_REG "
+                                       "0x%04X\n", reg);
+                       return -EINVAL;
+               }
+               if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+                       ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+                       ib[idx] |= CB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
+               }
                break;
        case CB_COLOR0_DIM:
        case CB_COLOR1_DIM:
@@ -1318,10 +1356,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                                }
                                ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
                                if (!p->keep_tiling_flags) {
-                                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
-                                               ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-                                       else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
-                                               ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+                                       ib[idx+1+(i*8)+1] |=
+                                               TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+                                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+                                               ib[idx+1+(i*8)+6] |=
+                                                       TEX_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
+                                               ib[idx+1+(i*8)+7] |=
+                                                       TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+                                       }
                                }
                                texture = reloc->robj;
                                /* tex mip base */
@@ -1422,6 +1464,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
 {
        struct radeon_cs_packet pkt;
        struct evergreen_cs_track *track;
+       u32 tmp;
        int r;
 
        if (p->track == NULL) {
@@ -1430,9 +1473,63 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
                if (track == NULL)
                        return -ENOMEM;
                evergreen_cs_track_init(track);
-               track->npipes = p->rdev->config.evergreen.tiling_npipes;
-               track->nbanks = p->rdev->config.evergreen.tiling_nbanks;
-               track->group_size = p->rdev->config.evergreen.tiling_group_size;
+               if (p->rdev->family >= CHIP_CAYMAN)
+                       tmp = p->rdev->config.cayman.tile_config;
+               else
+                       tmp = p->rdev->config.evergreen.tile_config;
+
+               switch (tmp & 0xf) {
+               case 0:
+                       track->npipes = 1;
+                       break;
+               case 1:
+               default:
+                       track->npipes = 2;
+                       break;
+               case 2:
+                       track->npipes = 4;
+                       break;
+               case 3:
+                       track->npipes = 8;
+                       break;
+               }
+
+               switch ((tmp & 0xf0) >> 4) {
+               case 0:
+                       track->nbanks = 4;
+                       break;
+               case 1:
+               default:
+                       track->nbanks = 8;
+                       break;
+               case 2:
+                       track->nbanks = 16;
+                       break;
+               }
+
+               switch ((tmp & 0xf00) >> 8) {
+               case 0:
+                       track->group_size = 256;
+                       break;
+               case 1:
+               default:
+                       track->group_size = 512;
+                       break;
+               }
+
+               switch ((tmp & 0xf000) >> 12) {
+               case 0:
+                       track->row_size = 1;
+                       break;
+               case 1:
+               default:
+                       track->row_size = 2;
+                       break;
+               case 2:
+                       track->row_size = 4;
+                       break;
+               }
+
                p->track = track;
        }
        do {
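
The new block in evergreen_cs_parse() decodes the same tile_config word the display code reads above: bits 3:0 select the pipe count, 7:4 the bank count, 11:8 the group size and 15:12 the row size. A standalone decoder mirroring those switch statements, with one made-up sample value:

#include <stdio.h>

static int decode_npipes(unsigned tc)
{
	int v = tc & 0xf;
	return v == 0 ? 1 : v == 2 ? 4 : v == 3 ? 8 : 2;
}

static int decode_nbanks(unsigned tc)
{
	int v = (tc >> 4) & 0xf;
	return v == 0 ? 4 : v == 2 ? 16 : 8;
}

static int decode_group_size(unsigned tc)
{
	return ((tc >> 8) & 0xf) == 0 ? 256 : 512;
}

static int decode_row_size(unsigned tc)
{
	int v = (tc >> 12) & 0xf;
	return v == 0 ? 1 : v == 2 ? 4 : 2;
}

int main(void)
{
	unsigned tc = 0x2211;   /* illustrative value, not read from hardware */

	printf("npipes=%d nbanks=%d group_size=%d row_size=%d\n",
	       decode_npipes(tc), decode_nbanks(tc),
	       decode_group_size(tc), decode_row_size(tc));
	/* prints: npipes=2 nbanks=8 group_size=512 row_size=4 */
	return 0;
}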
index c781c92..7d7f215 100644 (file)
 #       define EVERGREEN_GRPH_DEPTH_8BPP                0
 #       define EVERGREEN_GRPH_DEPTH_16BPP               1
 #       define EVERGREEN_GRPH_DEPTH_32BPP               2
+#       define EVERGREEN_GRPH_NUM_BANKS(x)              (((x) & 0x3) << 2)
+#       define EVERGREEN_ADDR_SURF_2_BANK               0
+#       define EVERGREEN_ADDR_SURF_4_BANK               1
+#       define EVERGREEN_ADDR_SURF_8_BANK               2
+#       define EVERGREEN_ADDR_SURF_16_BANK              3
+#       define EVERGREEN_GRPH_Z(x)                      (((x) & 0x3) << 4)
+#       define EVERGREEN_GRPH_BANK_WIDTH(x)             (((x) & 0x3) << 6)
+#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_1         0
+#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_2         1
+#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_4         2
+#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_8         3
 #       define EVERGREEN_GRPH_FORMAT(x)                 (((x) & 0x7) << 8)
 /* 8 BPP */
 #       define EVERGREEN_GRPH_FORMAT_INDEXED            0
 #       define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102     5
 #       define EVERGREEN_GRPH_FORMAT_RGB111110          6
 #       define EVERGREEN_GRPH_FORMAT_BGR101111          7
+#       define EVERGREEN_GRPH_BANK_HEIGHT(x)            (((x) & 0x3) << 11)
+#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1        0
+#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2        1
+#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4        2
+#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8        3
+#       define EVERGREEN_GRPH_TILE_SPLIT(x)             (((x) & 0x7) << 13)
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B       0
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B      1
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B      2
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B      3
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB       4
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB       5
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB       6
+#       define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x)      (((x) & 0x3) << 18)
+#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1  0
+#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2  1
+#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4  2
+#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8  3
 #       define EVERGREEN_GRPH_ARRAY_MODE(x)             (((x) & 0x7) << 20)
 #       define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL      0
 #       define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED      1
index b937c49..e00039e 100644 (file)
 #define DB_HTILE_DATA_BASE                             0x28014
 #define DB_Z_INFO                                      0x28040
 #       define Z_ARRAY_MODE(x)                          ((x) << 4)
+#       define DB_TILE_SPLIT(x)                         (((x) & 0x7) << 8)
+#       define DB_NUM_BANKS(x)                          (((x) & 0x3) << 12)
+#       define DB_BANK_WIDTH(x)                         (((x) & 0x3) << 16)
+#       define DB_BANK_HEIGHT(x)                        (((x) & 0x3) << 20)
 #define DB_STENCIL_INFO                                        0x28044
 #define DB_Z_READ_BASE                                 0x28048
 #define DB_STENCIL_READ_BASE                           0x2804c
 #      define CB_SF_EXPORT_FULL                        0
 #      define CB_SF_EXPORT_NORM                        1
 #define        CB_COLOR0_ATTRIB                                0x28c74
+#       define CB_TILE_SPLIT(x)                         (((x) & 0x7) << 5)
+#       define ADDR_SURF_TILE_SPLIT_64B                 0
+#       define ADDR_SURF_TILE_SPLIT_128B                1
+#       define ADDR_SURF_TILE_SPLIT_256B                2
+#       define ADDR_SURF_TILE_SPLIT_512B                3
+#       define ADDR_SURF_TILE_SPLIT_1KB                 4
+#       define ADDR_SURF_TILE_SPLIT_2KB                 5
+#       define ADDR_SURF_TILE_SPLIT_4KB                 6
+#       define CB_NUM_BANKS(x)                          (((x) & 0x3) << 10)
+#       define ADDR_SURF_2_BANK                         0
+#       define ADDR_SURF_4_BANK                         1
+#       define ADDR_SURF_8_BANK                         2
+#       define ADDR_SURF_16_BANK                        3
+#       define CB_BANK_WIDTH(x)                         (((x) & 0x3) << 13)
+#       define ADDR_SURF_BANK_WIDTH_1                   0
+#       define ADDR_SURF_BANK_WIDTH_2                   1
+#       define ADDR_SURF_BANK_WIDTH_4                   2
+#       define ADDR_SURF_BANK_WIDTH_8                   3
+#       define CB_BANK_HEIGHT(x)                        (((x) & 0x3) << 16)
+#       define ADDR_SURF_BANK_HEIGHT_1                  0
+#       define ADDR_SURF_BANK_HEIGHT_2                  1
+#       define ADDR_SURF_BANK_HEIGHT_4                  2
+#       define ADDR_SURF_BANK_HEIGHT_8                  3
 #define        CB_COLOR0_DIM                                   0x28c78
 /* only CB0-7 blocks have these regs */
 #define        CB_COLOR0_CMASK                                 0x28c7c
 #      define SQ_SEL_1                                 5
 #define SQ_TEX_RESOURCE_WORD5_0                         0x30014
 #define SQ_TEX_RESOURCE_WORD6_0                         0x30018
+#       define TEX_TILE_SPLIT(x)                        (((x) & 0x7) << 29)
 #define SQ_TEX_RESOURCE_WORD7_0                         0x3001c
+#       define TEX_BANK_WIDTH(x)                        (((x) & 0x3) << 8)
+#       define TEX_BANK_HEIGHT(x)                       (((x) & 0x3) << 10)
+#       define TEX_NUM_BANKS(x)                         (((x) & 0x3) << 16)
 
 #define SQ_VTX_CONSTANT_WORD0_0                                0x30000
 #define SQ_VTX_CONSTANT_WORD1_0                                0x30004
index ad158ea..bfc08f6 100644
@@ -187,13 +187,18 @@ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
        u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
+       int i;
 
        /* Lock the graphics update lock */
        /* update the scanout addresses */
        WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
 
        /* Wait for update_pending to go high. */
-       while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET));
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
+                       break;
+               udelay(1);
+       }
        DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
 
        /* Unlock the lock, so double-buffering can take place inside vblank */
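
The three page-flip hunks in this merge (this one for r100, plus the rs600 and rv770 hunks further down) all make the same change: an unbounded busy-wait on a status bit becomes a poll loop bounded by rdev->usec_timeout, so a flip that never signals completion times out instead of hanging the CPU. A minimal user-space sketch of that bounded-poll pattern, using hypothetical update_pending()/TIMEOUT_US stand-ins rather than the driver's register reads:

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    #define TIMEOUT_US 100000          /* hypothetical budget, playing the role of rdev->usec_timeout */

    /* stand-in for reading the hardware status register */
    static bool update_pending(void)
    {
            static int polls;
            return ++polls > 5;        /* pretend the bit goes high after a few polls */
    }

    int main(void)
    {
            bool done = false;
            int i;

            /* bounded poll: give up after TIMEOUT_US iterations instead of spinning forever */
            for (i = 0; i < TIMEOUT_US; i++) {
                    if (update_pending()) {
                            done = true;
                            break;
                    }
                    usleep(1);         /* user-space analog of udelay(1) */
            }

            printf(done ? "update pending went high\n" : "timed out waiting for flip\n");
            return done ? 0 : 1;
    }
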
index 3f6636b..3516a60 100644
@@ -35,7 +35,8 @@ static int radeon_atif_call(acpi_handle handle)
 
        /* Fail only if calling the method fails and ATIF is supported */
        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
-               printk(KERN_DEBUG "failed to evaluate ATIF got %s\n", acpi_format_exception(status));
+               DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
+                                acpi_format_exception(status));
                kfree(buffer.pointer);
                return 1;
        }
@@ -50,13 +51,13 @@ int radeon_acpi_init(struct radeon_device *rdev)
        acpi_handle handle;
        int ret;
 
-       /* No need to proceed if we're sure that ATIF is not supported */
-       if (!ASIC_IS_AVIVO(rdev) || !rdev->bios)
-               return 0;
-
        /* Get the device handle */
        handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
 
+       /* No need to proceed if we're sure that ATIF is not supported */
+       if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
+               return 0;
+
        /* Call the ATIF method */
        ret = radeon_atif_call(handle);
        if (ret)
index 06e413e..4b27efa 100644
@@ -233,13 +233,12 @@ u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
                switch (radeon_encoder->encoder_id) {
                case ENCODER_OBJECT_ID_TRAVIS:
                case ENCODER_OBJECT_ID_NUTMEG:
-                       return true;
+                       return radeon_encoder->encoder_id;
                default:
-                       return false;
+                       return ENCODER_OBJECT_ID_NONE;
                }
        }
-
-       return false;
+       return ENCODER_OBJECT_ID_NONE;
 }
 
 void radeon_panel_mode_fixup(struct drm_encoder *encoder,
index 481b99e..b1053d6 100644
@@ -62,6 +62,7 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
        u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+       int i;
 
        /* Lock the graphics update lock */
        tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -74,7 +75,11 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
               (u32)crtc_base);
 
        /* Wait for update_pending to go high. */
-       while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+                       break;
+               udelay(1);
+       }
        DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
 
        /* Unlock the lock, so double-buffering can take place inside vblank */
index a983f41..23ae1c6 100644
@@ -47,6 +47,7 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
        u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+       int i;
 
        /* Lock the graphics update lock */
        tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -66,7 +67,11 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
               (u32)crtc_base);
 
        /* Wait for update_pending to go high. */
-       while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+                       break;
+               udelay(1);
+       }
        DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
 
        /* Unlock the lock, so double-buffering can take place inside vblank */
index 8cca91a..dc27970 100644
@@ -390,6 +390,11 @@ extern int vmw_context_check(struct vmw_private *dev_priv,
                             struct ttm_object_file *tfile,
                             int id,
                             struct vmw_resource **p_res);
+extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+                                 struct ttm_object_file *tfile,
+                                 uint32_t handle,
+                                 struct vmw_surface **out_surf,
+                                 struct vmw_dma_buffer **out_buf);
 extern void vmw_surface_res_free(struct vmw_resource *res);
 extern int vmw_surface_init(struct vmw_private *dev_priv,
                            struct vmw_surface *srf,
index 03bbc2a..a0c2f12 100644
@@ -33,6 +33,7 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 {
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
        uint32_t fifo_min, hwversion;
+       const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
        if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
                return false;
@@ -41,7 +42,12 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
        if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
                return false;
 
-       hwversion = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+       hwversion = ioread32(fifo_mem +
+                            ((fifo->capabilities &
+                              SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+                             SVGA_FIFO_3D_HWVERSION_REVISED :
+                             SVGA_FIFO_3D_HWVERSION));
+
        if (hwversion == 0)
                return false;
 
index 3f63435..66917c6 100644
@@ -58,8 +58,14 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
        case DRM_VMW_PARAM_FIFO_HW_VERSION:
        {
                __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
-
-               param->value = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+               const struct vmw_fifo_state *fifo = &dev_priv->fifo;
+
+               param->value =
+                       ioread32(fifo_mem +
+                                ((fifo->capabilities &
+                                  SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+                                 SVGA_FIFO_3D_HWVERSION_REVISED :
+                                 SVGA_FIFO_3D_HWVERSION));
                break;
        }
        default:
@@ -140,7 +146,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
                goto out_clips;
        }
 
-       clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+       clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
        if (clips == NULL) {
                DRM_ERROR("Failed to allocate clip rect list.\n");
                ret = -ENOMEM;
@@ -166,13 +172,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
                ret = -EINVAL;
                goto out_no_fb;
        }
-
        vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
-       if (!vfb->dmabuf) {
-               DRM_ERROR("Framebuffer not dmabuf backed.\n");
-               ret = -EINVAL;
-               goto out_no_fb;
-       }
 
        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
@@ -232,7 +232,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
                goto out_clips;
        }
 
-       clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+       clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
        if (clips == NULL) {
                DRM_ERROR("Failed to allocate clip rect list.\n");
                ret = -ENOMEM;
index 880e285..8aa1dbb 100644
 /* Might need a hrtimer here? */
 #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
 
+
+struct vmw_clip_rect {
+       int x1, x2, y1, y2;
+};
+
+/**
+ * Clip @num_rects rects in @rects against @clip, storing the
+ * results in @out_rects and the number of passing rects in @out_num.
+ */
+void vmw_clip_cliprects(struct drm_clip_rect *rects,
+                       int num_rects,
+                       struct vmw_clip_rect clip,
+                       SVGASignedRect *out_rects,
+                       int *out_num)
+{
+       int i, k;
+
+       for (i = 0, k = 0; i < num_rects; i++) {
+               int x1 = max_t(int, clip.x1, rects[i].x1);
+               int y1 = max_t(int, clip.y1, rects[i].y1);
+               int x2 = min_t(int, clip.x2, rects[i].x2);
+               int y2 = min_t(int, clip.y2, rects[i].y2);
+
+               if (x1 >= x2)
+                       continue;
+               if (y1 >= y2)
+                       continue;
+
+               out_rects[k].left   = x1;
+               out_rects[k].top    = y1;
+               out_rects[k].right  = x2;
+               out_rects[k].bottom = y2;
+               k++;
+       }
+
+       *out_num = k;
+}
+
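
vmw_clip_cliprects() above intersects each input rect with a single clip rect and keeps only the results that still have area. A stand-alone sketch of the same intersect-and-keep logic, using hypothetical rect/clip_rects names in place of the SVGA types:

    #include <stdio.h>

    /* hypothetical plain-int stand-ins for the SVGA/vmw types */
    struct rect { int x1, y1, x2, y2; };

    /* same intersect-and-keep logic as vmw_clip_cliprects(), minus the kernel types */
    static int clip_rects(const struct rect *in, int n, struct rect clip, struct rect *out)
    {
            int i, k = 0;

            for (i = 0; i < n; i++) {
                    struct rect r;

                    r.x1 = in[i].x1 > clip.x1 ? in[i].x1 : clip.x1;
                    r.y1 = in[i].y1 > clip.y1 ? in[i].y1 : clip.y1;
                    r.x2 = in[i].x2 < clip.x2 ? in[i].x2 : clip.x2;
                    r.y2 = in[i].y2 < clip.y2 ? in[i].y2 : clip.y2;
                    if (r.x1 < r.x2 && r.y1 < r.y2)    /* keep only rects with area left */
                            out[k++] = r;
            }
            return k;
    }

    int main(void)
    {
            struct rect screen = { 0, 0, 100, 100 };
            struct rect in[] = { { -10, -10, 5, 5 }, { -20, -20, -5, -5 }, { 90, 90, 200, 200 } };
            struct rect out[3];
            int i, n = clip_rects(in, 3, screen, out);

            for (i = 0; i < n; i++)                    /* the middle rect is clipped away entirely */
                    printf("kept (%d,%d)-(%d,%d)\n", out[i].x1, out[i].y1, out[i].x2, out[i].y2);
            return 0;
    }
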
 void vmw_display_unit_cleanup(struct vmw_display_unit *du)
 {
        if (du->cursor_surface)
@@ -82,6 +120,43 @@ int vmw_cursor_update_image(struct vmw_private *dev_priv,
        return 0;
 }
 
+int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
+                            struct vmw_dma_buffer *dmabuf,
+                            u32 width, u32 height,
+                            u32 hotspotX, u32 hotspotY)
+{
+       struct ttm_bo_kmap_obj map;
+       unsigned long kmap_offset;
+       unsigned long kmap_num;
+       void *virtual;
+       bool dummy;
+       int ret;
+
+       kmap_offset = 0;
+       kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+       ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("reserve failed\n");
+               return -EINVAL;
+       }
+
+       ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
+       if (unlikely(ret != 0))
+               goto err_unreserve;
+
+       virtual = ttm_kmap_obj_virtual(&map, &dummy);
+       ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
+                                     hotspotX, hotspotY);
+
+       ttm_bo_kunmap(&map);
+err_unreserve:
+       ttm_bo_unreserve(&dmabuf->base);
+
+       return ret;
+}
+
+
 void vmw_cursor_update_position(struct vmw_private *dev_priv,
                                bool show, int x, int y)
 {
@@ -110,24 +185,21 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
                return -EINVAL;
 
        if (handle) {
-               ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
-                                                    handle, &surface);
-               if (!ret) {
-                       if (!surface->snooper.image) {
-                               DRM_ERROR("surface not suitable for cursor\n");
-                               vmw_surface_unreference(&surface);
-                               return -EINVAL;
-                       }
-               } else {
-                       ret = vmw_user_dmabuf_lookup(tfile,
-                                                    handle, &dmabuf);
-                       if (ret) {
-                               DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
-                               return -EINVAL;
-                       }
+               ret = vmw_user_lookup_handle(dev_priv, tfile,
+                                            handle, &surface, &dmabuf);
+               if (ret) {
+                       DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
+                       return -EINVAL;
                }
        }
 
+       /* need to do this before taking down old image */
+       if (surface && !surface->snooper.image) {
+               DRM_ERROR("surface not suitable for cursor\n");
+               vmw_surface_unreference(&surface);
+               return -EINVAL;
+       }
+
        /* takedown old cursor */
        if (du->cursor_surface) {
                du->cursor_surface->snooper.crtc = NULL;
@@ -146,36 +218,11 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
                vmw_cursor_update_image(dev_priv, surface->snooper.image,
                                        64, 64, du->hotspot_x, du->hotspot_y);
        } else if (dmabuf) {
-               struct ttm_bo_kmap_obj map;
-               unsigned long kmap_offset;
-               unsigned long kmap_num;
-               void *virtual;
-               bool dummy;
-
                /* vmw_user_surface_lookup takes one reference */
                du->cursor_dmabuf = dmabuf;
 
-               kmap_offset = 0;
-               kmap_num = (64*64*4) >> PAGE_SHIFT;
-
-               ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
-               if (unlikely(ret != 0)) {
-                       DRM_ERROR("reserve failed\n");
-                       return -EINVAL;
-               }
-
-               ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
-               if (unlikely(ret != 0))
-                       goto err_unreserve;
-
-               virtual = ttm_kmap_obj_virtual(&map, &dummy);
-               vmw_cursor_update_image(dev_priv, virtual, 64, 64,
-                                       du->hotspot_x, du->hotspot_y);
-
-               ttm_bo_kunmap(&map);
-err_unreserve:
-               ttm_bo_unreserve(&dmabuf->base);
-
+               ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
+                                              du->hotspot_x, du->hotspot_y);
        } else {
                vmw_cursor_update_position(dev_priv, false, 0, 0);
                return 0;
@@ -377,8 +424,9 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
                                struct drm_clip_rect *clips,
                                unsigned num_clips, int inc)
 {
-       struct drm_clip_rect *clips_ptr;
        struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+       struct drm_clip_rect *clips_ptr;
+       struct drm_clip_rect *tmp;
        struct drm_crtc *crtc;
        size_t fifo_size;
        int i, num_units;
@@ -391,7 +439,6 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
        } *cmd;
        SVGASignedRect *blits;
 
-
        num_units = 0;
        list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
                            head) {
@@ -402,13 +449,24 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
 
        BUG_ON(!clips || !num_clips);
 
+       tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
+       if (unlikely(tmp == NULL)) {
+               DRM_ERROR("Temporary cliprect memory alloc failed.\n");
+               return -ENOMEM;
+       }
+
        fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
        cmd = kzalloc(fifo_size, GFP_KERNEL);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Temporary fifo memory alloc failed.\n");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto out_free_tmp;
        }
 
+       /* setup blits pointer */
+       blits = (SVGASignedRect *)&cmd[1];
+
+       /* initial clip region */
        left = clips->x1;
        right = clips->x2;
        top = clips->y1;
@@ -434,45 +492,60 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
        cmd->body.srcRect.bottom = bottom;
 
        clips_ptr = clips;
-       blits = (SVGASignedRect *)&cmd[1];
        for (i = 0; i < num_clips; i++, clips_ptr += inc) {
-               blits[i].left   = clips_ptr->x1 - left;
-               blits[i].right  = clips_ptr->x2 - left;
-               blits[i].top    = clips_ptr->y1 - top;
-               blits[i].bottom = clips_ptr->y2 - top;
+               tmp[i].x1 = clips_ptr->x1 - left;
+               tmp[i].x2 = clips_ptr->x2 - left;
+               tmp[i].y1 = clips_ptr->y1 - top;
+               tmp[i].y2 = clips_ptr->y2 - top;
        }
 
        /* do per unit writing, reuse fifo for each */
        for (i = 0; i < num_units; i++) {
                struct vmw_display_unit *unit = units[i];
-               int clip_x1 = left - unit->crtc.x;
-               int clip_y1 = top - unit->crtc.y;
-               int clip_x2 = right - unit->crtc.x;
-               int clip_y2 = bottom - unit->crtc.y;
+               struct vmw_clip_rect clip;
+               int num;
+
+               clip.x1 = left - unit->crtc.x;
+               clip.y1 = top - unit->crtc.y;
+               clip.x2 = right - unit->crtc.x;
+               clip.y2 = bottom - unit->crtc.y;
 
                /* skip any crtcs that miss the clip region */
-               if (clip_x1 >= unit->crtc.mode.hdisplay ||
-                   clip_y1 >= unit->crtc.mode.vdisplay ||
-                   clip_x2 <= 0 || clip_y2 <= 0)
+               if (clip.x1 >= unit->crtc.mode.hdisplay ||
+                   clip.y1 >= unit->crtc.mode.vdisplay ||
+                   clip.x2 <= 0 || clip.y2 <= 0)
                        continue;
 
+               /*
+                * In order for the clip rects to be correctly scaled,
+                * the src and dest rects need to be the same size.
+                */
+               cmd->body.destRect.left = clip.x1;
+               cmd->body.destRect.right = clip.x2;
+               cmd->body.destRect.top = clip.y1;
+               cmd->body.destRect.bottom = clip.y2;
+
+               /* create a clip rect of the crtc in dest coords */
+               clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
+               clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
+               clip.x1 = 0 - clip.x1;
+               clip.y1 = 0 - clip.y1;
+
                /* need to reset sid as it is changed by execbuf */
                cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
-
                cmd->body.destScreenId = unit->unit;
 
-               /*
-                * The blit command is a lot more resilient then the
-                * readback command when it comes to clip rects. So its
-                * okay to go out of bounds.
-                */
+               /* clip and write blits to cmd stream */
+               vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
 
-               cmd->body.destRect.left = clip_x1;
-               cmd->body.destRect.right = clip_x2;
-               cmd->body.destRect.top = clip_y1;
-               cmd->body.destRect.bottom = clip_y2;
+               /* if no cliprects hit, skip this unit */
+               if (num == 0)
+                       continue;
 
 
+               /* recalculate package length */
+               fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
+               cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
                ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
                                          fifo_size, 0, NULL);
 
@@ -480,7 +553,10 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
                        break;
        }
 
+
        kfree(cmd);
+out_free_tmp:
+       kfree(tmp);
 
        return ret;
 }
@@ -556,6 +632,10 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
         * Sanity checks.
         */
 
+       /* Surface must be marked as a scanout. */
+       if (unlikely(!surface->scanout))
+               return -EINVAL;
+
        if (unlikely(surface->mip_levels[0] != 1 ||
                     surface->num_sizes != 1 ||
                     surface->sizes[0].width < mode_cmd->width ||
@@ -782,6 +862,7 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
                        int clip_y1 = clips_ptr->y1 - unit->crtc.y;
                        int clip_x2 = clips_ptr->x2 - unit->crtc.x;
                        int clip_y2 = clips_ptr->y2 - unit->crtc.y;
+                       int move_x, move_y;
 
                        /* skip any crtcs that miss the clip region */
                        if (clip_x1 >= unit->crtc.mode.hdisplay ||
@@ -789,12 +870,21 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
                            clip_x2 <= 0 || clip_y2 <= 0)
                                continue;
 
+                       /* clip size to crtc size */
+                       clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
+                       clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);
+
+                       /* translate both src and dest to bring clip into screen */
+                       move_x = min_t(int, clip_x1, 0);
+                       move_y = min_t(int, clip_y1, 0);
+
+                       /* the actual translation is done here */
                        blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
                        blits[hit_num].body.destScreenId = unit->unit;
-                       blits[hit_num].body.srcOrigin.x = clips_ptr->x1;
-                       blits[hit_num].body.srcOrigin.y = clips_ptr->y1;
-                       blits[hit_num].body.destRect.left = clip_x1;
-                       blits[hit_num].body.destRect.top = clip_y1;
+                       blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
+                       blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
+                       blits[hit_num].body.destRect.left = clip_x1 - move_x;
+                       blits[hit_num].body.destRect.top = clip_y1 - move_y;
                        blits[hit_num].body.destRect.right = clip_x2;
                        blits[hit_num].body.destRect.bottom = clip_y2;
                        hit_num++;
@@ -1033,46 +1123,29 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
                return ERR_PTR(-ENOENT);
        }
 
-       /**
-        * End conditioned code.
-        */
-
-       ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
-                                            mode_cmd->handle, &surface);
+       /* returns either a dmabuf or surface */
+       ret = vmw_user_lookup_handle(dev_priv, tfile,
+                                    mode_cmd->handle,
+                                    &surface, &bo);
        if (ret)
-               goto try_dmabuf;
-
-       if (!surface->scanout)
-               goto err_not_scanout;
-
-       ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
-                                             &vfb, mode_cmd);
-
-       /* vmw_user_surface_lookup takes one ref so does new_fb */
-       vmw_surface_unreference(&surface);
-
-       if (ret) {
-               DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
-               ttm_base_object_unref(&user_obj);
-               return ERR_PTR(ret);
-       } else
-               vfb->user_obj = user_obj;
-       return &vfb->base;
-
-try_dmabuf:
-       DRM_INFO("%s: trying buffer\n", __func__);
-
-       ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
-       if (ret) {
-               DRM_ERROR("failed to find buffer: %i\n", ret);
-               return ERR_PTR(-ENOENT);
-       }
-
-       ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
-                                            mode_cmd);
+               goto err_out;
+
+       /* Create the new framebuffer depending on what we got back */
+       if (bo)
+               ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
+                                                    mode_cmd);
+       else if (surface)
+               ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
+                                                     surface, &vfb, mode_cmd);
+       else
+               BUG();
 
-       /* vmw_user_dmabuf_lookup takes one ref so does new_fb */
-       vmw_dmabuf_unreference(&bo);
+err_out:
+       /* vmw_user_lookup_handle takes one ref so does new_fb */
+       if (bo)
+               vmw_dmabuf_unreference(&bo);
+       if (surface)
+               vmw_surface_unreference(&surface);
 
        if (ret) {
                DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
@@ -1082,14 +1155,6 @@ try_dmabuf:
                vfb->user_obj = user_obj;
 
        return &vfb->base;
-
-err_not_scanout:
-       DRM_ERROR("surface not marked as scanout\n");
-       /* vmw_user_surface_lookup takes one ref */
-       vmw_surface_unreference(&surface);
-       ttm_base_object_unref(&user_obj);
-
-       return ERR_PTR(-EINVAL);
 }
 
 static struct drm_mode_config_funcs vmw_kms_funcs = {
@@ -1106,10 +1171,12 @@ int vmw_kms_present(struct vmw_private *dev_priv,
                    uint32_t num_clips)
 {
        struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+       struct drm_clip_rect *tmp;
        struct drm_crtc *crtc;
        size_t fifo_size;
        int i, k, num_units;
        int ret = 0; /* silence warning */
+       int left, right, top, bottom;
 
        struct {
                SVGA3dCmdHeader header;
@@ -1127,60 +1194,95 @@ int vmw_kms_present(struct vmw_private *dev_priv,
        BUG_ON(surface == NULL);
        BUG_ON(!clips || !num_clips);
 
+       tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
+       if (unlikely(tmp == NULL)) {
+               DRM_ERROR("Temporary cliprect memory alloc failed.\n");
+               return -ENOMEM;
+       }
+
        fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
        cmd = kmalloc(fifo_size, GFP_KERNEL);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed to allocate temporary fifo memory.\n");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto out_free_tmp;
+       }
+
+       left = clips->x;
+       right = clips->x + clips->w;
+       top = clips->y;
+       bottom = clips->y + clips->h;
+
+       for (i = 1; i < num_clips; i++) {
+               left = min_t(int, left, (int)clips[i].x);
+               right = max_t(int, right, (int)clips[i].x + clips[i].w);
+               top = min_t(int, top, (int)clips[i].y);
+               bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
        }
 
        /* only need to do this once */
        memset(cmd, 0, fifo_size);
        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
-       cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
-
-       cmd->body.srcRect.left = 0;
-       cmd->body.srcRect.right = surface->sizes[0].width;
-       cmd->body.srcRect.top = 0;
-       cmd->body.srcRect.bottom = surface->sizes[0].height;
 
        blits = (SVGASignedRect *)&cmd[1];
+
+       cmd->body.srcRect.left = left;
+       cmd->body.srcRect.right = right;
+       cmd->body.srcRect.top = top;
+       cmd->body.srcRect.bottom = bottom;
+
        for (i = 0; i < num_clips; i++) {
-               blits[i].left   = clips[i].x;
-               blits[i].right  = clips[i].x + clips[i].w;
-               blits[i].top    = clips[i].y;
-               blits[i].bottom = clips[i].y + clips[i].h;
+               tmp[i].x1 = clips[i].x - left;
+               tmp[i].x2 = clips[i].x + clips[i].w - left;
+               tmp[i].y1 = clips[i].y - top;
+               tmp[i].y2 = clips[i].y + clips[i].h - top;
        }
 
        for (k = 0; k < num_units; k++) {
                struct vmw_display_unit *unit = units[k];
-               int clip_x1 = destX - unit->crtc.x;
-               int clip_y1 = destY - unit->crtc.y;
-               int clip_x2 = clip_x1 + surface->sizes[0].width;
-               int clip_y2 = clip_y1 + surface->sizes[0].height;
+               struct vmw_clip_rect clip;
+               int num;
+
+               clip.x1 = left + destX - unit->crtc.x;
+               clip.y1 = top + destY - unit->crtc.y;
+               clip.x2 = right + destX - unit->crtc.x;
+               clip.y2 = bottom + destY - unit->crtc.y;
 
                /* skip any crtcs that miss the clip region */
-               if (clip_x1 >= unit->crtc.mode.hdisplay ||
-                   clip_y1 >= unit->crtc.mode.vdisplay ||
-                   clip_x2 <= 0 || clip_y2 <= 0)
+               if (clip.x1 >= unit->crtc.mode.hdisplay ||
+                   clip.y1 >= unit->crtc.mode.vdisplay ||
+                   clip.x2 <= 0 || clip.y2 <= 0)
                        continue;
 
+               /*
+                * In order for the clip rects to be correctly scaled,
+                * the src and dest rects need to be the same size.
+                */
+               cmd->body.destRect.left = clip.x1;
+               cmd->body.destRect.right = clip.x2;
+               cmd->body.destRect.top = clip.y1;
+               cmd->body.destRect.bottom = clip.y2;
+
+               /* create a clip rect of the crtc in dest coords */
+               clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
+               clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
+               clip.x1 = 0 - clip.x1;
+               clip.y1 = 0 - clip.y1;
+
                /* need to reset sid as it is changed by execbuf */
                cmd->body.srcImage.sid = sid;
-
                cmd->body.destScreenId = unit->unit;
 
-               /*
-                * The blit command is a lot more resilient then the
-                * readback command when it comes to clip rects. So its
-                * okay to go out of bounds.
-                */
+               /* clip and write blits to cmd stream */
+               vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
 
-               cmd->body.destRect.left = clip_x1;
-               cmd->body.destRect.right = clip_x2;
-               cmd->body.destRect.top = clip_y1;
-               cmd->body.destRect.bottom = clip_y2;
+               /* if no cliprects hit, skip this unit */
+               if (num == 0)
+                       continue;
 
+               /* recalculate package length */
+               fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
+               cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
                ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
                                          fifo_size, 0, NULL);
 
@@ -1189,6 +1291,8 @@ int vmw_kms_present(struct vmw_private *dev_priv,
        }
 
        kfree(cmd);
+out_free_tmp:
+       kfree(tmp);
 
        return ret;
 }
@@ -1809,7 +1913,8 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
        }
 
        rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
-       rects = kzalloc(rects_size, GFP_KERNEL);
+       rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
+                       GFP_KERNEL);
        if (unlikely(!rects)) {
                ret = -ENOMEM;
                goto out_unlock;
@@ -1824,10 +1929,10 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
        }
 
        for (i = 0; i < arg->num_outputs; ++i) {
-               if (rects->x < 0 ||
-                   rects->y < 0 ||
-                   rects->x + rects->w > mode_config->max_width ||
-                   rects->y + rects->h > mode_config->max_height) {
+               if (rects[i].x < 0 ||
+                   rects[i].y < 0 ||
+                   rects[i].x + rects[i].w > mode_config->max_width ||
+                   rects[i].y + rects[i].h > mode_config->max_height) {
                        DRM_ERROR("Invalid GUI layout.\n");
                        ret = -EINVAL;
                        goto out_free;
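
The hunk above fixes a validation loop that tested rects->x, i.e. rects[0], on every pass; switching to rects[i] makes the bounds check cover each output rect. A small sketch of per-element layout validation, with hypothetical limits:

    #include <stdbool.h>
    #include <stdio.h>

    struct out_rect { int x, y, w, h; };

    /* validate every rect, not just the first one */
    static bool layout_valid(const struct out_rect *rects, int n, int max_w, int max_h)
    {
            int i;

            for (i = 0; i < n; i++) {
                    if (rects[i].x < 0 || rects[i].y < 0 ||
                        rects[i].x + rects[i].w > max_w ||
                        rects[i].y + rects[i].h > max_h)
                            return false;      /* rects[i], not rects[0]: each entry is checked */
            }
            return true;
    }

    int main(void)
    {
            struct out_rect ok[]  = { { 0, 0, 1024, 768 }, { 1024, 0, 1024, 768 } };
            struct out_rect bad[] = { { 0, 0, 1024, 768 }, { 4000, 0, 1024, 768 } };

            printf("ok layout:  %s\n", layout_valid(ok, 2, 4096, 2048) ? "valid" : "invalid");
            printf("bad layout: %s\n", layout_valid(bad, 2, 4096, 2048) ? "valid" : "invalid");
            return 0;
    }
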
index af8e6e5..e1cb855 100644
@@ -62,9 +62,14 @@ struct vmw_framebuffer {
 int vmw_cursor_update_image(struct vmw_private *dev_priv,
                            u32 *image, u32 width, u32 height,
                            u32 hotspotX, u32 hotspotY);
+int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
+                            struct vmw_dma_buffer *dmabuf,
+                            u32 width, u32 height,
+                            u32 hotspotX, u32 hotspotY);
 void vmw_cursor_update_position(struct vmw_private *dev_priv,
                                bool show, int x, int y);
 
+
 /**
  * Base class display unit.
  *
index 90c5e39..8f8dbd4 100644
@@ -74,9 +74,10 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
 {
        struct vmw_legacy_display *lds = dev_priv->ldu_priv;
        struct vmw_legacy_display_unit *entry;
+       struct vmw_display_unit *du = NULL;
        struct drm_framebuffer *fb = NULL;
        struct drm_crtc *crtc = NULL;
-       int i = 0;
+       int i = 0, ret;
 
        /* If there is no display topology the host just assumes
         * that the guest will set the same layout as the host.
@@ -129,6 +130,25 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
 
        lds->last_num_active = lds->num_active;
 
+
+       /* Find the first du with a cursor. */
+       list_for_each_entry(entry, &lds->active, active) {
+               du = &entry->base;
+
+               if (!du->cursor_dmabuf)
+                       continue;
+
+               ret = vmw_cursor_update_dmabuf(dev_priv,
+                                              du->cursor_dmabuf,
+                                              64, 64,
+                                              du->hotspot_x,
+                                              du->hotspot_y);
+               if (ret == 0)
+                       break;
+
+               DRM_ERROR("Could not update cursor image\n");
+       }
+
        return 0;
 }
 
index 86c5e4c..1c7f09e 100644
@@ -1190,6 +1190,29 @@ void vmw_resource_unreserve(struct list_head *list)
                write_unlock(lock);
 }
 
+/**
+ * Helper function that looks up either a surface or a dmabuf.
+ *
+ * The pointers pointed at by out_surf and out_buf need to be NULL.
+ */
+int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+                          struct ttm_object_file *tfile,
+                          uint32_t handle,
+                          struct vmw_surface **out_surf,
+                          struct vmw_dma_buffer **out_buf)
+{
+       int ret;
+
+       BUG_ON(*out_surf || *out_buf);
+
+       ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
+       if (!ret)
+               return 0;
+
+       ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
+       return ret;
+}
+
 
 int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
                                   struct ttm_object_file *tfile,
index 848a56c..af35384 100644
@@ -1771,8 +1771,8 @@ static const struct hid_device_id hid_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
        { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0003) },
        { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
        { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_4_PHIDGETSERVO_30) },
        { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_1_PHIDGETSERVO_30) },
index 06ce996..4a441a6 100644
 #define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002
 
 #define USB_VENDOR_ID_GENERAL_TOUCH    0x0dfc
-#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0001
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003
 
 #define USB_VENDOR_ID_GLAB             0x06c2
 #define USB_DEVICE_ID_4_PHIDGETSERVO_30        0x0038
index 7a48b1e..5253d23 100644
@@ -59,7 +59,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
 {
        struct jz4740_hwmon *hwmon = dev_get_drvdata(dev);
        struct completion *completion = &hwmon->read_completion;
-       unsigned long t;
+       long t;
        unsigned long val;
        int ret;
 
@@ -203,7 +203,7 @@ static int __devexit jz4740_hwmon_remove(struct platform_device *pdev)
        return 0;
 }
 
-struct platform_driver jz4740_hwmon_driver = {
+static struct platform_driver jz4740_hwmon_driver = {
        .probe  = jz4740_hwmon_probe,
        .remove = __devexit_p(jz4740_hwmon_remove),
        .driver = {
index 8cebef4..18936ac 100644
@@ -893,6 +893,13 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
        /* Set the number of I2C channel instance */
        adap_info->ch_num = id->driver_data;
 
+       ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
+                 KBUILD_MODNAME, adap_info);
+       if (ret) {
+               pch_pci_err(pdev, "request_irq FAILED\n");
+               goto err_request_irq;
+       }
+
        for (i = 0; i < adap_info->ch_num; i++) {
                pch_adap = &adap_info->pch_data[i].pch_adapter;
                adap_info->pch_i2c_suspended = false;
@@ -910,28 +917,23 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
 
                pch_adap->dev.parent = &pdev->dev;
 
+               pch_i2c_init(&adap_info->pch_data[i]);
                ret = i2c_add_adapter(pch_adap);
                if (ret) {
                        pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i);
-                       goto err_i2c_add_adapter;
+                       goto err_add_adapter;
                }
-
-               pch_i2c_init(&adap_info->pch_data[i]);
-       }
-       ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
-                 KBUILD_MODNAME, adap_info);
-       if (ret) {
-               pch_pci_err(pdev, "request_irq FAILED\n");
-               goto err_i2c_add_adapter;
        }
 
        pci_set_drvdata(pdev, adap_info);
        pch_pci_dbg(pdev, "returns %d.\n", ret);
        return 0;
 
-err_i2c_add_adapter:
+err_add_adapter:
        for (j = 0; j < i; j++)
                i2c_del_adapter(&adap_info->pch_data[j].pch_adapter);
+       free_irq(pdev->irq, adap_info);
+err_request_irq:
        pci_iounmap(pdev, base_addr);
 err_pci_iomap:
        pci_release_regions(pdev);
index a43d002..fa23faa 100644
@@ -1047,13 +1047,14 @@ omap_i2c_probe(struct platform_device *pdev)
                 * size. This is to ensure that we can handle the status on int
                 * call back latencies.
                 */
-               if (dev->rev >= OMAP_I2C_REV_ON_3530_4430) {
-                       dev->fifo_size = 0;
+
+               dev->fifo_size = (dev->fifo_size / 2);
+
+               if (dev->rev >= OMAP_I2C_REV_ON_3530_4430)
                        dev->b_hw = 0; /* Disable hardware fixes */
-               } else {
-                       dev->fifo_size = (dev->fifo_size / 2);
+               else
                        dev->b_hw = 1; /* Enable hardware fixes */
-               }
+
                /* calculate wakeup latency constraint for MPU */
                if (dev->set_mpu_wkup_lat != NULL)
                        dev->latency = (1000000 * dev->fifo_size) /
index 2754cef..4c17180 100644
@@ -534,6 +534,7 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
 
        /* first, try busy waiting briefly */
        do {
+               cpu_relax();
                iicstat = readl(i2c->regs + S3C2410_IICSTAT);
        } while ((iicstat & S3C2410_IICSTAT_START) && --spins);
 
@@ -786,7 +787,7 @@ static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
 #else
 static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c)
 {
-       return -EINVAL;
+       return 0;
 }
 
 static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
index a20c3c8..1612cfd 100644
@@ -178,6 +178,25 @@ static void queue_req(struct addr_req *req)
        mutex_unlock(&lock);
 }
 
+static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *addr)
+{
+       struct neighbour *n;
+       int ret;
+
+       rcu_read_lock();
+       n = dst_get_neighbour_noref(dst);
+       if (!n || !(n->nud_state & NUD_VALID)) {
+               if (n)
+                       neigh_event_send(n, NULL);
+               ret = -ENODATA;
+       } else {
+               ret = rdma_copy_addr(addr, dst->dev, n->ha);
+       }
+       rcu_read_unlock();
+
+       return ret;
+}
+
 static int addr4_resolve(struct sockaddr_in *src_in,
                         struct sockaddr_in *dst_in,
                         struct rdma_dev_addr *addr)
@@ -185,7 +204,6 @@ static int addr4_resolve(struct sockaddr_in *src_in,
        __be32 src_ip = src_in->sin_addr.s_addr;
        __be32 dst_ip = dst_in->sin_addr.s_addr;
        struct rtable *rt;
-       struct neighbour *neigh;
        struct flowi4 fl4;
        int ret;
 
@@ -214,20 +232,7 @@ static int addr4_resolve(struct sockaddr_in *src_in,
                goto put;
        }
 
-       neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
-       if (!neigh || !(neigh->nud_state & NUD_VALID)) {
-               rcu_read_lock();
-               neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
-               rcu_read_unlock();
-               ret = -ENODATA;
-               if (neigh)
-                       goto release;
-               goto put;
-       }
-
-       ret = rdma_copy_addr(addr, neigh->dev, neigh->ha);
-release:
-       neigh_release(neigh);
+       ret = dst_fetch_ha(&rt->dst, addr);
 put:
        ip_rt_put(rt);
 out:
@@ -240,7 +245,6 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
                         struct rdma_dev_addr *addr)
 {
        struct flowi6 fl6;
-       struct neighbour *neigh;
        struct dst_entry *dst;
        int ret;
 
@@ -276,16 +280,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
                goto put;
        }
 
-       rcu_read_lock();
-       neigh = dst_get_neighbour(dst);
-       if (!neigh || !(neigh->nud_state & NUD_VALID)) {
-               if (neigh)
-                       neigh_event_send(neigh, NULL);
-               ret = -ENODATA;
-       } else {
-               ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
-       }
-       rcu_read_unlock();
+       ret = dst_fetch_ha(dst, addr);
 put:
        dst_release(dst);
        return ret;
index 09e66cc..236a88c 100644
@@ -2513,6 +2513,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
 
        req.private_data_len = sizeof(struct cma_hdr) +
                               conn_param->private_data_len;
+       if (req.private_data_len < conn_param->private_data_len)
+               return -EINVAL;
+
        req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
        if (!req.private_data)
                return -ENOMEM;
@@ -2562,6 +2565,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
        memset(&req, 0, sizeof req);
        offset = cma_user_data_offset(id_priv->id.ps);
        req.private_data_len = offset + conn_param->private_data_len;
+       if (req.private_data_len < conn_param->private_data_len)
+               return -EINVAL;
+
        private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
        if (!private_data)
                return -ENOMEM;
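
Both cma hunks above add the same guard: private_data_len is a narrow unsigned field, so if adding the header length leaves it smaller than the caller-supplied length, the addition wrapped and the request must be rejected before the undersized kzalloc(). A user-space sketch of that wrap-around check, assuming (hypothetically) an 8-bit length field and a 36-byte header:

    #include <stdint.h>
    #include <stdio.h>

    #define HDR_LEN 36     /* hypothetical header size, standing in for sizeof(struct cma_hdr) */

    /* returns 0 on success, -1 if HDR_LEN + user_len wraps the narrow length field */
    static int set_private_data_len(uint8_t *out_len, unsigned int user_len)
    {
            uint8_t total = (uint8_t)(HDR_LEN + user_len);

            if (total < user_len)
                    return -1;             /* unsigned wrap-around detected; reject, as the patch does */
            *out_len = total;
            return 0;
    }

    int main(void)
    {
            uint8_t len;

            printf("100-byte payload: %s\n", set_private_data_len(&len, 100) ? "rejected" : "accepted");
            printf("240-byte payload: %s\n", set_private_data_len(&len, 240) ? "rejected" : "accepted");
            return 0;
    }
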
index c88b12b..740dcc0 100644
@@ -1338,7 +1338,6 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
        struct iwch_ep *child_ep, *parent_ep = ctx;
        struct cpl_pass_accept_req *req = cplhdr(skb);
        unsigned int hwtid = GET_TID(req);
-       struct neighbour *neigh;
        struct dst_entry *dst;
        struct l2t_entry *l2t;
        struct rtable *rt;
@@ -1375,10 +1374,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
                goto reject;
        }
        dst = &rt->dst;
-       rcu_read_lock();
-       neigh = dst_get_neighbour(dst);
-       l2t = t3_l2t_get(tdev, neigh, neigh->dev);
-       rcu_read_unlock();
+       l2t = t3_l2t_get(tdev, dst, NULL);
        if (!l2t) {
                printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
                       __func__);
@@ -1889,7 +1885,6 @@ static int is_loopback_dst(struct iw_cm_id *cm_id)
 int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 {
        struct iwch_dev *h = to_iwch_dev(cm_id->device);
-       struct neighbour *neigh;
        struct iwch_ep *ep;
        struct rtable *rt;
        int err = 0;
@@ -1947,13 +1942,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                goto fail3;
        }
        ep->dst = &rt->dst;
-
-       rcu_read_lock();
-       neigh = dst_get_neighbour(ep->dst);
-
-       /* get a l2t entry */
-       ep->l2t = t3_l2t_get(ep->com.tdev, neigh, neigh->dev);
-       rcu_read_unlock();
+       ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL);
        if (!ep->l2t) {
                printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
                err = -ENOMEM;
index 0747004..0668bb3 100644
@@ -1556,6 +1556,67 @@ static void get_4tuple(struct cpl_pass_accept_req *req,
        return;
 }
 
+static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
+                    struct c4iw_dev *cdev, bool clear_mpa_v1)
+{
+       struct neighbour *n;
+       int err, step;
+
+       rcu_read_lock();
+       n = dst_get_neighbour_noref(dst);
+       err = -ENODEV;
+       if (!n)
+               goto out;
+       err = -ENOMEM;
+       if (n->dev->flags & IFF_LOOPBACK) {
+               struct net_device *pdev;
+
+               pdev = ip_dev_find(&init_net, peer_ip);
+               ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
+                                       n, pdev, 0);
+               if (!ep->l2t)
+                       goto out;
+               ep->mtu = pdev->mtu;
+               ep->tx_chan = cxgb4_port_chan(pdev);
+               ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
+               step = cdev->rdev.lldi.ntxq /
+                       cdev->rdev.lldi.nchan;
+               ep->txq_idx = cxgb4_port_idx(pdev) * step;
+               step = cdev->rdev.lldi.nrxq /
+                       cdev->rdev.lldi.nchan;
+               ep->ctrlq_idx = cxgb4_port_idx(pdev);
+               ep->rss_qid = cdev->rdev.lldi.rxq_ids[
+                       cxgb4_port_idx(pdev) * step];
+               dev_put(pdev);
+       } else {
+               ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
+                                       n, n->dev, 0);
+               if (!ep->l2t)
+                       goto out;
+               ep->mtu = dst_mtu(ep->dst);
+               ep->tx_chan = cxgb4_port_chan(n->dev);
+               ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1;
+               step = cdev->rdev.lldi.ntxq /
+                       cdev->rdev.lldi.nchan;
+               ep->txq_idx = cxgb4_port_idx(n->dev) * step;
+               ep->ctrlq_idx = cxgb4_port_idx(n->dev);
+               step = cdev->rdev.lldi.nrxq /
+                       cdev->rdev.lldi.nchan;
+               ep->rss_qid = cdev->rdev.lldi.rxq_ids[
+                       cxgb4_port_idx(n->dev) * step];
+
+               if (clear_mpa_v1) {
+                       ep->retry_with_mpa_v1 = 0;
+                       ep->tried_with_mpa_v1 = 0;
+               }
+       }
+       err = 0;
+out:
+       rcu_read_unlock();
+
+       return err;
+}
+
 static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 {
        struct c4iw_ep *child_ep, *parent_ep;
@@ -1563,18 +1624,11 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
        unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int hwtid = GET_TID(req);
-       struct neighbour *neigh;
        struct dst_entry *dst;
-       struct l2t_entry *l2t;
        struct rtable *rt;
        __be32 local_ip, peer_ip;
        __be16 local_port, peer_port;
-       struct net_device *pdev;
-       u32 tx_chan, smac_idx;
-       u16 rss_qid;
-       u32 mtu;
-       int step;
-       int txq_idx, ctrlq_idx;
+       int err;
 
        parent_ep = lookup_stid(t, stid);
        PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
@@ -1596,49 +1650,24 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
                goto reject;
        }
        dst = &rt->dst;
-       rcu_read_lock();
-       neigh = dst_get_neighbour(dst);
-       if (neigh->dev->flags & IFF_LOOPBACK) {
-               pdev = ip_dev_find(&init_net, peer_ip);
-               BUG_ON(!pdev);
-               l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, pdev, 0);
-               mtu = pdev->mtu;
-               tx_chan = cxgb4_port_chan(pdev);
-               smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
-               step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
-               txq_idx = cxgb4_port_idx(pdev) * step;
-               ctrlq_idx = cxgb4_port_idx(pdev);
-               step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
-               rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
-               dev_put(pdev);
-       } else {
-               l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, neigh->dev, 0);
-               mtu = dst_mtu(dst);
-               tx_chan = cxgb4_port_chan(neigh->dev);
-               smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
-               step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
-               txq_idx = cxgb4_port_idx(neigh->dev) * step;
-               ctrlq_idx = cxgb4_port_idx(neigh->dev);
-               step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
-               rss_qid = dev->rdev.lldi.rxq_ids[
-                         cxgb4_port_idx(neigh->dev) * step];
-       }
-       rcu_read_unlock();
-       if (!l2t) {
-               printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
+
+       child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
+       if (!child_ep) {
+               printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
                       __func__);
                dst_release(dst);
                goto reject;
        }
 
-       child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
-       if (!child_ep) {
-               printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
+       err = import_ep(child_ep, peer_ip, dst, dev, false);
+       if (err) {
+               printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
                       __func__);
-               cxgb4_l2t_release(l2t);
                dst_release(dst);
+               kfree(child_ep);
                goto reject;
        }
+
        state_set(&child_ep->com, CONNECTING);
        child_ep->com.dev = dev;
        child_ep->com.cm_id = NULL;
@@ -1651,18 +1680,11 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
        c4iw_get_ep(&parent_ep->com);
        child_ep->parent_ep = parent_ep;
        child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
-       child_ep->l2t = l2t;
        child_ep->dst = dst;
        child_ep->hwtid = hwtid;
-       child_ep->tx_chan = tx_chan;
-       child_ep->smac_idx = smac_idx;
-       child_ep->rss_qid = rss_qid;
-       child_ep->mtu = mtu;
-       child_ep->txq_idx = txq_idx;
-       child_ep->ctrlq_idx = ctrlq_idx;
 
        PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
-            tx_chan, smac_idx, rss_qid);
+            child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
 
        init_timer(&child_ep->timer);
        cxgb4_insert_tid(t, child_ep, hwtid);
@@ -1792,11 +1814,8 @@ static int is_neg_adv_abort(unsigned int status)
 
 static int c4iw_reconnect(struct c4iw_ep *ep)
 {
-       int err = 0;
        struct rtable *rt;
-       struct net_device *pdev;
-       struct neighbour *neigh;
-       int step;
+       int err = 0;
 
        PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
        init_timer(&ep->timer);
@@ -1824,47 +1843,10 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
        }
        ep->dst = &rt->dst;
 
-       rcu_read_lock();
-       neigh = dst_get_neighbour(ep->dst);
-
-       /* get a l2t entry */
-       if (neigh->dev->flags & IFF_LOOPBACK) {
-               PDBG("%s LOOPBACK\n", __func__);
-               pdev = ip_dev_find(&init_net,
-                                  ep->com.cm_id->remote_addr.sin_addr.s_addr);
-               ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
-                                       neigh, pdev, 0);
-               ep->mtu = pdev->mtu;
-               ep->tx_chan = cxgb4_port_chan(pdev);
-               ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
-               step = ep->com.dev->rdev.lldi.ntxq /
-                       ep->com.dev->rdev.lldi.nchan;
-               ep->txq_idx = cxgb4_port_idx(pdev) * step;
-               step = ep->com.dev->rdev.lldi.nrxq /
-                       ep->com.dev->rdev.lldi.nchan;
-               ep->ctrlq_idx = cxgb4_port_idx(pdev);
-               ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
-                       cxgb4_port_idx(pdev) * step];
-               dev_put(pdev);
-       } else {
-               ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
-                                       neigh, neigh->dev, 0);
-               ep->mtu = dst_mtu(ep->dst);
-               ep->tx_chan = cxgb4_port_chan(neigh->dev);
-               ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
-               step = ep->com.dev->rdev.lldi.ntxq /
-                       ep->com.dev->rdev.lldi.nchan;
-               ep->txq_idx = cxgb4_port_idx(neigh->dev) * step;
-               ep->ctrlq_idx = cxgb4_port_idx(neigh->dev);
-               step = ep->com.dev->rdev.lldi.nrxq /
-                       ep->com.dev->rdev.lldi.nchan;
-               ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
-                       cxgb4_port_idx(neigh->dev) * step];
-       }
-       rcu_read_unlock();
-       if (!ep->l2t) {
+       err = import_ep(ep, ep->com.cm_id->remote_addr.sin_addr.s_addr,
+                       ep->dst, ep->com.dev, false);
+       if (err) {
                printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
-               err = -ENOMEM;
                goto fail4;
        }
 
@@ -2240,13 +2222,10 @@ err:
 
 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 {
-       int err = 0;
        struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
        struct c4iw_ep *ep;
        struct rtable *rt;
-       struct net_device *pdev;
-       struct neighbour *neigh;
-       int step;
+       int err = 0;
 
        if ((conn_param->ord > c4iw_max_read_depth) ||
            (conn_param->ird > c4iw_max_read_depth)) {
@@ -2307,49 +2286,10 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        }
        ep->dst = &rt->dst;
 
-       rcu_read_lock();
-       neigh = dst_get_neighbour(ep->dst);
-
-       /* get a l2t entry */
-       if (neigh->dev->flags & IFF_LOOPBACK) {
-               PDBG("%s LOOPBACK\n", __func__);
-               pdev = ip_dev_find(&init_net,
-                                  cm_id->remote_addr.sin_addr.s_addr);
-               ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
-                                       neigh, pdev, 0);
-               ep->mtu = pdev->mtu;
-               ep->tx_chan = cxgb4_port_chan(pdev);
-               ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
-               step = ep->com.dev->rdev.lldi.ntxq /
-                      ep->com.dev->rdev.lldi.nchan;
-               ep->txq_idx = cxgb4_port_idx(pdev) * step;
-               step = ep->com.dev->rdev.lldi.nrxq /
-                      ep->com.dev->rdev.lldi.nchan;
-               ep->ctrlq_idx = cxgb4_port_idx(pdev);
-               ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
-                             cxgb4_port_idx(pdev) * step];
-               dev_put(pdev);
-       } else {
-               ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
-                                       neigh, neigh->dev, 0);
-               ep->mtu = dst_mtu(ep->dst);
-               ep->tx_chan = cxgb4_port_chan(neigh->dev);
-               ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
-               step = ep->com.dev->rdev.lldi.ntxq /
-                      ep->com.dev->rdev.lldi.nchan;
-               ep->txq_idx = cxgb4_port_idx(neigh->dev) * step;
-               ep->ctrlq_idx = cxgb4_port_idx(neigh->dev);
-               step = ep->com.dev->rdev.lldi.nrxq /
-                      ep->com.dev->rdev.lldi.nchan;
-               ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
-                             cxgb4_port_idx(neigh->dev) * step];
-               ep->retry_with_mpa_v1 = 0;
-               ep->tried_with_mpa_v1 = 0;
-       }
-       rcu_read_unlock();
-       if (!ep->l2t) {
+       err = import_ep(ep, cm_id->remote_addr.sin_addr.s_addr,
+                       ep->dst, ep->com.dev, true);
+       if (err) {
                printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
-               err = -ENOMEM;
                goto fail4;
        }
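
The two hunks above (c4iw_reconnect and c4iw_connect) drop the duplicated loopback/non-loopback neighbour handling and instead call a single import_ep() helper that fills in the endpoint's L2T, MTU and queue fields and reports failure through a return code. A minimal userspace sketch of that refactoring pattern, with illustrative names only (not the cxgb4 code):

    /* Both connect paths share one helper that populates the endpoint and
     * returns 0 or a negative errno, instead of duplicating the setup. */
    #include <errno.h>
    #include <stdio.h>

    struct endpoint { int mtu, tx_chan, txq_idx; };

    /* hypothetical stand-in for import_ep() */
    static int import_ep(struct endpoint *ep, int port)
    {
        if (port < 0)
            return -ENOMEM;            /* e.g. no L2T entry available */
        ep->mtu = 1500;
        ep->tx_chan = port;
        ep->txq_idx = port * 8;
        return 0;
    }

    static int connect_path(struct endpoint *ep, int port)
    {
        int err = import_ep(ep, port);
        if (err) {
            fprintf(stderr, "cannot alloc l2e: %d\n", err);
            return err;                /* shared error path, no duplication */
        }
        return 0;
    }

    int main(void)
    {
        struct endpoint ep;
        return connect_path(&ep, 0) || connect_path(&ep, 1);
    }
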
 
index f36da99..95c94d8 100644 (file)
@@ -109,7 +109,8 @@ int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey,
 
        err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma,
                           in_modifier, op_modifier,
-                          MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+                          MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+                          MLX4_CMD_NATIVE);
 
        if (!err)
                memcpy(response_mad, outmailbox->buf, 256);
@@ -330,7 +331,8 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                return IB_MAD_RESULT_FAILURE;
 
        err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
-                          MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C);
+                          MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
+                          MLX4_CMD_WRAPPED);
        if (err)
                err = IB_MAD_RESULT_FAILURE;
        else {
index 77f3dbc..7b445df 100644 (file)
@@ -177,7 +177,7 @@ mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
 {
        struct mlx4_dev *dev = to_mdev(device)->dev;
 
-       return dev->caps.port_mask & (1 << (port_num - 1)) ?
+       return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
                IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
 }
 
@@ -434,7 +434,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
        memset(mailbox->buf, 0, 256);
        memcpy(mailbox->buf, props->node_desc, 64);
        mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
-                MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A);
+                MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 
        mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
 
@@ -463,7 +463,7 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
        }
 
        err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B);
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 
        mlx4_free_cmd_mailbox(dev->dev, mailbox);
        return err;
@@ -899,7 +899,8 @@ static void update_gids_task(struct work_struct *work)
        memcpy(gids, gw->gids, sizeof gw->gids);
 
        err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
-                      1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B);
+                      1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+                      MLX4_CMD_NATIVE);
        if (err)
                printk(KERN_WARNING "set port command failed\n");
        else {
@@ -1074,6 +1075,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 
        printk_once(KERN_INFO "%s", mlx4_ib_version);
 
+       if (mlx4_is_mfunc(dev)) {
+               printk(KERN_WARNING "IB not yet supported in SRIOV\n");
+               return NULL;
+       }
+
        mlx4_foreach_ib_transport_port(i, dev)
                num_ports++;
 
@@ -1244,7 +1250,8 @@ err_reg:
 
 err_counter:
        for (; i; --i)
-               mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
+               if (ibdev->counters[i - 1] != -1)
+                       mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
 
 err_map:
        iounmap(ibdev->uar_map);
@@ -1275,7 +1282,8 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
        }
        iounmap(ibdev->uar_map);
        for (p = 0; p < ibdev->num_ports; ++p)
-               mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
+               if (ibdev->counters[p] != -1)
+                       mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
        mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
                mlx4_CLOSE_PORT(dev, p);
 
index 0a52d72..b1e6cae 100644 (file)
@@ -1348,7 +1348,8 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
        else
                netdev = nesvnic->netdev;
 
-       neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev);
+       rcu_read_lock();
+       neigh = dst_get_neighbour_noref(&rt->dst);
        if (neigh) {
                if (neigh->nud_state & NUD_VALID) {
                        nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -1359,7 +1360,6 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
                                if (!memcmp(nesadapter->arp_table[arpindex].mac_addr,
                                            neigh->ha, ETH_ALEN)) {
                                        /* Mac address same as in nes_arp_table */
-                                       neigh_release(neigh);
                                        ip_rt_put(rt);
                                        return rc;
                                }
@@ -1373,15 +1373,11 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
                                             dst_ip, NES_ARP_ADD);
                        rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
                                           NES_ARP_RESOLVE);
+               } else {
+                       neigh_event_send(neigh, NULL);
                }
-               neigh_release(neigh);
-       }
-
-       if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID))) {
-               rcu_read_lock();
-               neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
-               rcu_read_unlock();
        }
+       rcu_read_unlock();
        ip_rt_put(rt);
        return rc;
 }
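
The nes hunk above replaces a refcounted neigh_lookup()/neigh_release() pair with dst_get_neighbour_noref() inside an rcu_read_lock() section: the neighbour carries no reference and is only valid within the read-side critical section, so there is nothing to release. A userspace analogue of the pattern, using a reader/writer lock where the kernel uses RCU (names are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    struct neighbour { int valid; unsigned char ha[6]; };

    static pthread_rwlock_t tbl_lock = PTHREAD_RWLOCK_INITIALIZER;
    static struct neighbour current_neigh = { 1, { 0xde, 0xad, 0xbe, 0xef, 0, 1 } };

    /* stand-in for dst_get_neighbour_noref(): no refcount is taken */
    static struct neighbour *neigh_noref(void) { return &current_neigh; }

    int main(void)
    {
        pthread_rwlock_rdlock(&tbl_lock);       /* rcu_read_lock() */
        struct neighbour *n = neigh_noref();
        if (n && n->valid)
            printf("neighbour MAC %02x:%02x...\n", n->ha[0], n->ha[1]);
        else if (n)
            puts("neighbour not valid, would trigger resolution");
        pthread_rwlock_unlock(&tbl_lock);       /* rcu_read_unlock(); nothing to release */
        return 0;
    }
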
index 574600e..a740324 100644 (file)
@@ -1285,7 +1285,7 @@ static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
        strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
        ctxt_fp(fp) = rcd;
        qib_stats.sps_ctxts++;
-       dd->freectxts++;
+       dd->freectxts--;
        ret = 0;
        goto bail;
 
@@ -1794,7 +1794,7 @@ static int qib_close(struct inode *in, struct file *fp)
                if (dd->pageshadow)
                        unlock_expected_tids(rcd);
                qib_stats.sps_ctxts--;
-               dd->freectxts--;
+               dd->freectxts++;
        }
 
        mutex_unlock(&qib_mutex);
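
The two one-line qib changes above fix an inverted counter: the pool of free contexts must shrink when a context is handed out in setup_ctxt() and grow again in qib_close(). A tiny sketch of the invariant being restored:

    #include <assert.h>

    static int freectxts = 4;       /* contexts still available */

    static void ctxt_open(void)  { assert(freectxts > 0); freectxts--; }
    static void ctxt_close(void) { freectxts++; }

    int main(void)
    {
        ctxt_open();
        ctxt_open();
        ctxt_close();
        ctxt_close();
        assert(freectxts == 4);     /* balanced once every context is closed */
        return 0;
    }
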
index d3ed89c..3514ca0 100644 (file)
@@ -556,15 +556,13 @@ static int path_rec_start(struct net_device *dev,
 }
 
 /* called with rcu_read_lock */
-static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
+static void neigh_add_path(struct sk_buff *skb, struct neighbour *n, struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;
        struct ipoib_neigh *neigh;
-       struct neighbour *n;
        unsigned long flags;
 
-       n = dst_get_neighbour(skb_dst(skb));
        neigh = ipoib_neigh_alloc(n, skb->dev);
        if (!neigh) {
                ++dev->stats.tx_dropped;
@@ -638,16 +636,13 @@ err_drop:
 }
 
 /* called with rcu_read_lock */
-static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
+static void ipoib_path_lookup(struct sk_buff *skb, struct neighbour *n, struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
-       struct dst_entry *dst = skb_dst(skb);
-       struct neighbour *n;
 
        /* Look up path record for unicasts */
-       n = dst_get_neighbour(dst);
        if (n->ha[4] != 0xff) {
-               neigh_add_path(skb, dev);
+               neigh_add_path(skb, n, dev);
                return;
        }
 
@@ -723,12 +718,17 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
        unsigned long flags;
 
        rcu_read_lock();
-       if (likely(skb_dst(skb)))
-               n = dst_get_neighbour(skb_dst(skb));
-
+       if (likely(skb_dst(skb))) {
+               n = dst_get_neighbour_noref(skb_dst(skb));
+               if (!n) {
+                       ++dev->stats.tx_dropped;
+                       dev_kfree_skb_any(skb);
+                       goto unlock;
+               }
+       }
        if (likely(n)) {
                if (unlikely(!*to_ipoib_neigh(n))) {
-                       ipoib_path_lookup(skb, dev);
+                       ipoib_path_lookup(skb, n, dev);
                        goto unlock;
                }
 
@@ -751,7 +751,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        list_del(&neigh->list);
                        ipoib_neigh_free(dev, neigh);
                        spin_unlock_irqrestore(&priv->lock, flags);
-                       ipoib_path_lookup(skb, dev);
+                       ipoib_path_lookup(skb, n, dev);
                        goto unlock;
                }
 
@@ -841,7 +841,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
        dst = skb_dst(skb);
        n = NULL;
        if (dst)
-               n = dst_get_neighbour_raw(dst);
+               n = dst_get_neighbour_noref_raw(dst);
        if ((!dst || !n) && daddr) {
                struct ipoib_pseudoheader *phdr =
                        (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
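
Across the ipoib_main.c hunks above, the transmit path now looks the neighbour up once with dst_get_neighbour_noref(), drops the packet if the lookup returns NULL, and passes the pointer down to ipoib_path_lookup()/neigh_add_path() instead of each callee re-deriving it from the skb. A sketch of that calling-convention change, with illustrative names:

    #include <stdio.h>
    #include <stddef.h>

    struct neigh { unsigned char ha[6]; };
    struct packet { struct neigh *cached; };

    static struct neigh *lookup_noref(struct packet *p) { return p->cached; }

    static void path_lookup(struct packet *p, struct neigh *n)
    {
        /* helper consumes the already-validated neighbour */
        printf("path lookup for %02x...\n", n->ha[0]);
        (void)p;
    }

    static int xmit(struct packet *p)
    {
        struct neigh *n = lookup_noref(p);
        if (!n)
            return -1;              /* count as tx_dropped and free the packet */
        path_lookup(p, n);
        return 0;
    }

    int main(void)
    {
        struct neigh n = { { 0xff } };
        struct packet ok = { &n }, bad = { NULL };
        return (xmit(&ok) == 0 && xmit(&bad) == -1) ? 0 : 1;
    }
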
index 873bff9..f7ff9dd 100644 (file)
@@ -269,7 +269,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
 
                skb->dev = dev;
                if (dst)
-                       n = dst_get_neighbour_raw(dst);
+                       n = dst_get_neighbour_noref_raw(dst);
                if (!dst || !n) {
                        /* put pseudoheader back on for next time */
                        skb_push(skb, sizeof (struct ipoib_pseudoheader));
@@ -728,7 +728,7 @@ out:
 
                rcu_read_lock();
                if (dst)
-                       n = dst_get_neighbour(dst);
+                       n = dst_get_neighbour_noref(dst);
                if (n && !*to_ipoib_neigh(n)) {
                        struct ipoib_neigh *neigh = ipoib_neigh_alloc(n,
                                                                      skb->dev);
index 80793f1..06517e6 100644 (file)
@@ -115,8 +115,8 @@ static void decode_mg(struct cma3000_accl_data *data, int *datax,
 static irqreturn_t cma3000_thread_irq(int irq, void *dev_id)
 {
        struct cma3000_accl_data *data = dev_id;
-       int datax, datay, dataz;
-       u8 ctrl, mode, range, intr_status;
+       int datax, datay, dataz, intr_status;
+       u8 ctrl, mode, range;
 
        intr_status = CMA3000_READ(data, CMA3000_INTSTATUS, "interrupt status");
        if (intr_status < 0)
index c080b82..a6dcd18 100644 (file)
@@ -24,6 +24,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/delay.h>
 #include <linux/dmi.h>
 #include <linux/input/mt.h>
 #include <linux/serio.h>
@@ -1220,6 +1221,16 @@ static int synaptics_reconnect(struct psmouse *psmouse)
 
        do {
                psmouse_reset(psmouse);
+               if (retry) {
+                       /*
+                        * On some boxes, right after resuming, the touchpad
+                        * needs some time to finish initializing (I assume
+                        * it needs time to calibrate) and start responding
+                        * to Synaptics-specific queries, so let's wait a
+                        * bit.
+                        */
+                       ssleep(1);
+               }
                error = synaptics_detect(psmouse, 0);
        } while (error && ++retry < 3);
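
The synaptics hunk above makes the reconnect path reset the device, wait a second on every retry after the first (the touchpad may still be calibrating right after resume), and re-probe up to three times. A small sketch of that retry loop, with detect() standing in for the real probe:

    #include <stdio.h>
    #include <unistd.h>

    static int attempts;
    static int detect(void) { return ++attempts < 3 ? -1 : 0; }  /* succeeds on 3rd try */

    int main(void)
    {
        int error, retry = 0;
        do {
            /* device reset would go here */
            if (retry)
                sleep(1);       /* give the device time to settle after resume */
            error = detect();
        } while (error && ++retry < 3);
        printf("detect %s after %d attempt(s)\n", error ? "failed" : "ok", attempts);
        return error ? 1 : 0;
    }
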
 
index da0d876..2ee47d0 100644 (file)
@@ -1470,6 +1470,9 @@ static const struct wacom_features wacom_features_0xE3 =
 static const struct wacom_features wacom_features_0xE6 =
        { "Wacom ISDv4 E6",       WACOM_PKGLEN_TPC2FG,    27760, 15694,  255,
          0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0xEC =
+       { "Wacom ISDv4 EC",       WACOM_PKGLEN_GRAPHIRE,  25710, 14500,  255,
+         0, TABLETPC,    WACOM_INTUOS_RES, WACOM_INTUOS_RES };
 static const struct wacom_features wacom_features_0x47 =
        { "Wacom Intuos2 6x8",    WACOM_PKGLEN_INTUOS,    20320, 16240, 1023,
          31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1611,6 +1614,7 @@ const struct usb_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0xE2) },
        { USB_DEVICE_WACOM(0xE3) },
        { USB_DEVICE_WACOM(0xE6) },
+       { USB_DEVICE_WACOM(0xEC) },
        { USB_DEVICE_WACOM(0x47) },
        { USB_DEVICE_LENOVO(0x6004) },
        { }
index c0c7820..bdc447f 100644 (file)
@@ -405,6 +405,9 @@ int dmar_disabled = 0;
 int dmar_disabled = 1;
 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
 
+int intel_iommu_enabled = 0;
+EXPORT_SYMBOL_GPL(intel_iommu_enabled);
+
 static int dmar_map_gfx = 1;
 static int dmar_forcedac;
 static int intel_iommu_strict;
@@ -3524,7 +3527,7 @@ found:
        return 0;
 }
 
-int dmar_parse_rmrr_atsr_dev(void)
+int __init dmar_parse_rmrr_atsr_dev(void)
 {
        struct dmar_rmrr_unit *rmrr, *rmrr_n;
        struct dmar_atsr_unit *atsr, *atsr_n;
@@ -3647,6 +3650,8 @@ int __init intel_iommu_init(void)
 
        bus_register_notifier(&pci_bus_type, &device_nb);
 
+       intel_iommu_enabled = 1;
+
        return 0;
 }
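
The intel-iommu hunks above export an intel_iommu_enabled flag that is set only at the very end of intel_iommu_init(), so other code can test whether the IOMMU actually came up; the scope-parsing helpers also gain __init since they run only at boot. A sketch of the "publish the readiness flag last" pattern, with made-up names:

    #include <stdio.h>

    int iommu_enabled = 0;          /* exported flag, 0 until init completes */

    static int setup_hardware(void)     { return 0; }
    static int register_notifiers(void) { return 0; }

    static int iommu_init(void)
    {
        if (setup_hardware())
            return -1;
        if (register_notifiers())
            return -1;
        iommu_enabled = 1;          /* set only after everything above worked */
        return 0;
    }

    int main(void)
    {
        iommu_init();
        printf("iommu %s\n", iommu_enabled ? "enabled" : "disabled");
        return 0;
    }
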
 
index 07c9f18..6777ca0 100644 (file)
@@ -773,7 +773,7 @@ int __init parse_ioapics_under_ir(void)
        return ir_supported;
 }
 
-int ir_dev_scope_init(void)
+int __init ir_dev_scope_init(void)
 {
        if (!intr_remapping_enabled)
                return 0;
index 7878712..b690711 100644 (file)
@@ -1106,10 +1106,12 @@ void bitmap_write_all(struct bitmap *bitmap)
         */
        int i;
 
+       spin_lock_irq(&bitmap->lock);
        for (i = 0; i < bitmap->file_pages; i++)
                set_page_attr(bitmap, bitmap->filemap[i],
                              BITMAP_PAGE_NEEDWRITE);
        bitmap->allclean = 0;
+       spin_unlock_irq(&bitmap->lock);
 }
 
 static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
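
The hunk above puts the walk over bitmap->filemap in bitmap_write_all() under spin_lock_irq(&bitmap->lock), and the following hunk gives bitmap_file_set_bit() in bitmap_dirty_bits() the same treatment, so the page array cannot change underneath either caller. A userspace analogue with a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdio.h>

    #define PAGES 8

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int page_attr[PAGES];
    static int file_pages = PAGES;

    static void write_all(void)
    {
        pthread_mutex_lock(&lock);          /* spin_lock_irq(&bitmap->lock) */
        for (int i = 0; i < file_pages; i++)
            page_attr[i] |= 1;              /* BITMAP_PAGE_NEEDWRITE */
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        write_all();
        printf("page 0 attr %d\n", page_attr[0]);
        return 0;
    }
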
@@ -1605,7 +1607,9 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
        for (chunk = s; chunk <= e; chunk++) {
                sector_t sec = (sector_t)chunk << CHUNK_BLOCK_SHIFT(bitmap);
                bitmap_set_memory_bits(bitmap, sec, 1);
+               spin_lock_irq(&bitmap->lock);
                bitmap_file_set_bit(bitmap, sec);
+               spin_unlock_irq(&bitmap->lock);
                if (sec < bitmap->mddev->recovery_cp)
                        /* We are asserting that the array is dirty,
                         * so move the recovery_cp address back so
index 84acfe7..ee98173 100644 (file)
@@ -570,7 +570,7 @@ static void mddev_put(struct mddev *mddev)
            mddev->ctime == 0 && !mddev->hold_active) {
                /* Array is not configured at all, and not held active,
                 * so destroy it */
-               list_del(&mddev->all_mddevs);
+               list_del_init(&mddev->all_mddevs);
                bs = mddev->bio_set;
                mddev->bio_set = NULL;
                if (mddev->gendisk) {
@@ -2546,7 +2546,8 @@ state_show(struct md_rdev *rdev, char *page)
                sep = ",";
        }
        if (test_bit(Blocked, &rdev->flags) ||
-           rdev->badblocks.unacked_exist) {
+           (rdev->badblocks.unacked_exist
+            && !test_bit(Faulty, &rdev->flags))) {
                len += sprintf(page+len, "%sblocked", sep);
                sep = ",";
        }
@@ -3788,6 +3789,8 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
        if (err)
                return err;
        else {
+               if (mddev->hold_active == UNTIL_IOCTL)
+                       mddev->hold_active = 0;
                sysfs_notify_dirent_safe(mddev->sysfs_state);
                return len;
        }
@@ -4487,11 +4490,20 @@ md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 
        if (!entry->show)
                return -EIO;
+       spin_lock(&all_mddevs_lock);
+       if (list_empty(&mddev->all_mddevs)) {
+               spin_unlock(&all_mddevs_lock);
+               return -EBUSY;
+       }
+       mddev_get(mddev);
+       spin_unlock(&all_mddevs_lock);
+
        rv = mddev_lock(mddev);
        if (!rv) {
                rv = entry->show(mddev, page);
                mddev_unlock(mddev);
        }
+       mddev_put(mddev);
        return rv;
 }
 
@@ -4507,13 +4519,19 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
                return -EIO;
        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
+       spin_lock(&all_mddevs_lock);
+       if (list_empty(&mddev->all_mddevs)) {
+               spin_unlock(&all_mddevs_lock);
+               return -EBUSY;
+       }
+       mddev_get(mddev);
+       spin_unlock(&all_mddevs_lock);
        rv = mddev_lock(mddev);
-       if (mddev->hold_active == UNTIL_IOCTL)
-               mddev->hold_active = 0;
        if (!rv) {
                rv = entry->store(mddev, page, length);
                mddev_unlock(mddev);
        }
+       mddev_put(mddev);
        return rv;
 }
 
@@ -7840,6 +7858,7 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
                                  s + rdev->data_offset, sectors, acknowledged);
        if (rv) {
                /* Make sure they get written out promptly */
+               sysfs_notify_dirent_safe(rdev->sysfs_state);
                set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
                md_wakeup_thread(rdev->mddev->thread);
        }
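
The md.c hunks above make md_attr_show()/md_attr_store() check, under all_mddevs_lock, that the mddev is still on the all_mddevs list (hence list_del becomes list_del_init so the emptiness test works), take a reference with mddev_get(), and drop it with mddev_put() when the handler finishes, returning -EBUSY if the device is already being torn down. A sketch of that lifetime rule, with illustrative names:

    #include <pthread.h>
    #include <stdio.h>

    struct dev { int registered; int refs; };

    static pthread_mutex_t all_lock = PTHREAD_MUTEX_INITIALIZER;

    static int attr_show(struct dev *d)
    {
        pthread_mutex_lock(&all_lock);
        if (!d->registered) {           /* like list_empty(&mddev->all_mddevs) */
            pthread_mutex_unlock(&all_lock);
            return -1;                  /* -EBUSY: device is going away */
        }
        d->refs++;                      /* mddev_get() */
        pthread_mutex_unlock(&all_lock);

        printf("show: safe to touch the device\n");

        d->refs--;                      /* mddev_put() */
        return 0;
    }

    int main(void)
    {
        struct dev d = { 1, 0 };
        attr_show(&d);
        d.registered = 0;
        return attr_show(&d) == -1 ? 0 : 1;
    }
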
index 297e260..31670f8 100644 (file)
@@ -3036,6 +3036,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
                if (dev->written)
                        s->written++;
                rdev = rcu_dereference(conf->disks[i].rdev);
+               if (rdev && test_bit(Faulty, &rdev->flags))
+                       rdev = NULL;
                if (rdev) {
                        is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
                                             &first_bad, &bad_sectors);
@@ -3063,12 +3065,12 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
                        }
                } else if (test_bit(In_sync, &rdev->flags))
                        set_bit(R5_Insync, &dev->flags);
-               else if (!test_bit(Faulty, &rdev->flags)) {
+               else {
                        /* in sync if before recovery_offset */
                        if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
                                set_bit(R5_Insync, &dev->flags);
                }
-               if (test_bit(R5_WriteError, &dev->flags)) {
+               if (rdev && test_bit(R5_WriteError, &dev->flags)) {
                        clear_bit(R5_Insync, &dev->flags);
                        if (!test_bit(Faulty, &rdev->flags)) {
                                s->handle_bad_blocks = 1;
@@ -3076,7 +3078,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
                        } else
                                clear_bit(R5_WriteError, &dev->flags);
                }
-               if (test_bit(R5_MadeGood, &dev->flags)) {
+               if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
                        if (!test_bit(Faulty, &rdev->flags)) {
                                s->handle_bad_blocks = 1;
                                atomic_inc(&rdev->nr_pending);
index 7eb1bf7..5d02221 100644 (file)
@@ -488,9 +488,10 @@ static int mxl5007t_write_regs(struct mxl5007t_state *state,
 
 static int mxl5007t_read_reg(struct mxl5007t_state *state, u8 reg, u8 *val)
 {
+       u8 buf[2] = { 0xfb, reg };
        struct i2c_msg msg[] = {
                { .addr = state->i2c_props.addr, .flags = 0,
-                 .buf = &reg, .len = 1 },
+                 .buf = buf, .len = 2 },
                { .addr = state->i2c_props.addr, .flags = I2C_M_RD,
                  .buf = val, .len = 1 },
        };
index aacfe23..4fc2973 100644 (file)
@@ -141,7 +141,7 @@ static int tda18218_set_params(struct dvb_frontend *fe,
        switch (params->u.ofdm.bandwidth) {
        case BANDWIDTH_6_MHZ:
                LP_Fc = 0;
-               LO_Frac = params->frequency + 4000000;
+               LO_Frac = params->frequency + 3000000;
                break;
        case BANDWIDTH_7_MHZ:
                LP_Fc = 1;
index 303f22e..01bb8da 100644 (file)
@@ -189,7 +189,7 @@ struct ati_remote {
        dma_addr_t inbuf_dma;
        dma_addr_t outbuf_dma;
 
-       unsigned char old_data[2];  /* Detect duplicate events */
+       unsigned char old_data;     /* Detect duplicate events */
        unsigned long old_jiffies;
        unsigned long acc_jiffies;  /* handle acceleration */
        unsigned long first_jiffies;
@@ -221,35 +221,35 @@ struct ati_remote {
 /* Translation table from hardware messages to input events. */
 static const struct {
        short kind;
-       unsigned char data1, data2;
+       unsigned char data;
        int type;
        unsigned int code;
        int value;
 }  ati_remote_tbl[] = {
        /* Directional control pad axes */
-       {KIND_ACCEL,   0x35, 0x70, EV_REL, REL_X, -1},   /* left */
-       {KIND_ACCEL,   0x36, 0x71, EV_REL, REL_X, 1},    /* right */
-       {KIND_ACCEL,   0x37, 0x72, EV_REL, REL_Y, -1},   /* up */
-       {KIND_ACCEL,   0x38, 0x73, EV_REL, REL_Y, 1},    /* down */
+       {KIND_ACCEL,   0x70, EV_REL, REL_X, -1},   /* left */
+       {KIND_ACCEL,   0x71, EV_REL, REL_X, 1},    /* right */
+       {KIND_ACCEL,   0x72, EV_REL, REL_Y, -1},   /* up */
+       {KIND_ACCEL,   0x73, EV_REL, REL_Y, 1},    /* down */
        /* Directional control pad diagonals */
-       {KIND_LU,      0x39, 0x74, EV_REL, 0, 0},        /* left up */
-       {KIND_RU,      0x3a, 0x75, EV_REL, 0, 0},        /* right up */
-       {KIND_LD,      0x3c, 0x77, EV_REL, 0, 0},        /* left down */
-       {KIND_RD,      0x3b, 0x76, EV_REL, 0, 0},        /* right down */
+       {KIND_LU,      0x74, EV_REL, 0, 0},        /* left up */
+       {KIND_RU,      0x75, EV_REL, 0, 0},        /* right up */
+       {KIND_LD,      0x77, EV_REL, 0, 0},        /* left down */
+       {KIND_RD,      0x76, EV_REL, 0, 0},        /* right down */
 
        /* "Mouse button" buttons */
-       {KIND_LITERAL, 0x3d, 0x78, EV_KEY, BTN_LEFT, 1}, /* left btn down */
-       {KIND_LITERAL, 0x3e, 0x79, EV_KEY, BTN_LEFT, 0}, /* left btn up */
-       {KIND_LITERAL, 0x41, 0x7c, EV_KEY, BTN_RIGHT, 1},/* right btn down */
-       {KIND_LITERAL, 0x42, 0x7d, EV_KEY, BTN_RIGHT, 0},/* right btn up */
+       {KIND_LITERAL, 0x78, EV_KEY, BTN_LEFT, 1}, /* left btn down */
+       {KIND_LITERAL, 0x79, EV_KEY, BTN_LEFT, 0}, /* left btn up */
+       {KIND_LITERAL, 0x7c, EV_KEY, BTN_RIGHT, 1},/* right btn down */
+       {KIND_LITERAL, 0x7d, EV_KEY, BTN_RIGHT, 0},/* right btn up */
 
        /* Artificial "doubleclick" events are generated by the hardware.
         * They are mapped to the "side" and "extra" mouse buttons here. */
-       {KIND_FILTERED, 0x3f, 0x7a, EV_KEY, BTN_SIDE, 1}, /* left dblclick */
-       {KIND_FILTERED, 0x43, 0x7e, EV_KEY, BTN_EXTRA, 1},/* right dblclick */
+       {KIND_FILTERED, 0x7a, EV_KEY, BTN_SIDE, 1}, /* left dblclick */
+       {KIND_FILTERED, 0x7e, EV_KEY, BTN_EXTRA, 1},/* right dblclick */
 
        /* Non-mouse events are handled by rc-core */
-       {KIND_END, 0x00, 0x00, EV_MAX + 1, 0, 0}
+       {KIND_END, 0x00, EV_MAX + 1, 0, 0}
 };
 
 /* Local function prototypes */
@@ -396,25 +396,6 @@ static int ati_remote_sendpacket(struct ati_remote *ati_remote, u16 cmd, unsigne
        return retval;
 }
 
-/*
- *     ati_remote_event_lookup
- */
-static int ati_remote_event_lookup(int rem, unsigned char d1, unsigned char d2)
-{
-       int i;
-
-       for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++) {
-               /*
-                * Decide if the table entry matches the remote input.
-                */
-               if (ati_remote_tbl[i].data1 == d1 &&
-                   ati_remote_tbl[i].data2 == d2)
-                       return i;
-
-       }
-       return -1;
-}
-
 /*
  *     ati_remote_compute_accel
  *
@@ -463,7 +444,15 @@ static void ati_remote_input_report(struct urb *urb)
        int index = -1;
        int acc;
        int remote_num;
-       unsigned char scancode[2];
+       unsigned char scancode;
+       int i;
+
+       /*
+        * data[0] = 0x14
+        * data[1] = data[2] + data[3] + 0xd5 (a checksum byte)
+        * data[2] = the key code (with toggle bit in MSB with some models)
+        * data[3] = channel << 4 (the low 4 bits must be zero)
+        */
 
        /* Deal with strange looking inputs */
        if ( (urb->actual_length != 4) || (data[0] != 0x14) ||
@@ -472,6 +461,13 @@ static void ati_remote_input_report(struct urb *urb)
                return;
        }
 
+       if (data[1] != ((data[2] + data[3] + 0xd5) & 0xff)) {
+               dbginfo(&ati_remote->interface->dev,
+                       "wrong checksum in input: %02x %02x %02x %02x\n",
+                       data[0], data[1], data[2], data[3]);
+               return;
+       }
+
        /* Mask unwanted remote channels.  */
        /* note: remote_num is 0-based, channel 1 on remote == 0 here */
        remote_num = (data[3] >> 4) & 0x0f;
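
The hunk above validates every 4-byte ati_remote report against the layout documented in the comment: data[1] must equal data[2] + data[3] + 0xd5 modulo 256 before the key code in data[2] is trusted. A small sketch of that additive-checksum check:

    #include <stdio.h>

    static int report_ok(const unsigned char d[4])
    {
        if (d[0] != 0x14)
            return 0;
        return d[1] == ((d[2] + d[3] + 0xd5) & 0xff);
    }

    int main(void)
    {
        unsigned char good[4] = { 0x14, 0, 0x78, 0x10 };
        good[1] = (good[2] + good[3] + 0xd5) & 0xff;    /* correct checksum */
        unsigned char bad[4]  = { 0x14, 0x00, 0x78, 0x10 };

        printf("good: %d, bad: %d\n", report_ok(good), report_ok(bad));
        return 0;
    }
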
@@ -482,31 +478,30 @@ static void ati_remote_input_report(struct urb *urb)
                return;
        }
 
-       scancode[0] = (((data[1] - ((remote_num + 1) << 4)) & 0xf0) | (data[1] & 0x0f));
-
        /*
-        * Some devices (e.g. SnapStream Firefly) use 8080 as toggle code,
-        * so we have to clear them. The first bit is a bit tricky as the
-        * "non-toggled" state depends on remote_num, so we xor it with the
-        * second bit which is only used for toggle.
+        * MSB is a toggle code, though only used by some devices
+        * (e.g. SnapStream Firefly)
         */
-       scancode[0] ^= (data[2] & 0x80);
-
-       scancode[1] = data[2] & ~0x80;
+       scancode = data[2] & 0x7f;
 
-       /* Look up event code index in mouse translation table. */
-       index = ati_remote_event_lookup(remote_num, scancode[0], scancode[1]);
+       /* Look up event code index in the mouse translation table. */
+       for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++) {
+               if (scancode == ati_remote_tbl[i].data) {
+                       index = i;
+                       break;
+               }
+       }
 
        if (index >= 0) {
                dbginfo(&ati_remote->interface->dev,
-                       "channel 0x%02x; mouse data %02x,%02x; index %d; keycode %d\n",
-                       remote_num, data[1], data[2], index, ati_remote_tbl[index].code);
+                       "channel 0x%02x; mouse data %02x; index %d; keycode %d\n",
+                       remote_num, data[2], index, ati_remote_tbl[index].code);
                if (!dev)
                        return; /* no mouse device */
        } else
                dbginfo(&ati_remote->interface->dev,
-                       "channel 0x%02x; key data %02x,%02x, scancode %02x,%02x\n",
-                       remote_num, data[1], data[2], scancode[0], scancode[1]);
+                       "channel 0x%02x; key data %02x, scancode %02x\n",
+                       remote_num, data[2], scancode);
 
 
        if (index >= 0 && ati_remote_tbl[index].kind == KIND_LITERAL) {
@@ -523,8 +518,7 @@ static void ati_remote_input_report(struct urb *urb)
                unsigned long now = jiffies;
 
                /* Filter duplicate events which happen "too close" together. */
-               if (ati_remote->old_data[0] == data[1] &&
-                   ati_remote->old_data[1] == data[2] &&
+               if (ati_remote->old_data == data[2] &&
                    time_before(now, ati_remote->old_jiffies +
                                     msecs_to_jiffies(repeat_filter))) {
                        ati_remote->repeat_count++;
@@ -533,8 +527,7 @@ static void ati_remote_input_report(struct urb *urb)
                        ati_remote->first_jiffies = now;
                }
 
-               ati_remote->old_data[0] = data[1];
-               ati_remote->old_data[1] = data[2];
+               ati_remote->old_data = data[2];
                ati_remote->old_jiffies = now;
 
                /* Ensure we skip at least the 4 first duplicate events (generated
@@ -549,14 +542,13 @@ static void ati_remote_input_report(struct urb *urb)
 
                if (index < 0) {
                        /* Not a mouse event, hand it to rc-core. */
-                       u32 rc_code = (scancode[0] << 8) | scancode[1];
 
                        /*
                         * We don't use the rc-core repeat handling yet as
                         * it would cause ghost repeats which would be a
                         * regression for this driver.
                         */
-                       rc_keydown_notimeout(ati_remote->rdev, rc_code,
+                       rc_keydown_notimeout(ati_remote->rdev, scancode,
                                             data[2]);
                        rc_keyup(ati_remote->rdev);
                        return;
@@ -607,8 +599,7 @@ static void ati_remote_input_report(struct urb *urb)
                input_sync(dev);
 
                ati_remote->old_jiffies = jiffies;
-               ati_remote->old_data[0] = data[1];
-               ati_remote->old_data[1] = data[2];
+               ati_remote->old_data = data[2];
        }
 }
 
index e1b8b26..8150644 100644 (file)
 #include <media/rc-map.h>
 
 static struct rc_map_table ati_x10[] = {
-       { 0xd20d, KEY_1 },
-       { 0xd30e, KEY_2 },
-       { 0xd40f, KEY_3 },
-       { 0xd510, KEY_4 },
-       { 0xd611, KEY_5 },
-       { 0xd712, KEY_6 },
-       { 0xd813, KEY_7 },
-       { 0xd914, KEY_8 },
-       { 0xda15, KEY_9 },
-       { 0xdc17, KEY_0 },
-       { 0xc500, KEY_A },
-       { 0xc601, KEY_B },
-       { 0xde19, KEY_C },
-       { 0xe01b, KEY_D },
-       { 0xe621, KEY_E },
-       { 0xe823, KEY_F },
+       { 0x0d, KEY_1 },
+       { 0x0e, KEY_2 },
+       { 0x0f, KEY_3 },
+       { 0x10, KEY_4 },
+       { 0x11, KEY_5 },
+       { 0x12, KEY_6 },
+       { 0x13, KEY_7 },
+       { 0x14, KEY_8 },
+       { 0x15, KEY_9 },
+       { 0x17, KEY_0 },
+       { 0x00, KEY_A },
+       { 0x01, KEY_B },
+       { 0x19, KEY_C },
+       { 0x1b, KEY_D },
+       { 0x21, KEY_E },
+       { 0x23, KEY_F },
 
-       { 0xdd18, KEY_KPENTER },    /* "check" */
-       { 0xdb16, KEY_MENU },       /* "menu" */
-       { 0xc702, KEY_POWER },      /* Power */
-       { 0xc803, KEY_TV },         /* TV */
-       { 0xc904, KEY_DVD },        /* DVD */
-       { 0xca05, KEY_WWW },        /* WEB */
-       { 0xcb06, KEY_BOOKMARKS },  /* "book" */
-       { 0xcc07, KEY_EDIT },       /* "hand" */
-       { 0xe11c, KEY_COFFEE },     /* "timer" */
-       { 0xe520, KEY_FRONT },      /* "max" */
-       { 0xe21d, KEY_LEFT },       /* left */
-       { 0xe41f, KEY_RIGHT },      /* right */
-       { 0xe722, KEY_DOWN },       /* down */
-       { 0xdf1a, KEY_UP },         /* up */
-       { 0xe31e, KEY_OK },         /* "OK" */
-       { 0xce09, KEY_VOLUMEDOWN }, /* VOL + */
-       { 0xcd08, KEY_VOLUMEUP },   /* VOL - */
-       { 0xcf0a, KEY_MUTE },       /* MUTE  */
-       { 0xd00b, KEY_CHANNELUP },  /* CH + */
-       { 0xd10c, KEY_CHANNELDOWN },/* CH - */
-       { 0xec27, KEY_RECORD },     /* ( o) red */
-       { 0xea25, KEY_PLAY },       /* ( >) */
-       { 0xe924, KEY_REWIND },     /* (<<) */
-       { 0xeb26, KEY_FORWARD },    /* (>>) */
-       { 0xed28, KEY_STOP },       /* ([]) */
-       { 0xee29, KEY_PAUSE },      /* ('') */
-       { 0xf02b, KEY_PREVIOUS },   /* (<-) */
-       { 0xef2a, KEY_NEXT },       /* (>+) */
-       { 0xf22d, KEY_INFO },       /* PLAYING */
-       { 0xf32e, KEY_HOME },       /* TOP */
-       { 0xf42f, KEY_END },        /* END */
-       { 0xf530, KEY_SELECT },     /* SELECT */
+       { 0x18, KEY_KPENTER },    /* "check" */
+       { 0x16, KEY_MENU },       /* "menu" */
+       { 0x02, KEY_POWER },      /* Power */
+       { 0x03, KEY_TV },         /* TV */
+       { 0x04, KEY_DVD },        /* DVD */
+       { 0x05, KEY_WWW },        /* WEB */
+       { 0x06, KEY_BOOKMARKS },  /* "book" */
+       { 0x07, KEY_EDIT },       /* "hand" */
+       { 0x1c, KEY_COFFEE },     /* "timer" */
+       { 0x20, KEY_FRONT },      /* "max" */
+       { 0x1d, KEY_LEFT },       /* left */
+       { 0x1f, KEY_RIGHT },      /* right */
+       { 0x22, KEY_DOWN },       /* down */
+       { 0x1a, KEY_UP },         /* up */
+       { 0x1e, KEY_OK },         /* "OK" */
+       { 0x09, KEY_VOLUMEDOWN }, /* VOL + */
+       { 0x08, KEY_VOLUMEUP },   /* VOL - */
+       { 0x0a, KEY_MUTE },       /* MUTE  */
+       { 0x0b, KEY_CHANNELUP },  /* CH + */
+       { 0x0c, KEY_CHANNELDOWN },/* CH - */
+       { 0x27, KEY_RECORD },     /* ( o) red */
+       { 0x25, KEY_PLAY },       /* ( >) */
+       { 0x24, KEY_REWIND },     /* (<<) */
+       { 0x26, KEY_FORWARD },    /* (>>) */
+       { 0x28, KEY_STOP },       /* ([]) */
+       { 0x29, KEY_PAUSE },      /* ('') */
+       { 0x2b, KEY_PREVIOUS },   /* (<-) */
+       { 0x2a, KEY_NEXT },       /* (>+) */
+       { 0x2d, KEY_INFO },       /* PLAYING */
+       { 0x2e, KEY_HOME },       /* TOP */
+       { 0x2f, KEY_END },        /* END */
+       { 0x30, KEY_SELECT },     /* SELECT */
 };
 
 static struct rc_map_list ati_x10_map = {
index 09e2cc0..479cdb8 100644 (file)
 #include <media/rc-map.h>
 
 static struct rc_map_table medion_x10[] = {
-       { 0xf12c, KEY_TV },    /* TV */
-       { 0xf22d, KEY_VCR },   /* VCR */
-       { 0xc904, KEY_DVD },   /* DVD */
-       { 0xcb06, KEY_AUDIO }, /* MUSIC */
-
-       { 0xf32e, KEY_RADIO },     /* RADIO */
-       { 0xca05, KEY_DIRECTORY }, /* PHOTO */
-       { 0xf42f, KEY_INFO },      /* TV-PREVIEW */
-       { 0xf530, KEY_LIST },      /* CHANNEL-LST */
-
-       { 0xe01b, KEY_SETUP }, /* SETUP */
-       { 0xf631, KEY_VIDEO }, /* VIDEO DESKTOP */
-
-       { 0xcd08, KEY_VOLUMEDOWN },  /* VOL - */
-       { 0xce09, KEY_VOLUMEUP },    /* VOL + */
-       { 0xd00b, KEY_CHANNELUP },   /* CHAN + */
-       { 0xd10c, KEY_CHANNELDOWN }, /* CHAN - */
-       { 0xc500, KEY_MUTE },        /* MUTE */
-
-       { 0xf732, KEY_RED }, /* red */
-       { 0xf833, KEY_GREEN }, /* green */
-       { 0xf934, KEY_YELLOW }, /* yellow */
-       { 0xfa35, KEY_BLUE }, /* blue */
-       { 0xdb16, KEY_TEXT }, /* TXT */
-
-       { 0xd20d, KEY_1 },
-       { 0xd30e, KEY_2 },
-       { 0xd40f, KEY_3 },
-       { 0xd510, KEY_4 },
-       { 0xd611, KEY_5 },
-       { 0xd712, KEY_6 },
-       { 0xd813, KEY_7 },
-       { 0xd914, KEY_8 },
-       { 0xda15, KEY_9 },
-       { 0xdc17, KEY_0 },
-       { 0xe11c, KEY_SEARCH }, /* TV/RAD, CH SRC */
-       { 0xe520, KEY_DELETE }, /* DELETE */
-
-       { 0xfb36, KEY_KEYBOARD }, /* RENAME */
-       { 0xdd18, KEY_SCREEN },   /* SNAPSHOT */
-
-       { 0xdf1a, KEY_UP },    /* up */
-       { 0xe722, KEY_DOWN },  /* down */
-       { 0xe21d, KEY_LEFT },  /* left */
-       { 0xe41f, KEY_RIGHT }, /* right */
-       { 0xe31e, KEY_OK },    /* OK */
-
-       { 0xfc37, KEY_SELECT }, /* ACQUIRE IMAGE */
-       { 0xfd38, KEY_EDIT },   /* EDIT IMAGE */
-
-       { 0xe924, KEY_REWIND },   /* rewind  (<<) */
-       { 0xea25, KEY_PLAY },     /* play    ( >) */
-       { 0xeb26, KEY_FORWARD },  /* forward (>>) */
-       { 0xec27, KEY_RECORD },   /* record  ( o) */
-       { 0xed28, KEY_STOP },     /* stop    ([]) */
-       { 0xee29, KEY_PAUSE },    /* pause   ('') */
-
-       { 0xe621, KEY_PREVIOUS },        /* prev */
-       { 0xfe39, KEY_SWITCHVIDEOMODE }, /* F SCR */
-       { 0xe823, KEY_NEXT },            /* next */
-       { 0xde19, KEY_MENU },            /* MENU */
-       { 0xff3a, KEY_LANGUAGE },        /* AUDIO */
-
-       { 0xc702, KEY_POWER }, /* POWER */
+       { 0x2c, KEY_TV },    /* TV */
+       { 0x2d, KEY_VCR },   /* VCR */
+       { 0x04, KEY_DVD },   /* DVD */
+       { 0x06, KEY_AUDIO }, /* MUSIC */
+
+       { 0x2e, KEY_RADIO },     /* RADIO */
+       { 0x05, KEY_DIRECTORY }, /* PHOTO */
+       { 0x2f, KEY_INFO },      /* TV-PREVIEW */
+       { 0x30, KEY_LIST },      /* CHANNEL-LST */
+
+       { 0x1b, KEY_SETUP }, /* SETUP */
+       { 0x31, KEY_VIDEO }, /* VIDEO DESKTOP */
+
+       { 0x08, KEY_VOLUMEDOWN },  /* VOL - */
+       { 0x09, KEY_VOLUMEUP },    /* VOL + */
+       { 0x0b, KEY_CHANNELUP },   /* CHAN + */
+       { 0x0c, KEY_CHANNELDOWN }, /* CHAN - */
+       { 0x00, KEY_MUTE },        /* MUTE */
+
+       { 0x32, KEY_RED }, /* red */
+       { 0x33, KEY_GREEN }, /* green */
+       { 0x34, KEY_YELLOW }, /* yellow */
+       { 0x35, KEY_BLUE }, /* blue */
+       { 0x16, KEY_TEXT }, /* TXT */
+
+       { 0x0d, KEY_1 },
+       { 0x0e, KEY_2 },
+       { 0x0f, KEY_3 },
+       { 0x10, KEY_4 },
+       { 0x11, KEY_5 },
+       { 0x12, KEY_6 },
+       { 0x13, KEY_7 },
+       { 0x14, KEY_8 },
+       { 0x15, KEY_9 },
+       { 0x17, KEY_0 },
+       { 0x1c, KEY_SEARCH }, /* TV/RAD, CH SRC */
+       { 0x20, KEY_DELETE }, /* DELETE */
+
+       { 0x36, KEY_KEYBOARD }, /* RENAME */
+       { 0x18, KEY_SCREEN },   /* SNAPSHOT */
+
+       { 0x1a, KEY_UP },    /* up */
+       { 0x22, KEY_DOWN },  /* down */
+       { 0x1d, KEY_LEFT },  /* left */
+       { 0x1f, KEY_RIGHT }, /* right */
+       { 0x1e, KEY_OK },    /* OK */
+
+       { 0x37, KEY_SELECT }, /* ACQUIRE IMAGE */
+       { 0x38, KEY_EDIT },   /* EDIT IMAGE */
+
+       { 0x24, KEY_REWIND },   /* rewind  (<<) */
+       { 0x25, KEY_PLAY },     /* play    ( >) */
+       { 0x26, KEY_FORWARD },  /* forward (>>) */
+       { 0x27, KEY_RECORD },   /* record  ( o) */
+       { 0x28, KEY_STOP },     /* stop    ([]) */
+       { 0x29, KEY_PAUSE },    /* pause   ('') */
+
+       { 0x21, KEY_PREVIOUS },        /* prev */
+       { 0x39, KEY_SWITCHVIDEOMODE }, /* F SCR */
+       { 0x23, KEY_NEXT },            /* next */
+       { 0x19, KEY_MENU },            /* MENU */
+       { 0x3a, KEY_LANGUAGE },        /* AUDIO */
+
+       { 0x02, KEY_POWER }, /* POWER */
 };
 
 static struct rc_map_list medion_x10_map = {
index ef14652..c7f33ec 100644 (file)
 #include <media/rc-map.h>
 
 static struct rc_map_table snapstream_firefly[] = {
-       { 0xf12c, KEY_ZOOM },       /* Maximize */
-       { 0xc702, KEY_CLOSE },
-
-       { 0xd20d, KEY_1 },
-       { 0xd30e, KEY_2 },
-       { 0xd40f, KEY_3 },
-       { 0xd510, KEY_4 },
-       { 0xd611, KEY_5 },
-       { 0xd712, KEY_6 },
-       { 0xd813, KEY_7 },
-       { 0xd914, KEY_8 },
-       { 0xda15, KEY_9 },
-       { 0xdc17, KEY_0 },
-       { 0xdb16, KEY_BACK },
-       { 0xdd18, KEY_KPENTER },    /* ent */
-
-       { 0xce09, KEY_VOLUMEUP },
-       { 0xcd08, KEY_VOLUMEDOWN },
-       { 0xcf0a, KEY_MUTE },
-       { 0xd00b, KEY_CHANNELUP },
-       { 0xd10c, KEY_CHANNELDOWN },
-       { 0xc500, KEY_VENDOR },     /* firefly */
-
-       { 0xf32e, KEY_INFO },
-       { 0xf42f, KEY_OPTION },
-
-       { 0xe21d, KEY_LEFT },
-       { 0xe41f, KEY_RIGHT },
-       { 0xe722, KEY_DOWN },
-       { 0xdf1a, KEY_UP },
-       { 0xe31e, KEY_OK },
-
-       { 0xe11c, KEY_MENU },
-       { 0xe520, KEY_EXIT },
-
-       { 0xec27, KEY_RECORD },
-       { 0xea25, KEY_PLAY },
-       { 0xed28, KEY_STOP },
-       { 0xe924, KEY_REWIND },
-       { 0xeb26, KEY_FORWARD },
-       { 0xee29, KEY_PAUSE },
-       { 0xf02b, KEY_PREVIOUS },
-       { 0xef2a, KEY_NEXT },
-
-       { 0xcb06, KEY_AUDIO },      /* Music */
-       { 0xca05, KEY_IMAGES },     /* Photos */
-       { 0xc904, KEY_DVD },
-       { 0xc803, KEY_TV },
-       { 0xcc07, KEY_VIDEO },
-
-       { 0xc601, KEY_HELP },
-       { 0xf22d, KEY_MODE },       /* Mouse */
-
-       { 0xde19, KEY_A },
-       { 0xe01b, KEY_B },
-       { 0xe621, KEY_C },
-       { 0xe823, KEY_D },
+       { 0x2c, KEY_ZOOM },       /* Maximize */
+       { 0x02, KEY_CLOSE },
+
+       { 0x0d, KEY_1 },
+       { 0x0e, KEY_2 },
+       { 0x0f, KEY_3 },
+       { 0x10, KEY_4 },
+       { 0x11, KEY_5 },
+       { 0x12, KEY_6 },
+       { 0x13, KEY_7 },
+       { 0x14, KEY_8 },
+       { 0x15, KEY_9 },
+       { 0x17, KEY_0 },
+       { 0x16, KEY_BACK },
+       { 0x18, KEY_KPENTER },    /* ent */
+
+       { 0x09, KEY_VOLUMEUP },
+       { 0x08, KEY_VOLUMEDOWN },
+       { 0x0a, KEY_MUTE },
+       { 0x0b, KEY_CHANNELUP },
+       { 0x0c, KEY_CHANNELDOWN },
+       { 0x00, KEY_VENDOR },     /* firefly */
+
+       { 0x2e, KEY_INFO },
+       { 0x2f, KEY_OPTION },
+
+       { 0x1d, KEY_LEFT },
+       { 0x1f, KEY_RIGHT },
+       { 0x22, KEY_DOWN },
+       { 0x1a, KEY_UP },
+       { 0x1e, KEY_OK },
+
+       { 0x1c, KEY_MENU },
+       { 0x20, KEY_EXIT },
+
+       { 0x27, KEY_RECORD },
+       { 0x25, KEY_PLAY },
+       { 0x28, KEY_STOP },
+       { 0x24, KEY_REWIND },
+       { 0x26, KEY_FORWARD },
+       { 0x29, KEY_PAUSE },
+       { 0x2b, KEY_PREVIOUS },
+       { 0x2a, KEY_NEXT },
+
+       { 0x06, KEY_AUDIO },      /* Music */
+       { 0x05, KEY_IMAGES },     /* Photos */
+       { 0x04, KEY_DVD },
+       { 0x03, KEY_TV },
+       { 0x07, KEY_VIDEO },
+
+       { 0x01, KEY_HELP },
+       { 0x2d, KEY_MODE },       /* Mouse */
+
+       { 0x19, KEY_A },
+       { 0x1b, KEY_B },
+       { 0x21, KEY_C },
+       { 0x23, KEY_D },
 };
 
 static struct rc_map_list snapstream_firefly_map = {
index 39fc923..1c6015a 100644 (file)
@@ -162,11 +162,14 @@ static void hauppauge_eeprom(struct au0828_dev *dev, u8 *eeprom_data)
        switch (tv.model) {
        case 72000: /* WinTV-HVR950q (Retail, IR, ATSC/QAM */
        case 72001: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+       case 72101: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+       case 72201: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
        case 72211: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
        case 72221: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
        case 72231: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
        case 72241: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM and analog video */
        case 72251: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+       case 72261: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
        case 72301: /* WinTV-HVR850 (Retail, IR, ATSC and analog video */
        case 72500: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM */
                break;
@@ -324,6 +327,10 @@ struct usb_device_id au0828_usb_id_table[] = {
                .driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL },
        { USB_DEVICE(0x2040, 0x8200),
                .driver_info = AU0828_BOARD_HAUPPAUGE_WOODBURY },
+       { USB_DEVICE(0x2040, 0x7260),
+               .driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
+       { USB_DEVICE(0x2040, 0x7213),
+               .driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
        { },
 };
 
index 89d09a8..82c8817 100644 (file)
@@ -162,7 +162,6 @@ struct m5mols_version {
  * @pad: media pad
  * @ffmt: current fmt according to resolution type
  * @res_type: current resolution type
- * @code: current code
  * @irq_waitq: waitqueue for the capture
  * @work_irq: workqueue for the IRQ
  * @flags: state variable for the interrupt handler
@@ -192,7 +191,6 @@ struct m5mols_info {
        struct media_pad pad;
        struct v4l2_mbus_framefmt ffmt[M5MOLS_RESTYPE_MAX];
        int res_type;
-       enum v4l2_mbus_pixelcode code;
        wait_queue_head_t irq_waitq;
        struct work_struct work_irq;
        unsigned long flags;
index 05ab370..e0f09e5 100644 (file)
@@ -334,7 +334,7 @@ int m5mols_mode(struct m5mols_info *info, u8 mode)
        int ret = -EINVAL;
        u8 reg;
 
-       if (mode < REG_PARAMETER && mode > REG_CAPTURE)
+       if (mode < REG_PARAMETER || mode > REG_CAPTURE)
                return ret;
 
        ret = m5mols_read_u8(sd, SYSTEM_SYSMODE, &reg);
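
The one-line m5mols fix above corrects an impossible condition: with REG_PARAMETER below REG_CAPTURE, "mode < REG_PARAMETER && mode > REG_CAPTURE" can never be true, so out-of-range modes were never rejected; "||" is the correct out-of-range test. A sketch demonstrating the difference (the register values here are placeholders):

    #include <stdio.h>

    enum { REG_PARAMETER = 1, REG_MONITOR = 2, REG_CAPTURE = 3 };

    static int valid_broken(int m) { return !(m < REG_PARAMETER && m > REG_CAPTURE); }
    static int valid_fixed(int m)  { return !(m < REG_PARAMETER || m > REG_CAPTURE); }

    int main(void)
    {
        for (int m = 0; m <= 4; m++)
            printf("mode %d: broken says %d, fixed says %d\n",
                   m, valid_broken(m), valid_fixed(m));
        /* broken accepts 0 and 4 as well; fixed accepts only 1..3 */
        return 0;
    }
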
@@ -511,9 +511,6 @@ static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
        struct m5mols_info *info = to_m5mols(sd);
        struct v4l2_mbus_framefmt *format;
 
-       if (fmt->pad != 0)
-               return -EINVAL;
-
        format = __find_format(info, fh, fmt->which, info->res_type);
        if (!format)
                return -EINVAL;
@@ -532,9 +529,6 @@ static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
        u32 resolution = 0;
        int ret;
 
-       if (fmt->pad != 0)
-               return -EINVAL;
-
        ret = __find_resolution(sd, format, &type, &resolution);
        if (ret < 0)
                return ret;
@@ -543,13 +537,14 @@ static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
        if (!sfmt)
                return 0;
 
-       *sfmt           = m5mols_default_ffmt[type];
-       sfmt->width     = format->width;
-       sfmt->height    = format->height;
+
+       format->code = m5mols_default_ffmt[type].code;
+       format->colorspace = V4L2_COLORSPACE_JPEG;
+       format->field = V4L2_FIELD_NONE;
 
        if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+               *sfmt = *format;
                info->resolution = resolution;
-               info->code = format->code;
                info->res_type = type;
        }
 
@@ -626,13 +621,14 @@ static int m5mols_start_monitor(struct m5mols_info *info)
 static int m5mols_s_stream(struct v4l2_subdev *sd, int enable)
 {
        struct m5mols_info *info = to_m5mols(sd);
+       u32 code = info->ffmt[info->res_type].code;
 
        if (enable) {
                int ret = -EINVAL;
 
-               if (is_code(info->code, M5MOLS_RESTYPE_MONITOR))
+               if (is_code(code, M5MOLS_RESTYPE_MONITOR))
                        ret = m5mols_start_monitor(info);
-               if (is_code(info->code, M5MOLS_RESTYPE_CAPTURE))
+               if (is_code(code, M5MOLS_RESTYPE_CAPTURE))
                        ret = m5mols_start_capture(info);
 
                return ret;
index cf2c0fb..398f96f 100644 (file)
@@ -955,6 +955,7 @@ static int mt9m111_probe(struct i2c_client *client,
        mt9m111->rect.height    = MT9M111_MAX_HEIGHT;
        mt9m111->fmt            = &mt9m111_colour_fmts[0];
        mt9m111->lastpage       = -1;
+       mutex_init(&mt9m111->power_lock);
 
        ret = mt9m111_video_probe(client);
        if (ret) {
index 32114a3..7b34b11 100644 (file)
@@ -1083,8 +1083,10 @@ static int mt9t112_probe(struct i2c_client *client,
        v4l2_i2c_subdev_init(&priv->subdev, client, &mt9t112_subdev_ops);
 
        ret = mt9t112_camera_probe(client);
-       if (ret)
+       if (ret) {
                kfree(priv);
+               return ret;
+       }
 
        /* Cannot fail: using the default supported pixel code */
        mt9t112_set_params(priv, &rect, V4L2_MBUS_FMT_UYVY8_2X8);
index 9c5c19f..ee0d0b3 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/irq.h>
 #include <linux/videodev2.h>
 #include <linux/dma-mapping.h>
+#include <linux/slab.h>
 
 #include <media/videobuf-dma-contig.h>
 #include <media/v4l2-device.h>
@@ -2169,6 +2170,14 @@ static int __init omap_vout_probe(struct platform_device *pdev)
        vid_dev->num_displays = 0;
        for_each_dss_dev(dssdev) {
                omap_dss_get_device(dssdev);
+
+               if (!dssdev->driver) {
+                       dev_warn(&pdev->dev, "no driver for display: %s\n",
+                                       dssdev->name);
+                       omap_dss_put_device(dssdev);
+                       continue;
+               }
+
                vid_dev->displays[vid_dev->num_displays++] = dssdev;
        }
 
index e87ae2f..6a6cf38 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
index 1d54b86..3ea38a8 100644 (file)
@@ -506,7 +506,7 @@ int omap24xxcam_sgdma_queue(struct omap24xxcam_sgdma *sgdma,
        unsigned long flags;
        struct sgdma_state *sg_state;
 
-       if ((sglen < 0) || ((sglen > 0) & !sglist))
+       if ((sglen < 0) || ((sglen > 0) && !sglist))
                return -EINVAL;
 
        spin_lock_irqsave(&sgdma->lock, flags);
index d100072..f229057 100644 (file)
@@ -26,6 +26,7 @@
 #include <asm/cacheflush.h>
 #include <linux/clk.h>
 #include <linux/mm.h>
+#include <linux/module.h>
 #include <linux/pagemap.h>
 #include <linux/scatterlist.h>
 #include <linux/sched.h>
index 9f2d26b..6806345 100644 (file)
@@ -540,7 +540,7 @@ static u8 to_clkrc(struct v4l2_fract *timeperframe,
 static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
 {
        struct i2c_client *client = v4l2_get_subdevdata(sd);
-       struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+       struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
        struct soc_camera_sense *sense = icd->sense;
        struct ov6650 *priv = to_ov6650(client);
        bool half_scale = !is_unscaled_ok(mf->width, mf->height, &priv->rect);
index c8d91b0..2cc3b91 100644 (file)
@@ -98,6 +98,10 @@ static int fimc_capture_state_cleanup(struct fimc_dev *fimc, bool suspend)
                        vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
        }
        set_bit(ST_CAPT_SUSPENDED, &fimc->state);
+
+       fimc_hw_reset(fimc);
+       cap->buf_index = 0;
+
        spin_unlock_irqrestore(&fimc->slock, flags);
 
        if (streaming)
@@ -137,7 +141,7 @@ int fimc_capture_config_update(struct fimc_ctx *ctx)
        struct fimc_dev *fimc = ctx->fimc_dev;
        int ret;
 
-       if (test_bit(ST_CAPT_APPLY_CFG, &fimc->state))
+       if (!test_bit(ST_CAPT_APPLY_CFG, &fimc->state))
                return 0;
 
        spin_lock(&ctx->slock);
@@ -150,7 +154,7 @@ int fimc_capture_config_update(struct fimc_ctx *ctx)
                fimc_hw_set_rotation(ctx);
                fimc_prepare_dma_offset(ctx, &ctx->d_frame);
                fimc_hw_set_out_dma(ctx);
-               set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+               clear_bit(ST_CAPT_APPLY_CFG, &fimc->state);
        }
        spin_unlock(&ctx->slock);
        return ret;
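
The two fimc fixes above restore the intended dirty-flag protocol in fimc_capture_config_update(): the pending configuration is applied only when ST_CAPT_APPLY_CFG is set, and the bit is cleared (not re-set) once the hardware has been reprogrammed, so the update is neither skipped nor repeated. A sketch of that check-then-clear pattern:

    #include <stdio.h>

    static int apply_cfg;           /* ST_CAPT_APPLY_CFG stand-in */
    static int applied;

    static void config_update(void)
    {
        if (!apply_cfg)             /* nothing pending */
            return;
        applied++;                  /* reprogram scaler/rotation/DMA here */
        apply_cfg = 0;              /* consumed: clear, don't re-set */
    }

    int main(void)
    {
        config_update();            /* no-op, nothing pending */
        apply_cfg = 1;
        config_update();            /* applies once */
        config_update();            /* no-op again */
        printf("applied %d time(s)\n", applied);
        return 0;
    }
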
@@ -164,7 +168,6 @@ static int start_streaming(struct vb2_queue *q, unsigned int count)
        int min_bufs;
        int ret;
 
-       fimc_hw_reset(fimc);
        vid_cap->frame_count = 0;
 
        ret = fimc_init_capture(fimc);
@@ -523,7 +526,7 @@ static struct fimc_fmt *fimc_capture_try_format(struct fimc_ctx *ctx,
        max_w = rotation ? pl->out_rot_en_w : pl->out_rot_dis_w;
        min_w = ctx->state & FIMC_DST_CROP ? dst->width : var->min_out_pixsize;
        min_h = ctx->state & FIMC_DST_CROP ? dst->height : var->min_out_pixsize;
-       if (fimc->id == 1 && var->pix_hoff)
+       if (var->min_vsize_align == 1 && !rotation)
                align_h = fimc_fmt_is_rgb(ffmt->color) ? 0 : 1;
 
        depth = fimc_get_format_depth(ffmt);
@@ -1239,6 +1242,7 @@ static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
 
        mutex_lock(&fimc->lock);
        set_frame_bounds(ff, mf->width, mf->height);
+       fimc->vid_cap.mf = *mf;
        ff->fmt = ffmt;
 
        /* Reset the crop rectangle if required. */
@@ -1375,7 +1379,7 @@ static void fimc_destroy_capture_subdev(struct fimc_dev *fimc)
        media_entity_cleanup(&sd->entity);
        v4l2_device_unregister_subdev(sd);
        kfree(sd);
-       sd = NULL;
+       fimc->vid_cap.subdev = NULL;
 }
 
 /* Set default format at the sensor and host interface */
index 19ca6db..07c6254 100644 (file)
@@ -37,7 +37,7 @@ static char *fimc_clocks[MAX_FIMC_CLOCKS] = {
 static struct fimc_fmt fimc_formats[] = {
        {
                .name           = "RGB565",
-               .fourcc         = V4L2_PIX_FMT_RGB565X,
+               .fourcc         = V4L2_PIX_FMT_RGB565,
                .depth          = { 16 },
                .color          = S5P_FIMC_RGB565,
                .memplanes      = 1,
@@ -1038,12 +1038,11 @@ static int fimc_try_fmt_mplane(struct fimc_ctx *ctx, struct v4l2_format *f)
                mod_x = 6; /* 64 x 32 pixels tile */
                mod_y = 5;
        } else {
-               if (fimc->id == 1 && variant->pix_hoff)
+               if (variant->min_vsize_align == 1)
                        mod_y = fimc_fmt_is_rgb(fmt->color) ? 0 : 1;
                else
-                       mod_y = mod_x;
+                       mod_y = ffs(variant->min_vsize_align) - 1;
        }
-       dbg("mod_x: %d, mod_y: %d, max_w: %d", mod_x, mod_y, max_w);
 
        v4l_bound_align_image(&pix->width, 16, max_w, mod_x,
                &pix->height, 8, variant->pix_limit->scaler_dis_w, mod_y, 0);
@@ -1226,10 +1225,10 @@ static int fimc_m2m_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
                fimc->variant->min_inp_pixsize : fimc->variant->min_out_pixsize;
 
        /* Get pixel alignment constraints. */
-       if (fimc->id == 1 && fimc->variant->pix_hoff)
+       if (fimc->variant->min_vsize_align == 1)
                halign = fimc_fmt_is_rgb(f->fmt->color) ? 0 : 1;
        else
-               halign = ffs(min_size) - 1;
+               halign = ffs(fimc->variant->min_vsize_align) - 1;
 
        for (i = 0; i < f->fmt->colplanes; i++)
                depth += f->fmt->depth[i];
@@ -1615,7 +1614,6 @@ static int fimc_probe(struct platform_device *pdev)
        pdata = pdev->dev.platform_data;
        fimc->pdata = pdata;
 
-       set_bit(ST_LPM, &fimc->state);
 
        init_waitqueue_head(&fimc->irq_queue);
        spin_lock_init(&fimc->slock);
@@ -1707,8 +1705,6 @@ static int fimc_runtime_resume(struct device *dev)
        /* Enable clocks and perform basic initalization */
        clk_enable(fimc->clock[CLK_GATE]);
        fimc_hw_reset(fimc);
-       if (fimc->variant->out_buf_count > 4)
-               fimc_hw_set_dma_seq(fimc, 0xF);
 
        /* Resume the capture or mem-to-mem device */
        if (fimc_capture_busy(fimc))
@@ -1750,8 +1746,6 @@ static int fimc_resume(struct device *dev)
                return 0;
        }
        fimc_hw_reset(fimc);
-       if (fimc->variant->out_buf_count > 4)
-               fimc_hw_set_dma_seq(fimc, 0xF);
        spin_unlock_irqrestore(&fimc->slock, flags);
 
        if (fimc_capture_busy(fimc))
@@ -1780,7 +1774,6 @@ static int __devexit fimc_remove(struct platform_device *pdev)
        struct fimc_dev *fimc = platform_get_drvdata(pdev);
 
        pm_runtime_disable(&pdev->dev);
-       fimc_runtime_suspend(&pdev->dev);
        pm_runtime_set_suspended(&pdev->dev);
 
        vb2_dma_contig_cleanup_ctx(fimc->alloc_ctx);
@@ -1840,6 +1833,7 @@ static struct samsung_fimc_variant fimc0_variant_s5p = {
        .min_inp_pixsize = 16,
        .min_out_pixsize = 16,
        .hor_offs_align  = 8,
+       .min_vsize_align = 16,
        .out_buf_count   = 4,
        .pix_limit       = &s5p_pix_limit[0],
 };
@@ -1849,6 +1843,7 @@ static struct samsung_fimc_variant fimc2_variant_s5p = {
        .min_inp_pixsize = 16,
        .min_out_pixsize = 16,
        .hor_offs_align  = 8,
+       .min_vsize_align = 16,
        .out_buf_count   = 4,
        .pix_limit = &s5p_pix_limit[1],
 };
@@ -1861,6 +1856,7 @@ static struct samsung_fimc_variant fimc0_variant_s5pv210 = {
        .min_inp_pixsize = 16,
        .min_out_pixsize = 16,
        .hor_offs_align  = 8,
+       .min_vsize_align = 16,
        .out_buf_count   = 4,
        .pix_limit       = &s5p_pix_limit[1],
 };
@@ -1874,6 +1870,7 @@ static struct samsung_fimc_variant fimc1_variant_s5pv210 = {
        .min_inp_pixsize = 16,
        .min_out_pixsize = 16,
        .hor_offs_align  = 1,
+       .min_vsize_align = 1,
        .out_buf_count   = 4,
        .pix_limit       = &s5p_pix_limit[2],
 };
@@ -1884,6 +1881,7 @@ static struct samsung_fimc_variant fimc2_variant_s5pv210 = {
        .min_inp_pixsize = 16,
        .min_out_pixsize = 16,
        .hor_offs_align  = 8,
+       .min_vsize_align = 16,
        .out_buf_count   = 4,
        .pix_limit       = &s5p_pix_limit[2],
 };
@@ -1898,6 +1896,7 @@ static struct samsung_fimc_variant fimc0_variant_exynos4 = {
        .min_inp_pixsize = 16,
        .min_out_pixsize = 16,
        .hor_offs_align  = 2,
+       .min_vsize_align = 1,
        .out_buf_count   = 32,
        .pix_limit       = &s5p_pix_limit[1],
 };
@@ -1910,6 +1909,7 @@ static struct samsung_fimc_variant fimc3_variant_exynos4 = {
        .min_inp_pixsize = 16,
        .min_out_pixsize = 16,
        .hor_offs_align  = 2,
+       .min_vsize_align = 1,
        .out_buf_count   = 32,
        .pix_limit       = &s5p_pix_limit[3],
 };
index a6936da..c7f01c4 100644 (file)
@@ -377,6 +377,7 @@ struct fimc_pix_limit {
  * @min_inp_pixsize: minimum input pixel size
  * @min_out_pixsize: minimum output pixel size
  * @hor_offs_align: horizontal pixel offset aligment
+ * @min_vsize_align: minimum vertical pixel size alignment
  * @out_buf_count: the number of buffers in output DMA sequence
  */
 struct samsung_fimc_variant {
@@ -390,6 +391,7 @@ struct samsung_fimc_variant {
        u16             min_inp_pixsize;
        u16             min_out_pixsize;
        u16             hor_offs_align;
+       u16             min_vsize_align;
        u16             out_buf_count;
 };
 
index cc337b1..615c862 100644 (file)
@@ -220,6 +220,7 @@ static struct v4l2_subdev *fimc_md_register_sensor(struct fimc_md *fmd,
        sd = v4l2_i2c_new_subdev_board(&fmd->v4l2_dev, adapter,
                                       s_info->pdata->board_info, NULL);
        if (IS_ERR_OR_NULL(sd)) {
+               i2c_put_adapter(adapter);
                v4l2_err(&fmd->v4l2_dev, "Failed to acquire subdev\n");
                return NULL;
        }
@@ -234,12 +235,15 @@ static struct v4l2_subdev *fimc_md_register_sensor(struct fimc_md *fmd,
 static void fimc_md_unregister_sensor(struct v4l2_subdev *sd)
 {
        struct i2c_client *client = v4l2_get_subdevdata(sd);
+       struct i2c_adapter *adapter;
 
        if (!client)
                return;
        v4l2_device_unregister_subdev(sd);
+       adapter = client->adapter;
        i2c_unregister_device(client);
-       i2c_put_adapter(client->adapter);
+       if (adapter)
+               i2c_put_adapter(adapter);
 }
 
 static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
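
The sensor register/unregister hunks above balance the adapter reference: the error path now drops the adapter it acquired, and the unregister helper caches client->adapter before i2c_unregister_device() frees the client, only then calling i2c_put_adapter(). A small plain-C sketch of the general rule (cache what you need from an object before the call that frees it); the structures here are illustrative, not the driver's:

#include <stdio.h>
#include <stdlib.h>

struct adapter { int users; };
struct client  { struct adapter *adapter; };

static void put_adapter(struct adapter *a) { a->users--; }

static void unregister_client(struct client *c)
{
    /* Cache the member first: after free(c) it must not be read again. */
    struct adapter *a = c->adapter;

    free(c);                /* stands in for i2c_unregister_device()      */
    if (a)
        put_adapter(a);     /* stands in for i2c_put_adapter(), done last */
}

int main(void)
{
    struct adapter a = { .users = 1 };
    struct client *c = malloc(sizeof(*c));

    if (!c)
        return 1;
    c->adapter = &a;
    unregister_client(c);
    printf("adapter users: %d\n", a.users);
    return 0;
}
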
@@ -381,20 +385,28 @@ static void fimc_md_unregister_entities(struct fimc_md *fmd)
 
 static int fimc_md_register_video_nodes(struct fimc_md *fmd)
 {
+       struct video_device *vdev;
        int i, ret = 0;
 
        for (i = 0; i < FIMC_MAX_DEVS && !ret; i++) {
                if (!fmd->fimc[i])
                        continue;
 
-               if (fmd->fimc[i]->m2m.vfd)
-                       ret = video_register_device(fmd->fimc[i]->m2m.vfd,
-                                                   VFL_TYPE_GRABBER, -1);
-               if (ret)
-                       break;
-               if (fmd->fimc[i]->vid_cap.vfd)
-                       ret = video_register_device(fmd->fimc[i]->vid_cap.vfd,
-                                                   VFL_TYPE_GRABBER, -1);
+               vdev = fmd->fimc[i]->m2m.vfd;
+               if (vdev) {
+                       ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+                       if (ret)
+                               break;
+                       v4l2_info(&fmd->v4l2_dev, "Registered %s as /dev/%s\n",
+                                 vdev->name, video_device_node_name(vdev));
+               }
+
+               vdev = fmd->fimc[i]->vid_cap.vfd;
+               if (vdev == NULL)
+                       continue;
+               ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+               v4l2_info(&fmd->v4l2_dev, "Registered %s as /dev/%s\n",
+                         vdev->name, video_device_node_name(vdev));
        }
 
        return ret;
@@ -502,7 +514,7 @@ static int fimc_md_create_links(struct fimc_md *fmd)
                        if (WARN(csis == NULL,
                                 "MIPI-CSI interface specified "
                                 "but s5p-csis module is not loaded!\n"))
-                               continue;
+                               return -EINVAL;
 
                        ret = media_entity_create_link(&sensor->entity, 0,
                                              &csis->entity, CSIS_PAD_SINK,
@@ -742,9 +754,6 @@ static int __devinit fimc_md_probe(struct platform_device *pdev)
        struct fimc_md *fmd;
        int ret;
 
-       if (WARN(!pdev->dev.platform_data, "Platform data not specified!\n"))
-               return -EINVAL;
-
        fmd = kzalloc(sizeof(struct fimc_md), GFP_KERNEL);
        if (!fmd)
                return -ENOMEM;
@@ -782,9 +791,11 @@ static int __devinit fimc_md_probe(struct platform_device *pdev)
        if (ret)
                goto err3;
 
-       ret = fimc_md_register_sensor_entities(fmd);
-       if (ret)
-               goto err3;
+       if (pdev->dev.platform_data) {
+               ret = fimc_md_register_sensor_entities(fmd);
+               if (ret)
+                       goto err3;
+       }
        ret = fimc_md_create_links(fmd);
        if (ret)
                goto err3;
index 20e664e..44f5c2d 100644 (file)
@@ -35,6 +35,9 @@ void fimc_hw_reset(struct fimc_dev *dev)
        cfg = readl(dev->regs + S5P_CIGCTRL);
        cfg &= ~S5P_CIGCTRL_SWRST;
        writel(cfg, dev->regs + S5P_CIGCTRL);
+
+       if (dev->variant->out_buf_count > 4)
+               fimc_hw_set_dma_seq(dev, 0xF);
 }
 
 static u32 fimc_hw_get_in_flip(struct fimc_ctx *ctx)
@@ -251,7 +254,14 @@ static void fimc_hw_set_scaler(struct fimc_ctx *ctx)
        struct fimc_scaler *sc = &ctx->scaler;
        struct fimc_frame *src_frame = &ctx->s_frame;
        struct fimc_frame *dst_frame = &ctx->d_frame;
-       u32 cfg = 0;
+
+       u32 cfg = readl(dev->regs + S5P_CISCCTRL);
+
+       cfg &= ~(S5P_CISCCTRL_CSCR2Y_WIDE | S5P_CISCCTRL_CSCY2R_WIDE |
+                S5P_CISCCTRL_SCALEUP_H | S5P_CISCCTRL_SCALEUP_V |
+                S5P_CISCCTRL_SCALERBYPASS | S5P_CISCCTRL_ONE2ONE |
+                S5P_CISCCTRL_INRGB_FMT_MASK | S5P_CISCCTRL_OUTRGB_FMT_MASK |
+                S5P_CISCCTRL_INTERLACE | S5P_CISCCTRL_RGB_EXT);
 
        if (!(ctx->flags & FIMC_COLOR_RANGE_NARROW))
                cfg |= (S5P_CISCCTRL_CSCR2Y_WIDE | S5P_CISCCTRL_CSCY2R_WIDE);
@@ -308,9 +318,9 @@ void fimc_hw_set_mainscaler(struct fimc_ctx *ctx)
        fimc_hw_set_scaler(ctx);
 
        cfg = readl(dev->regs + S5P_CISCCTRL);
+       cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
 
        if (variant->has_mainscaler_ext) {
-               cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
                cfg |= S5P_CISCCTRL_MHRATIO_EXT(sc->main_hratio);
                cfg |= S5P_CISCCTRL_MVRATIO_EXT(sc->main_vratio);
                writel(cfg, dev->regs + S5P_CISCCTRL);
@@ -323,7 +333,6 @@ void fimc_hw_set_mainscaler(struct fimc_ctx *ctx)
                cfg |= S5P_CIEXTEN_MVRATIO_EXT(sc->main_vratio);
                writel(cfg, dev->regs + S5P_CIEXTEN);
        } else {
-               cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
                cfg |= S5P_CISCCTRL_MHRATIO(sc->main_hratio);
                cfg |= S5P_CISCCTRL_MVRATIO(sc->main_vratio);
                writel(cfg, dev->regs + S5P_CISCCTRL);
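
Both scaler hunks switch CISCCTRL handling from "build the value from zero" to read-modify-write: read the register, clear only the fields this function owns, then OR in the new bits, so bits programmed elsewhere survive. A hedged sketch of the pattern without any MMIO (register and mask names are invented):

#include <stdint.h>
#include <stdio.h>

#define CTRL_ENABLE      (1u << 0)     /* owned by other code              */
#define CTRL_MODE_MASK   (0x3u << 4)   /* the fields this function owns    */

static uint32_t reg;                   /* stand-in for an MMIO register    */

static void set_mode(uint32_t mode)
{
    uint32_t cfg = reg;                /* read                             */
    cfg &= ~CTRL_MODE_MASK;            /* clear only the bits we own       */
    cfg |= (mode & 0x3u) << 4;         /* merge the new field value        */
    reg = cfg;                         /* write back; CTRL_ENABLE survives */
}

int main(void)
{
    reg = CTRL_ENABLE | (0x1u << 4);
    set_mode(2);
    printf("reg = 0x%08x\n", (unsigned int)reg);   /* 0x00000021 */
    return 0;
}
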
index 1e8cdb7..dff9dc7 100644 (file)
@@ -61,7 +61,7 @@ static struct s5p_mfc_fmt formats[] = {
                .num_planes = 1,
        },
        {
-               .name = "H264 Encoded Stream",
+               .name = "H263 Encoded Stream",
                .fourcc = V4L2_PIX_FMT_H263,
                .codec_mode = S5P_FIMV_CODEC_H263_ENC,
                .type = MFC_FMT_ENC,
index e16d3a4..b47d0c0 100644 (file)
@@ -16,6 +16,7 @@
 #include <media/v4l2-ioctl.h>
 #include <linux/videodev2.h>
 #include <linux/mm.h>
+#include <linux/module.h>
 #include <linux/version.h>
 #include <linux/timer.h>
 #include <media/videobuf2-dma-contig.h>
index f390682..c51decf 100644 (file)
@@ -566,8 +566,10 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
        ret = sh_mobile_ceu_soft_reset(pcdev);
 
        csi2_sd = find_csi2(pcdev);
-       if (csi2_sd)
-               csi2_sd->grp_id = (long)icd;
+       if (csi2_sd) {
+               csi2_sd->grp_id = soc_camera_grp_id(icd);
+               v4l2_set_subdev_hostdata(csi2_sd, icd);
+       }
 
        ret = v4l2_subdev_call(csi2_sd, core, s_power, 1);
        if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) {
@@ -768,7 +770,7 @@ static struct v4l2_subdev *find_bus_subdev(struct sh_mobile_ceu_dev *pcdev,
 {
        if (pcdev->csi2_pdev) {
                struct v4l2_subdev *csi2_sd = find_csi2(pcdev);
-               if (csi2_sd && csi2_sd->grp_id == (u32)icd)
+               if (csi2_sd && csi2_sd->grp_id == soc_camera_grp_id(icd))
                        return csi2_sd;
        }
 
@@ -1089,8 +1091,9 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int
                        /* Try 2560x1920, 1280x960, 640x480, 320x240 */
                        mf.width        = 2560 >> shift;
                        mf.height       = 1920 >> shift;
-                       ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
-                                                        s_mbus_fmt, &mf);
+                       ret = v4l2_device_call_until_err(sd->v4l2_dev,
+                                       soc_camera_grp_id(icd), video,
+                                       s_mbus_fmt, &mf);
                        if (ret < 0)
                                return ret;
                        shift++;
@@ -1389,7 +1392,8 @@ static int client_s_fmt(struct soc_camera_device *icd,
        bool ceu_1to1;
        int ret;
 
-       ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
+       ret = v4l2_device_call_until_err(sd->v4l2_dev,
+                                        soc_camera_grp_id(icd), video,
                                         s_mbus_fmt, mf);
        if (ret < 0)
                return ret;
@@ -1426,8 +1430,9 @@ static int client_s_fmt(struct soc_camera_device *icd,
                tmp_h = min(2 * tmp_h, max_height);
                mf->width = tmp_w;
                mf->height = tmp_h;
-               ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
-                                                s_mbus_fmt, mf);
+               ret = v4l2_device_call_until_err(sd->v4l2_dev,
+                                       soc_camera_grp_id(icd), video,
+                                       s_mbus_fmt, mf);
                dev_geo(dev, "Camera scaled to %ux%u\n",
                        mf->width, mf->height);
                if (ret < 0) {
@@ -1580,8 +1585,9 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
        }
 
        if (interm_width < icd->user_width || interm_height < icd->user_height) {
-               ret = v4l2_device_call_until_err(sd->v4l2_dev, (int)icd, video,
-                                                s_mbus_fmt, &mf);
+               ret = v4l2_device_call_until_err(sd->v4l2_dev,
+                                       soc_camera_grp_id(icd), video,
+                                       s_mbus_fmt, &mf);
                if (ret < 0)
                        return ret;
 
@@ -1867,7 +1873,8 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
        mf.code         = xlate->code;
        mf.colorspace   = pix->colorspace;
 
-       ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, try_mbus_fmt, &mf);
+       ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd),
+                                        video, try_mbus_fmt, &mf);
        if (ret < 0)
                return ret;
 
@@ -1891,8 +1898,9 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
                         */
                        mf.width = 2560;
                        mf.height = 1920;
-                       ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
-                                                        try_mbus_fmt, &mf);
+                       ret = v4l2_device_call_until_err(sd->v4l2_dev,
+                                       soc_camera_grp_id(icd), video,
+                                       try_mbus_fmt, &mf);
                        if (ret < 0) {
                                /* Shouldn't actually happen... */
                                dev_err(icd->parent,
index ea4f047..8a652b5 100644 (file)
@@ -143,7 +143,7 @@ static int sh_csi2_s_mbus_config(struct v4l2_subdev *sd,
                                 const struct v4l2_mbus_config *cfg)
 {
        struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev);
-       struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+       struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
        struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
        struct v4l2_mbus_config client_cfg = {.type = V4L2_MBUS_CSI2,
                                              .flags = priv->mipi_flags};
@@ -202,7 +202,7 @@ static void sh_csi2_hwinit(struct sh_csi2 *priv)
 static int sh_csi2_client_connect(struct sh_csi2 *priv)
 {
        struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data;
-       struct soc_camera_device *icd = (struct soc_camera_device *)priv->subdev.grp_id;
+       struct soc_camera_device *icd = v4l2_get_subdev_hostdata(&priv->subdev);
        struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
        struct device *dev = v4l2_get_subdevdata(&priv->subdev);
        struct v4l2_mbus_config cfg;
index b72580c..62e4312 100644 (file)
@@ -1103,7 +1103,8 @@ static int soc_camera_probe(struct soc_camera_device *icd)
        }
 
        sd = soc_camera_to_subdev(icd);
-       sd->grp_id = (long)icd;
+       sd->grp_id = soc_camera_grp_id(icd);
+       v4l2_set_subdev_hostdata(sd, icd);
 
        if (v4l2_ctrl_add_handler(&icd->ctrl_handler, sd->ctrl_handler))
                goto ectrl;
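
The soc_camera, sh_mobile_ceu and sh_csi2 hunks stop smuggling the client pointer through sd->grp_id (a 32-bit numeric group id, so a pointer does not fit on 64-bit builds): the id now comes from soc_camera_grp_id() and the pointer travels through the subdev host-private data instead. A small userspace sketch of the idea, keeping ids in integers and pointers in pointer-sized fields (types and names are illustrative):

#include <stdint.h>
#include <stdio.h>

struct device_ctx { int unit; };

struct subdev {
    uint32_t grp_id;     /* numeric group id only                         */
    void    *hostdata;   /* opaque back-pointer, pointer-sized by design  */
};

int main(void)
{
    struct device_ctx ctx = { .unit = 3 };
    struct subdev sd = { 0 };

    sd.grp_id = (uint32_t)ctx.unit;  /* a small integer id fits in 32 bits */
    sd.hostdata = &ctx;              /* the pointer goes in a void *       */

    struct device_ctx *back = sd.hostdata;
    printf("unit %d, grp_id %u\n", back->unit, (unsigned int)sd.grp_id);
    return 0;
}
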
index 43c0ebb..b7b2d34 100644 (file)
@@ -4,7 +4,7 @@
  * Debugfs support for the AB5500 MFD driver
  */
 
-#include <linux/export.h>
+#include <linux/module.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #include <linux/mfd/ab5500/ab5500.h>
index 1e91738..d3d572b 100644 (file)
@@ -620,6 +620,7 @@ static struct resource __devinitdata ab8500_fg_resources[] = {
 
 static struct resource __devinitdata ab8500_chargalg_resources[] = {};
 
+#ifdef CONFIG_DEBUG_FS
 static struct resource __devinitdata ab8500_debug_resources[] = {
        {
                .name   = "IRQ_FIRST",
@@ -634,6 +635,7 @@ static struct resource __devinitdata ab8500_debug_resources[] = {
                .flags  = IORESOURCE_IRQ,
        },
 };
+#endif
 
 static struct resource __devinitdata ab8500_usb_resources[] = {
        {
index f1d8848..8d816cc 100644 (file)
@@ -109,7 +109,7 @@ int adp5520_set_bits(struct device *dev, int reg, uint8_t bit_mask)
 
        ret = __adp5520_read(chip->client, reg, &reg_val);
 
-       if (!ret && ((reg_val & bit_mask) == 0)) {
+       if (!ret && ((reg_val & bit_mask) != bit_mask)) {
                reg_val |= bit_mask;
                ret = __adp5520_write(chip->client, reg, reg_val);
        }
index 1b79c37..1924b85 100644 (file)
@@ -182,7 +182,7 @@ int da903x_set_bits(struct device *dev, int reg, uint8_t bit_mask)
        if (ret)
                goto out;
 
-       if ((reg_val & bit_mask) == 0) {
+       if ((reg_val & bit_mask) != bit_mask) {
                reg_val |= bit_mask;
                ret = __da903x_write(chip->client, reg, reg_val);
        }
@@ -549,6 +549,7 @@ static int __devexit da903x_remove(struct i2c_client *client)
        struct da903x_chip *chip = i2c_get_clientdata(client);
 
        da903x_remove_subdevs(chip);
+       free_irq(client->irq, chip);
        kfree(chip);
        return 0;
 }
index 1e9ee53..ef39528 100644 (file)
@@ -16,6 +16,7 @@
  */
 
 #include <linux/err.h>
+#include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
index bba26d9..a5ddf31 100644 (file)
@@ -197,7 +197,7 @@ int tps6586x_set_bits(struct device *dev, int reg, uint8_t bit_mask)
        if (ret)
                goto out;
 
-       if ((reg_val & bit_mask) == 0) {
+       if ((reg_val & bit_mask) != bit_mask) {
                reg_val |= bit_mask;
                ret = __tps6586x_write(to_i2c_client(dev), reg, reg_val);
        }
index 6f5b8cf..c1da84b 100644 (file)
@@ -120,7 +120,7 @@ int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
                goto out;
        }
 
-       data &= mask;
+       data &= ~mask;
        err = tps65910_i2c_write(tps65910, reg, 1, &data);
        if (err)
                dev_err(tps65910->dev, "write to reg %x failed\n", reg);
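
The adp5520, da903x, tps6586x and tps65910 hunks all fix the same read-modify-write helpers: a "set bits" helper must write back whenever any bit of the mask is still clear ((val & mask) != mask, not == 0), and a "clear bits" helper must AND with ~mask rather than mask. A short plain-C sketch showing why the old tests mishandle multi-bit masks (the register is simulated; nothing here is the drivers' code):

#include <assert.h>
#include <stdint.h>

static uint8_t reg;                     /* stand-in for an I2C register     */

static void set_bits(uint8_t mask)
{
    uint8_t val = reg;
    if ((val & mask) != mask)           /* write if any masked bit is clear */
        reg = val | mask;               /* "(val & mask) == 0" would skip   */
}                                       /* the write when one bit was set   */

static void clear_bits(uint8_t mask)
{
    uint8_t val = reg;
    reg = val & ~mask;                  /* "val & mask" would instead keep  */
}                                       /* the masked bits and drop the rest */

int main(void)
{
    reg = 0x01;
    set_bits(0x03);                     /* one of the two bits already set  */
    assert(reg == 0x03);                /* the buggy test would leave 0x01  */

    clear_bits(0x02);
    assert(reg == 0x01);                /* the buggy AND would give 0x02    */
    return 0;
}
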
index bfbd660..61e70cf 100644 (file)
@@ -363,13 +363,13 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
                pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
                return -EPERM;
        }
-       sid = twl_map[mod_no].sid;
-       twl = &twl_modules[sid];
-
        if (unlikely(!inuse)) {
-               pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
+               pr_err("%s: not initialized\n", DRIVER_NAME);
                return -EPERM;
        }
+       sid = twl_map[mod_no].sid;
+       twl = &twl_modules[sid];
+
        mutex_lock(&twl->xfer_lock);
        /*
         * [MSG1]: fill the register address data
@@ -420,13 +420,13 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
                pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
                return -EPERM;
        }
-       sid = twl_map[mod_no].sid;
-       twl = &twl_modules[sid];
-
        if (unlikely(!inuse)) {
-               pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
+               pr_err("%s: not initialized\n", DRIVER_NAME);
                return -EPERM;
        }
+       sid = twl_map[mod_no].sid;
+       twl = &twl_modules[sid];
+
        mutex_lock(&twl->xfer_lock);
        /* [MSG1] fill the register address data */
        msg = &twl->xfer_msg[0];
index f062c8c..29f11e0 100644 (file)
@@ -432,6 +432,7 @@ struct sih_agent {
        u32                     edge_change;
 
        struct mutex            irq_lock;
+       char                    *irq_name;
 };
 
 /*----------------------------------------------------------------------*/
@@ -589,7 +590,7 @@ static inline int sih_read_isr(const struct sih *sih)
  * Generic handler for SIH interrupts ... we "know" this is called
  * in task context, with IRQs enabled.
  */
-static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
+static irqreturn_t handle_twl4030_sih(int irq, void *data)
 {
        struct sih_agent *agent = irq_get_handler_data(irq);
        const struct sih *sih = agent->sih;
@@ -602,7 +603,7 @@ static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
                pr_err("twl4030: %s SIH, read ISR error %d\n",
                        sih->name, isr);
                /* REVISIT:  recover; eventually mask it all, etc */
-               return;
+               return IRQ_HANDLED;
        }
 
        while (isr) {
@@ -616,6 +617,7 @@ static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
                        pr_err("twl4030: %s SIH, invalid ISR bit %d\n",
                                sih->name, irq);
        }
+       return IRQ_HANDLED;
 }
 
 static unsigned twl4030_irq_next;
@@ -668,18 +670,19 @@ int twl4030_sih_setup(int module)
                activate_irq(irq);
        }
 
-       status = irq_base;
        twl4030_irq_next += i;
 
        /* replace generic PIH handler (handle_simple_irq) */
        irq = sih_mod + twl4030_irq_base;
        irq_set_handler_data(irq, agent);
-       irq_set_chained_handler(irq, handle_twl4030_sih);
+       agent->irq_name = kasprintf(GFP_KERNEL, "twl4030_%s", sih->name);
+       status = request_threaded_irq(irq, NULL, handle_twl4030_sih, 0,
+                                     agent->irq_name ?: sih->name, NULL);
 
        pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", sih->name,
                        irq, irq_base, twl4030_irq_next - 1);
 
-       return status;
+       return status < 0 ? status : irq_base;
 }
 
 /* FIXME need a call to reverse twl4030_sih_setup() ... */
@@ -733,8 +736,9 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
        }
 
        /* install an irq handler to demultiplex the TWL4030 interrupt */
-       status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih, 0,
-                                       "TWL4030-PIH", NULL);
+       status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih,
+                                     IRQF_ONESHOT,
+                                     "TWL4030-PIH", NULL);
        if (status < 0) {
                pr_err("twl4030: could not claim irq%d: %d\n", irq_num, status);
                goto fail_rqirq;
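
The twl4030-irq hunks convert the chained SIH handler into a threaded interrupt: the handler becomes an irqreturn_t function, it is registered with request_threaded_irq() under a per-SIH name, setup now returns the IRQ base only when registration succeeded, and the top-level PIH request gains IRQF_ONESHOT since it has no hard-irq half. A hedged kernel-style sketch of that registration pattern (names invented, not buildable outside a kernel tree):

#include <linux/interrupt.h>

static irqreturn_t demo_thread_fn(int irq, void *data)
{
        /* Runs in task context, so it may sleep, e.g. to query a PMIC
         * over I2C, which a chained hard-irq handler could not do. */
        return IRQ_HANDLED;
}

static int demo_setup_irq(unsigned int irq, void *dev)
{
        /* No primary handler: IRQF_ONESHOT keeps the line masked until
         * the thread has run, so a level-triggered source cannot storm. */
        return request_threaded_irq(irq, NULL, demo_thread_fn,
                                    IRQF_ONESHOT, "demo-irq", dev);
}
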
index 5d6ba13..61894fc 100644 (file)
@@ -239,6 +239,7 @@ static int wm8994_suspend(struct device *dev)
 
        switch (wm8994->type) {
        case WM8958:
+       case WM1811:
                ret = wm8994_reg_read(wm8994, WM8958_MIC_DETECT_1);
                if (ret < 0) {
                        dev_err(dev, "Failed to read power status: %d\n", ret);
index a1cb21f..1e0e27c 100644 (file)
@@ -1606,6 +1606,14 @@ static const struct mmc_fixup blk_fixups[] =
                  MMC_QUIRK_BLK_NO_CMD23),
        MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
                  MMC_QUIRK_BLK_NO_CMD23),
+
+       /*
+        * Some Micron MMC cards needs longer data read timeout than
+        * indicated in CSD.
+        */
+       MMC_FIXUP(CID_NAME_ANY, 0x13, 0x200, add_quirk_mmc,
+                 MMC_QUIRK_LONG_READ_TIME),
+
        END_FIXUP
 };
 
index 5278ffb..950b97d 100644 (file)
@@ -529,6 +529,18 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
                        data->timeout_clks = 0;
                }
        }
+
+       /*
+        * Some cards require longer data read timeout than indicated in CSD.
+        * Address this by setting the read timeout to a "reasonably high"
+        * value. For the cards tested, 300ms has proven enough. If necessary,
+        * this value can be increased if other problematic cards require this.
+        */
+       if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
+               data->timeout_ns = 300000000;
+               data->timeout_clks = 0;
+       }
+
        /*
         * Some cards need very high timeouts if driven in SPI mode.
         * The worst observed timeout was 900ms after writing a
@@ -1213,6 +1225,46 @@ void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
        mmc_host_clk_release(host);
 }
 
+static void mmc_poweroff_notify(struct mmc_host *host)
+{
+       struct mmc_card *card;
+       unsigned int timeout;
+       unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
+       int err = 0;
+
+       card = host->card;
+
+       /*
+        * Send power notify command only if card
+        * is mmc and notify state is powered ON
+        */
+       if (card && mmc_card_mmc(card) &&
+           (card->poweroff_notify_state == MMC_POWERED_ON)) {
+
+               if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
+                       notify_type = EXT_CSD_POWER_OFF_SHORT;
+                       timeout = card->ext_csd.generic_cmd6_time;
+                       card->poweroff_notify_state = MMC_POWEROFF_SHORT;
+               } else {
+                       notify_type = EXT_CSD_POWER_OFF_LONG;
+                       timeout = card->ext_csd.power_off_longtime;
+                       card->poweroff_notify_state = MMC_POWEROFF_LONG;
+               }
+
+               err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+                                EXT_CSD_POWER_OFF_NOTIFICATION,
+                                notify_type, timeout);
+
+               if (err && err != -EBADMSG)
+                       pr_err("Device failed to respond within %d poweroff "
+                              "time. Forcefully powering down the device\n",
+                              timeout);
+
+               /* Set the card state to no notification after the poweroff */
+               card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
+       }
+}
+
 /*
  * Apply power to the MMC stack.  This is a two-stage process.
  * First, we enable power to the card without the clock running.
@@ -1269,42 +1321,12 @@ static void mmc_power_up(struct mmc_host *host)
 
 void mmc_power_off(struct mmc_host *host)
 {
-       struct mmc_card *card;
-       unsigned int notify_type;
-       unsigned int timeout;
-       int err;
-
        mmc_host_clk_hold(host);
 
-       card = host->card;
        host->ios.clock = 0;
        host->ios.vdd = 0;
 
-       if (card && mmc_card_mmc(card) &&
-           (card->poweroff_notify_state == MMC_POWERED_ON)) {
-
-               if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
-                       notify_type = EXT_CSD_POWER_OFF_SHORT;
-                       timeout = card->ext_csd.generic_cmd6_time;
-                       card->poweroff_notify_state = MMC_POWEROFF_SHORT;
-               } else {
-                       notify_type = EXT_CSD_POWER_OFF_LONG;
-                       timeout = card->ext_csd.power_off_longtime;
-                       card->poweroff_notify_state = MMC_POWEROFF_LONG;
-               }
-
-               err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-                                EXT_CSD_POWER_OFF_NOTIFICATION,
-                                notify_type, timeout);
-
-               if (err && err != -EBADMSG)
-                       pr_err("Device failed to respond within %d poweroff "
-                              "time. Forcefully powering down the device\n",
-                              timeout);
-
-               /* Set the card state to no notification after the poweroff */
-               card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
-       }
+       mmc_poweroff_notify(host);
 
        /*
         * Reset ocr mask to be the highest possible voltage supported for
@@ -2196,7 +2218,7 @@ int mmc_card_sleep(struct mmc_host *host)
 
        mmc_bus_get(host);
 
-       if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
+       if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
                err = host->bus_ops->sleep(host);
 
        mmc_bus_put(host);
@@ -2302,8 +2324,17 @@ int mmc_suspend_host(struct mmc_host *host)
                 * pre-claim the host.
                 */
                if (mmc_try_claim_host(host)) {
-                       if (host->bus_ops->suspend)
+                       if (host->bus_ops->suspend) {
+                               /*
+                                * For eMMC 4.5 device send notify command
+                                * before sleep, because in sleep state eMMC 4.5
+                                * devices respond to only RESET and AWAKE cmd
+                                */
+                               mmc_poweroff_notify(host);
                                err = host->bus_ops->suspend(host);
+                       }
+                       mmc_do_release_host(host);
+
                        if (err == -ENOSYS || !host->bus_ops->resume) {
                                /*
                                 * We simply "remove" the card in this case.
@@ -2318,7 +2349,6 @@ int mmc_suspend_host(struct mmc_host *host)
                                host->pm_flags = 0;
                                err = 0;
                        }
-                       mmc_do_release_host(host);
                } else {
                        err = -EBUSY;
                }
index e8a5eb3..d31c78b 100644 (file)
@@ -302,17 +302,6 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
        host->max_blk_size = 512;
        host->max_blk_count = PAGE_CACHE_SIZE / 512;
 
-       /*
-        * Enable runtime power management by default. This flag was added due
-        * to runtime power management causing disruption for some users, but
-        * the power on/off code has been improved since then.
-        *
-        * We'll enable this flag by default as an experiment, and if no
-        * problems are reported, we will follow up later and remove the flag
-        * altogether.
-        */
-       host->caps = MMC_CAP_POWER_OFF_CARD;
-
        return host;
 
 free:
index dbf421a..d240427 100644 (file)
@@ -876,17 +876,21 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
         * set the notification byte in the ext_csd register of device
         */
        if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) &&
-           (card->poweroff_notify_state == MMC_NO_POWER_NOTIFICATION)) {
+           (card->ext_csd.rev >= 6)) {
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 EXT_CSD_POWER_OFF_NOTIFICATION,
                                 EXT_CSD_POWER_ON,
                                 card->ext_csd.generic_cmd6_time);
                if (err && err != -EBADMSG)
                        goto free_card;
-       }
 
-       if (!err)
-               card->poweroff_notify_state = MMC_POWERED_ON;
+               /*
+                * The err can be -EBADMSG or 0,
+                * so check for success and update the flag
+                */
+               if (!err)
+                       card->poweroff_notify_state = MMC_POWERED_ON;
+       }
 
        /*
         * Activate high speed (if supported)
index 325ea61..8e0fbe9 100644 (file)
@@ -732,6 +732,7 @@ static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                                "failed to config DMA channel. Falling back to PIO\n");
                        dma_release_channel(host->dma);
                        host->do_dma = 0;
+                       host->dma = NULL;
                }
        }
 
index 101cd31..d5fe43d 100644 (file)
@@ -1010,6 +1010,7 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
                        host->data->sg_len,
                        omap_hsmmc_get_dma_dir(host, host->data));
                omap_free_dma(dma_ch);
+               host->data->host_cookie = 0;
        }
        host->data = NULL;
 }
@@ -1575,8 +1576,10 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
        struct mmc_data *data = mrq->data;
 
        if (host->use_dma) {
-               dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-                            omap_hsmmc_get_dma_dir(host, data));
+               if (data->host_cookie)
+                       dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+                                    data->sg_len,
+                                    omap_hsmmc_get_dma_dir(host, data));
                data->host_cookie = 0;
        }
 }
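
The omap_hsmmc hunks make DMA unmapping conditional on data->host_cookie (and clear the cookie in the error cleanup path), so only buffers this driver actually mapped in the asynchronous pre-request step get unmapped later. A small illustrative sketch of the cookie idea in plain C, with the DMA calls reduced to comments (none of this is the driver's code):

#include <stdio.h>

struct request {
    const char *name;
    int host_cookie;       /* non-zero only after this driver mapped it    */
};

static void pre_request(struct request *rq, int use_dma)
{
    if (!use_dma)
        return;            /* PIO fallback: host_cookie stays 0            */
    /* ... dma_map_sg() equivalent would run here ...                      */
    rq->host_cookie = 1;
}

static void post_request(struct request *rq)
{
    /* Unmap only what was actually mapped; unconditionally unmapping a
     * never-mapped (or already cleaned-up) request corrupts state.        */
    if (rq->host_cookie) {
        /* ... dma_unmap_sg() equivalent ...                               */
        printf("%s: unmapped\n", rq->name);
    }
    rq->host_cookie = 0;
}

int main(void)
{
    struct request dma_rq = { "dma", 0 };
    struct request pio_rq = { "pio", 0 };

    pre_request(&dma_rq, 1);
    pre_request(&pio_rq, 0);
    post_request(&dma_rq);             /* unmaps                           */
    post_request(&pio_rq);             /* skipped: cookie never set        */
    return 0;
}
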
index 4b920b7..b4257e7 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/mmc/host.h>
+#include <linux/module.h>
 #include <mach/cns3xxx.h>
 #include "sdhci-pltfm.h"
 
@@ -108,13 +109,10 @@ static struct platform_driver sdhci_cns3xxx_driver = {
        .driver         = {
                .name   = "sdhci-cns3xxx",
                .owner  = THIS_MODULE,
+               .pm     = SDHCI_PLTFM_PMOPS,
        },
        .probe          = sdhci_cns3xxx_probe,
        .remove         = __devexit_p(sdhci_cns3xxx_remove),
-#ifdef CONFIG_PM
-       .suspend        = sdhci_pltfm_suspend,
-       .resume         = sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_cns3xxx_init(void)
index f2d29dc..a81312c 100644 (file)
@@ -82,13 +82,10 @@ static struct platform_driver sdhci_dove_driver = {
        .driver         = {
                .name   = "sdhci-dove",
                .owner  = THIS_MODULE,
+               .pm     = SDHCI_PLTFM_PMOPS,
        },
        .probe          = sdhci_dove_probe,
        .remove         = __devexit_p(sdhci_dove_remove),
-#ifdef CONFIG_PM
-       .suspend        = sdhci_pltfm_suspend,
-       .resume         = sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_dove_init(void)
index 4b976f0..38ebc4e 100644 (file)
@@ -599,14 +599,11 @@ static struct platform_driver sdhci_esdhc_imx_driver = {
                .name   = "sdhci-esdhc-imx",
                .owner  = THIS_MODULE,
                .of_match_table = imx_esdhc_dt_ids,
+               .pm     = SDHCI_PLTFM_PMOPS,
        },
        .id_table       = imx_esdhc_devtype,
        .probe          = sdhci_esdhc_imx_probe,
        .remove         = __devexit_p(sdhci_esdhc_imx_remove),
-#ifdef CONFIG_PM
-       .suspend        = sdhci_pltfm_suspend,
-       .resume         = sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_esdhc_imx_init(void)
index 59e9d00..01e5f62 100644 (file)
@@ -125,13 +125,10 @@ static struct platform_driver sdhci_esdhc_driver = {
                .name = "sdhci-esdhc",
                .owner = THIS_MODULE,
                .of_match_table = sdhci_esdhc_of_match,
+               .pm = SDHCI_PLTFM_PMOPS,
        },
        .probe = sdhci_esdhc_probe,
        .remove = __devexit_p(sdhci_esdhc_remove),
-#ifdef CONFIG_PM
-       .suspend = sdhci_pltfm_suspend,
-       .resume = sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_esdhc_init(void)
index 9b0d794..3619adc 100644 (file)
@@ -87,13 +87,10 @@ static struct platform_driver sdhci_hlwd_driver = {
                .name = "sdhci-hlwd",
                .owner = THIS_MODULE,
                .of_match_table = sdhci_hlwd_of_match,
+               .pm = SDHCI_PLTFM_PMOPS,
        },
        .probe = sdhci_hlwd_probe,
        .remove = __devexit_p(sdhci_hlwd_remove),
-#ifdef CONFIG_PM
-       .suspend = sdhci_pltfm_suspend,
-       .resume = sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_hlwd_init(void)
index d833d9c..6878a94 100644 (file)
@@ -54,8 +54,7 @@ struct sdhci_pci_fixes {
        int                     (*probe_slot) (struct sdhci_pci_slot *);
        void                    (*remove_slot) (struct sdhci_pci_slot *, int);
 
-       int                     (*suspend) (struct sdhci_pci_chip *,
-                                       pm_message_t);
+       int                     (*suspend) (struct sdhci_pci_chip *);
        int                     (*resume) (struct sdhci_pci_chip *);
 };
 
@@ -549,7 +548,7 @@ static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
                jmicron_enable_mmc(slot->host, 0);
 }
 
-static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state)
+static int jmicron_suspend(struct sdhci_pci_chip *chip)
 {
        int i;
 
@@ -993,8 +992,9 @@ static struct sdhci_ops sdhci_pci_ops = {
 
 #ifdef CONFIG_PM
 
-static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int sdhci_pci_suspend(struct device *dev)
 {
+       struct pci_dev *pdev = to_pci_dev(dev);
        struct sdhci_pci_chip *chip;
        struct sdhci_pci_slot *slot;
        mmc_pm_flag_t slot_pm_flags;
@@ -1010,7 +1010,7 @@ static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
                if (!slot)
                        continue;
 
-               ret = sdhci_suspend_host(slot->host, state);
+               ret = sdhci_suspend_host(slot->host);
 
                if (ret) {
                        for (i--; i >= 0; i--)
@@ -1026,7 +1026,7 @@ static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
        }
 
        if (chip->fixes && chip->fixes->suspend) {
-               ret = chip->fixes->suspend(chip, state);
+               ret = chip->fixes->suspend(chip);
                if (ret) {
                        for (i = chip->num_slots - 1; i >= 0; i--)
                                sdhci_resume_host(chip->slots[i]->host);
@@ -1042,16 +1042,17 @@ static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
                }
                pci_set_power_state(pdev, PCI_D3hot);
        } else {
-               pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
+               pci_enable_wake(pdev, PCI_D3hot, 0);
                pci_disable_device(pdev);
-               pci_set_power_state(pdev, pci_choose_state(pdev, state));
+               pci_set_power_state(pdev, PCI_D3hot);
        }
 
        return 0;
 }
 
-static int sdhci_pci_resume(struct pci_dev *pdev)
+static int sdhci_pci_resume(struct device *dev)
 {
+       struct pci_dev *pdev = to_pci_dev(dev);
        struct sdhci_pci_chip *chip;
        struct sdhci_pci_slot *slot;
        int i, ret;
@@ -1099,7 +1100,6 @@ static int sdhci_pci_runtime_suspend(struct device *dev)
        struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
        struct sdhci_pci_chip *chip;
        struct sdhci_pci_slot *slot;
-       pm_message_t state = { .event = PM_EVENT_SUSPEND };
        int i, ret;
 
        chip = pci_get_drvdata(pdev);
@@ -1121,7 +1121,7 @@ static int sdhci_pci_runtime_suspend(struct device *dev)
        }
 
        if (chip->fixes && chip->fixes->suspend) {
-               ret = chip->fixes->suspend(chip, state);
+               ret = chip->fixes->suspend(chip);
                if (ret) {
                        for (i = chip->num_slots - 1; i >= 0; i--)
                                sdhci_runtime_resume_host(chip->slots[i]->host);
@@ -1176,6 +1176,8 @@ static int sdhci_pci_runtime_idle(struct device *dev)
 #endif
 
 static const struct dev_pm_ops sdhci_pci_pm_ops = {
+       .suspend = sdhci_pci_suspend,
+       .resume = sdhci_pci_resume,
        .runtime_suspend = sdhci_pci_runtime_suspend,
        .runtime_resume = sdhci_pci_runtime_resume,
        .runtime_idle = sdhci_pci_runtime_idle,
@@ -1428,8 +1430,6 @@ static struct pci_driver sdhci_driver = {
        .id_table =     pci_ids,
        .probe =        sdhci_pci_probe,
        .remove =       __devexit_p(sdhci_pci_remove),
-       .suspend =      sdhci_pci_suspend,
-       .resume =       sdhci_pci_resume,
        .driver =       {
                .pm =   &sdhci_pci_pm_ops
        },
index a9e12ea..03970bc 100644 (file)
@@ -194,21 +194,25 @@ int sdhci_pltfm_unregister(struct platform_device *pdev)
 EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister);
 
 #ifdef CONFIG_PM
-int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state)
+static int sdhci_pltfm_suspend(struct device *dev)
 {
-       struct sdhci_host *host = platform_get_drvdata(dev);
+       struct sdhci_host *host = dev_get_drvdata(dev);
 
-       return sdhci_suspend_host(host, state);
+       return sdhci_suspend_host(host);
 }
-EXPORT_SYMBOL_GPL(sdhci_pltfm_suspend);
 
-int sdhci_pltfm_resume(struct platform_device *dev)
+static int sdhci_pltfm_resume(struct device *dev)
 {
-       struct sdhci_host *host = platform_get_drvdata(dev);
+       struct sdhci_host *host = dev_get_drvdata(dev);
 
        return sdhci_resume_host(host);
 }
-EXPORT_SYMBOL_GPL(sdhci_pltfm_resume);
+
+const struct dev_pm_ops sdhci_pltfm_pmops = {
+       .suspend        = sdhci_pltfm_suspend,
+       .resume         = sdhci_pltfm_resume,
+};
+EXPORT_SYMBOL_GPL(sdhci_pltfm_pmops);
 #endif /* CONFIG_PM */
 
 static int __init sdhci_pltfm_drv_init(void)
index 3a9fc3f..37e0e18 100644 (file)
@@ -99,8 +99,10 @@ extern int sdhci_pltfm_register(struct platform_device *pdev,
 extern int sdhci_pltfm_unregister(struct platform_device *pdev);
 
 #ifdef CONFIG_PM
-extern int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state);
-extern int sdhci_pltfm_resume(struct platform_device *dev);
+extern const struct dev_pm_ops sdhci_pltfm_pmops;
+#define SDHCI_PLTFM_PMOPS (&sdhci_pltfm_pmops)
+#else
+#define SDHCI_PLTFM_PMOPS NULL
 #endif
 
 #endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
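
The sdhci-pltfm change above is the template the surrounding driver hunks follow (cns3xxx, dove, esdhc-imx, esdhc, hlwd, pxav2/3, tegra, and the s3c variant): the legacy platform_driver .suspend/.resume callbacks, which took a pm_message_t, are replaced by a const struct dev_pm_ops exposed through a macro that collapses to NULL when CONFIG_PM is off. A hedged kernel-style sketch of the conversion shape (driver and symbol names invented, not buildable standalone):

#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM
static int demo_suspend(struct device *dev)
{
        /* dev_get_drvdata(dev) replaces platform_get_drvdata(pdev) here */
        return 0;
}

static int demo_resume(struct device *dev)
{
        return 0;
}

static const struct dev_pm_ops demo_pm_ops = {
        .suspend = demo_suspend,
        .resume  = demo_resume,
};
#define DEMO_PM_OPS (&demo_pm_ops)
#else
#define DEMO_PM_OPS NULL
#endif

static struct platform_driver demo_driver = {
        .driver = {
                .name = "demo",
                .pm   = DEMO_PM_OPS,   /* no legacy .suspend/.resume */
        },
};
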
index d4bf6d3..7a039c3 100644 (file)
@@ -218,13 +218,10 @@ static struct platform_driver sdhci_pxav2_driver = {
        .driver         = {
                .name   = "sdhci-pxav2",
                .owner  = THIS_MODULE,
+               .pm     = SDHCI_PLTFM_PMOPS,
        },
        .probe          = sdhci_pxav2_probe,
        .remove         = __devexit_p(sdhci_pxav2_remove),
-#ifdef CONFIG_PM
-       .suspend        = sdhci_pltfm_suspend,
-       .resume         = sdhci_pltfm_resume,
-#endif
 };
 static int __init sdhci_pxav2_init(void)
 {
index cff4ad3..15673a7 100644 (file)
@@ -264,13 +264,10 @@ static struct platform_driver sdhci_pxav3_driver = {
        .driver         = {
                .name   = "sdhci-pxav3",
                .owner  = THIS_MODULE,
+               .pm     = SDHCI_PLTFM_PMOPS,
        },
        .probe          = sdhci_pxav3_probe,
        .remove         = __devexit_p(sdhci_pxav3_remove),
-#ifdef CONFIG_PM
-       .suspend        = sdhci_pltfm_suspend,
-       .resume         = sdhci_pltfm_resume,
-#endif
 };
 static int __init sdhci_pxav3_init(void)
 {
index 3d00e72..0d33ff0 100644 (file)
@@ -622,33 +622,38 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
 
 #ifdef CONFIG_PM
 
-static int sdhci_s3c_suspend(struct platform_device *dev, pm_message_t pm)
+static int sdhci_s3c_suspend(struct device *dev)
 {
-       struct sdhci_host *host = platform_get_drvdata(dev);
+       struct sdhci_host *host = dev_get_drvdata(dev);
 
-       return sdhci_suspend_host(host, pm);
+       return sdhci_suspend_host(host);
 }
 
-static int sdhci_s3c_resume(struct platform_device *dev)
+static int sdhci_s3c_resume(struct device *dev)
 {
-       struct sdhci_host *host = platform_get_drvdata(dev);
+       struct sdhci_host *host = dev_get_drvdata(dev);
 
        return sdhci_resume_host(host);
 }
 
+static const struct dev_pm_ops sdhci_s3c_pmops = {
+       .suspend        = sdhci_s3c_suspend,
+       .resume         = sdhci_s3c_resume,
+};
+
+#define SDHCI_S3C_PMOPS (&sdhci_s3c_pmops)
+
 #else
-#define sdhci_s3c_suspend NULL
-#define sdhci_s3c_resume NULL
+#define SDHCI_S3C_PMOPS NULL
 #endif
 
 static struct platform_driver sdhci_s3c_driver = {
        .probe          = sdhci_s3c_probe,
        .remove         = __devexit_p(sdhci_s3c_remove),
-       .suspend        = sdhci_s3c_suspend,
-       .resume         = sdhci_s3c_resume,
        .driver         = {
                .owner  = THIS_MODULE,
                .name   = "s3c-sdhci",
+               .pm     = SDHCI_S3C_PMOPS,
        },
 };
 
index 89699e8..e2e18d3 100644 (file)
@@ -318,13 +318,10 @@ static struct platform_driver sdhci_tegra_driver = {
                .name   = "sdhci-tegra",
                .owner  = THIS_MODULE,
                .of_match_table = sdhci_tegra_dt_match,
+               .pm     = SDHCI_PLTFM_PMOPS,
        },
        .probe          = sdhci_tegra_probe,
        .remove         = __devexit_p(sdhci_tegra_remove),
-#ifdef CONFIG_PM
-       .suspend        = sdhci_pltfm_suspend,
-       .resume         = sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_tegra_init(void)
index 6d8eea3..19ed580 100644 (file)
@@ -2327,7 +2327,7 @@ out:
 
 #ifdef CONFIG_PM
 
-int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
+int sdhci_suspend_host(struct sdhci_host *host)
 {
        int ret;
 
index 0a5b654..a04d4d0 100644 (file)
@@ -374,7 +374,7 @@ extern int sdhci_add_host(struct sdhci_host *host);
 extern void sdhci_remove_host(struct sdhci_host *host, int dead);
 
 #ifdef CONFIG_PM
-extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state);
+extern int sdhci_suspend_host(struct sdhci_host *host);
 extern int sdhci_resume_host(struct sdhci_host *host);
 extern void sdhci_enable_irq_wakeups(struct sdhci_host *host);
 #endif
index 369366c..d5505f3 100644 (file)
@@ -908,7 +908,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                if (host->power) {
                        pm_runtime_put(&host->pd->dev);
                        host->power = false;
-                       if (p->down_pwr)
+                       if (p->down_pwr && ios->power_mode == MMC_POWER_OFF)
                                p->down_pwr(host->pd);
                }
                host->state = STATE_IDLE;
index d85a60c..4208b39 100644 (file)
@@ -798,7 +798,7 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                /* start bus clock */
                tmio_mmc_clk_start(host);
        } else if (ios->power_mode != MMC_POWER_UP) {
-               if (host->set_pwr)
+               if (host->set_pwr && ios->power_mode == MMC_POWER_OFF)
                        host->set_pwr(host->pdev, 0);
                if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
                    pdata->power) {
index e8f6e65..2ec978b 100644 (file)
@@ -259,7 +259,7 @@ static int firmware_rom_wait_states = 0x04;
 static int firmware_rom_wait_states = 0x1C;
 #endif
 
-module_param(firmware_rom_wait_states, bool, 0644);
+module_param(firmware_rom_wait_states, int, 0644);
 MODULE_PARM_DESC(firmware_rom_wait_states,
                 "ROM wait states byte=RRRIIEEE (Reserved Internal External)");
 
index 94f5534..45876d0 100644 (file)
@@ -227,10 +227,14 @@ static int platram_probe(struct platform_device *pdev)
        if (!err)
                dev_info(&pdev->dev, "registered mtd device\n");
 
-       /* add the whole device. */
-       err = mtd_device_register(info->mtd, NULL, 0);
-       if (err)
-               dev_err(&pdev->dev, "failed to register the entire device\n");
+       if (pdata->nr_partitions) {
+               /* add the whole device. */
+               err = mtd_device_register(info->mtd, NULL, 0);
+               if (err) {
+                       dev_err(&pdev->dev,
+                               "failed to register the entire device\n");
+               }
+       }
 
        return err;
 
index 411a17d..2a25b67 100644 (file)
@@ -98,7 +98,7 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
        }
        info->mtd->owner = THIS_MODULE;
 
-       mtd_device_parse_register(info->mtd, probes, 0, NULL, 0);
+       mtd_device_parse_register(info->mtd, probes, 0, flash->parts, flash->nr_parts);
 
        platform_set_drvdata(pdev, info);
        return 0;
index 071b634..493ec2f 100644 (file)
@@ -21,9 +21,9 @@
 #include <linux/clk.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
+#include <linux/module.h>
 #include <linux/mtd/gpmi-nand.h>
 #include <linux/mtd/partitions.h>
-
 #include "gpmi-nand.h"
 
 /* add our owner bbt descriptor */
index ee17139..f8aacf4 100644 (file)
@@ -188,7 +188,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
        if (!flash_np)
                return -ENODEV;
 
-       ppdata->of_node = flash_np;
+       ppdata.of_node = flash_np;
        ndfc->mtd.name = kasprintf(GFP_KERNEL, "%s.%s",
                        dev_name(&ndfc->ofdev->dev), flash_np->name);
        if (!ndfc->mtd.name) {
index 3216c51..0c0dacb 100644 (file)
@@ -428,27 +428,34 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
  * @bond_dev: bonding net device that got called
  * @vid: vlan id being added
  */
-static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
+static int bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-       struct slave *slave;
+       struct slave *slave, *stop_at;
        int i, res;
 
        bond_for_each_slave(bond, slave, i) {
-               struct net_device *slave_dev = slave->dev;
-               const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-
-               if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) &&
-                   slave_ops->ndo_vlan_rx_add_vid) {
-                       slave_ops->ndo_vlan_rx_add_vid(slave_dev, vid);
-               }
+               res = vlan_vid_add(slave->dev, vid);
+               if (res)
+                       goto unwind;
        }
 
        res = bond_add_vlan(bond, vid);
        if (res) {
                pr_err("%s: Error: Failed to add vlan id %d\n",
                       bond_dev->name, vid);
+               return res;
        }
+
+       return 0;
+
+unwind:
+       /* unwind from head to the slave that failed */
+       stop_at = slave;
+       bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at)
+               vlan_vid_del(slave->dev, vid);
+
+       return res;
 }
 
 /**
@@ -456,56 +463,48 @@ static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
  * @bond_dev: bonding net device that got called
  * @vid: vlan id being removed
  */
-static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
+static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
 {
        struct bonding *bond = netdev_priv(bond_dev);
        struct slave *slave;
        int i, res;
 
-       bond_for_each_slave(bond, slave, i) {
-               struct net_device *slave_dev = slave->dev;
-               const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-
-               if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) &&
-                   slave_ops->ndo_vlan_rx_kill_vid) {
-                       slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vid);
-               }
-       }
+       bond_for_each_slave(bond, slave, i)
+               vlan_vid_del(slave->dev, vid);
 
        res = bond_del_vlan(bond, vid);
        if (res) {
                pr_err("%s: Error: Failed to remove vlan id %d\n",
                       bond_dev->name, vid);
+               return res;
        }
+
+       return 0;
 }
 
 static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *slave_dev)
 {
        struct vlan_entry *vlan;
-       const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-
-       if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
-           !(slave_ops->ndo_vlan_rx_add_vid))
-               return;
+       int res;
 
-       list_for_each_entry(vlan, &bond->vlan_list, vlan_list)
-               slave_ops->ndo_vlan_rx_add_vid(slave_dev, vlan->vlan_id);
+       list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
+               res = vlan_vid_add(slave_dev, vlan->vlan_id);
+               if (res)
+                       pr_warning("%s: Failed to add vlan id %d to device %s\n",
+                                  bond->dev->name, vlan->vlan_id,
+                                  slave_dev->name);
+       }
 }
 
 static void bond_del_vlans_from_slave(struct bonding *bond,
                                      struct net_device *slave_dev)
 {
-       const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
        struct vlan_entry *vlan;
 
-       if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
-           !(slave_ops->ndo_vlan_rx_kill_vid))
-               return;
-
        list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
                if (!vlan->vlan_id)
                        continue;
-               slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vlan->vlan_id);
+               vlan_vid_del(slave_dev, vlan->vlan_id);
        }
 }
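
The bonding hunks above move VLAN filtering onto vlan_vid_add()/vlan_vid_del() and make the add path transactional: if adding the VLAN id fails on one slave, the ids already added on earlier slaves are removed before the error is returned. A small plain-C sketch of that unwind-on-partial-failure shape (add_one/del_one stand in for the vlan helpers; none of this is the bonding code):

#include <stdio.h>

static int add_one(int slave, int vid)
{
    if (slave == 2)                     /* pretend the third slave fails    */
        return -1;
    printf("added vid %d on slave %d\n", vid, slave);
    return 0;
}

static void del_one(int slave, int vid)
{
    printf("unwind: removed vid %d from slave %d\n", vid, slave);
}

static int add_vid_to_all(int nslaves, int vid)
{
    int i, err;

    for (i = 0; i < nslaves; i++) {
        err = add_one(i, vid);
        if (err)
            goto unwind;
    }
    return 0;

unwind:
    while (--i >= 0)                    /* walk back over the ones that worked */
        del_one(i, vid);
    return err;
}

int main(void)
{
    return add_vid_to_all(4, 100) ? 1 : 0;
}
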
 
index 0733525..0a4fc62 100644 (file)
@@ -117,15 +117,6 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
        dev_dbg(&cfhsi->ndev->dev, "%s.\n",
                __func__);
 
-
-       ret = cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
-       if (ret) {
-               dev_warn(&cfhsi->ndev->dev,
-                       "%s: can't wake up HSI interface: %d.\n",
-                       __func__, ret);
-               return ret;
-       }
-
        do {
                ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
                                &fifo_occupancy);
@@ -168,8 +159,6 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
                }
        } while (1);
 
-       cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
-
        return ret;
 }
 
@@ -944,7 +933,7 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
 
                /* Create HSI frame. */
                len = cfhsi_tx_frm(desc, cfhsi);
-               BUG_ON(!len);
+               WARN_ON(!len);
 
                /* Set up new transfer. */
                res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
index 23406e6..8a3054b 100644 (file)
@@ -38,15 +38,15 @@ MODULE_ALIAS_LDISC(N_CAIF);
 /*This list is protected by the rtnl lock. */
 static LIST_HEAD(ser_list);
 
-static int ser_loop;
+static bool ser_loop;
 module_param(ser_loop, bool, S_IRUGO);
 MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode.");
 
-static int ser_use_stx = 1;
+static bool ser_use_stx = true;
 module_param(ser_use_stx, bool, S_IRUGO);
 MODULE_PARM_DESC(ser_use_stx, "STX enabled or not.");
 
-static int ser_use_fcs = 1;
+static bool ser_use_fcs = true;
 
 module_param(ser_use_fcs, bool, S_IRUGO);
 MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not.");
@@ -261,7 +261,7 @@ static int handle_tx(struct ser_device *ser)
                skb_pull(skb, tty_wr);
                if (skb->len == 0) {
                        struct sk_buff *tmp = skb_dequeue(&ser->head);
-                       BUG_ON(tmp != skb);
+                       WARN_ON(tmp != skb);
                        if (in_interrupt())
                                dev_kfree_skb_irq(skb);
                        else
@@ -305,7 +305,7 @@ static void ldisc_tx_wakeup(struct tty_struct *tty)
 
        ser = tty->disc_data;
        BUG_ON(ser == NULL);
-       BUG_ON(ser->tty != tty);
+       WARN_ON(ser->tty != tty);
        handle_tx(ser);
 }
 
index d4b26fb..5b20413 100644 (file)
@@ -238,11 +238,11 @@ int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
                if ((avail_emptybuff > HIGH_WATERMARK) &&
                                        (!pshm_drv->tx_empty_available)) {
                        pshm_drv->tx_empty_available = 1;
+                       spin_unlock_irqrestore(&pshm_drv->lock, flags);
                        pshm_drv->cfdev.flowctrl
                                        (pshm_drv->pshm_dev->pshm_netdev,
                                                                CAIF_FLOW_ON);
 
-                       spin_unlock_irqrestore(&pshm_drv->lock, flags);
 
                        /* Schedule the work queue. if required */
                        if (!work_pending(&pshm_drv->shm_tx_work))
@@ -285,6 +285,7 @@ static void shm_rx_work_func(struct work_struct *rx_work)
                        list_entry(pshm_drv->rx_full_list.next, struct buf_list,
                                        list);
                list_del_init(&pbuf->list);
+               spin_unlock_irqrestore(&pshm_drv->lock, flags);
 
                /* Retrieve pointer to start of the packet descriptor area. */
                pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;
@@ -336,7 +337,11 @@ static void shm_rx_work_func(struct work_struct *rx_work)
                        /* Get a suitable CAIF packet and copy in data. */
                        skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
                                                        frm_pck_len + 1);
-                       BUG_ON(skb == NULL);
+
+                       if (skb == NULL) {
+                               pr_info("OOM: Try next frame in descriptor\n");
+                               break;
+                       }
 
                        p = skb_put(skb, frm_pck_len);
                        memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);
@@ -360,6 +365,7 @@ static void shm_rx_work_func(struct work_struct *rx_work)
                        pck_desc++;
                }
 
+               spin_lock_irqsave(&pshm_drv->lock, flags);
                list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
 
                spin_unlock_irqrestore(&pshm_drv->lock, flags);
@@ -412,7 +418,6 @@ static void shm_tx_work_func(struct work_struct *tx_work)
 
                if (skb == NULL)
                        goto send_msg;
-
                /* Check the available no. of buffers in the empty list */
                list_for_each(pos, &pshm_drv->tx_empty_list)
                        avail_emptybuff++;
@@ -421,9 +426,11 @@ static void shm_tx_work_func(struct work_struct *tx_work)
                                        pshm_drv->tx_empty_available) {
                        /* Update blocking condition. */
                        pshm_drv->tx_empty_available = 0;
+                       spin_unlock_irqrestore(&pshm_drv->lock, flags);
                        pshm_drv->cfdev.flowctrl
                                        (pshm_drv->pshm_dev->pshm_netdev,
                                        CAIF_FLOW_OFF);
+                       spin_lock_irqsave(&pshm_drv->lock, flags);
                }
                /*
                 * We simply return back to the caller if we do not have space
@@ -469,6 +476,8 @@ static void shm_tx_work_func(struct work_struct *tx_work)
                        }
 
                        skb = skb_dequeue(&pshm_drv->sk_qhead);
+                       if (skb == NULL)
+                               break;
                        /* Copy in CAIF frame. */
                        skb_copy_bits(skb, 0, pbuf->desc_vptr +
                                        pbuf->frm_ofs + SHM_HDR_LEN +
@@ -477,7 +486,7 @@ static void shm_tx_work_func(struct work_struct *tx_work)
                        pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
                        pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
                                                                        frmlen;
-                       dev_kfree_skb(skb);
+                       dev_kfree_skb_irq(skb);
 
                        /* Fill in the shared memory packet descriptor area. */
                        pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
@@ -512,16 +521,11 @@ send_msg:
 static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
 {
        struct shmdrv_layer *pshm_drv;
-       unsigned long flags = 0;
 
        pshm_drv = netdev_priv(shm_netdev);
 
-       spin_lock_irqsave(&pshm_drv->lock, flags);
-
        skb_queue_tail(&pshm_drv->sk_qhead, skb);
 
-       spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
        /* Schedule Tx work queue. for deferred processing of skbs*/
        if (!work_pending(&pshm_drv->shm_tx_work))
                queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
@@ -606,6 +610,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
                pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
                                                (NR_TX_BUF * TX_BUF_SZ);
 
+       spin_lock_init(&pshm_drv->lock);
        INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
        INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
        INIT_LIST_HEAD(&pshm_drv->tx_full_list);
@@ -640,7 +645,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
                tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;
 
                if (pshm_dev->shm_loopback)
-                       tx_buf->desc_vptr = (char *)tx_buf->phy_addr;
+                       tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
                else
                        tx_buf->desc_vptr =
                                        ioremap(tx_buf->phy_addr, TX_BUF_SZ);
@@ -664,7 +669,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
                rx_buf->len = RX_BUF_SZ;
 
                if (pshm_dev->shm_loopback)
-                       rx_buf->desc_vptr = (char *)rx_buf->phy_addr;
+                       rx_buf->desc_vptr = (unsigned char *)rx_buf->phy_addr;
                else
                        rx_buf->desc_vptr =
                                        ioremap(rx_buf->phy_addr, RX_BUF_SZ);
index 05e791f..96391c3 100644
@@ -35,7 +35,7 @@ MODULE_DESCRIPTION("CAIF SPI driver");
 /* Returns the number of padding bytes for alignment. */
 #define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))
 
-static int spi_loop;
+static bool spi_loop;
 module_param(spi_loop, bool, S_IRUGO);
 MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
 
@@ -226,7 +226,7 @@ static ssize_t dbgfs_frame(struct file *file, char __user *user_buf,
                        "Tx data (Len: %d):\n", cfspi->tx_cpck_len);
 
        len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
-                          cfspi->xfer.va_tx,
+                          cfspi->xfer.va_tx[0],
                           (cfspi->tx_cpck_len + SPI_CMD_SZ), 100);
 
        len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
@@ -599,48 +599,11 @@ static int cfspi_close(struct net_device *dev)
        netif_stop_queue(dev);
        return 0;
 }
-static const struct net_device_ops cfspi_ops = {
-       .ndo_open = cfspi_open,
-       .ndo_stop = cfspi_close,
-       .ndo_start_xmit = cfspi_xmit
-};
 
-static void cfspi_setup(struct net_device *dev)
+static int cfspi_init(struct net_device *dev)
 {
+       int res = 0;
        struct cfspi *cfspi = netdev_priv(dev);
-       dev->features = 0;
-       dev->netdev_ops = &cfspi_ops;
-       dev->type = ARPHRD_CAIF;
-       dev->flags = IFF_NOARP | IFF_POINTOPOINT;
-       dev->tx_queue_len = 0;
-       dev->mtu = SPI_MAX_PAYLOAD_SIZE;
-       dev->destructor = free_netdev;
-       skb_queue_head_init(&cfspi->qhead);
-       skb_queue_head_init(&cfspi->chead);
-       cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
-       cfspi->cfdev.use_frag = false;
-       cfspi->cfdev.use_stx = false;
-       cfspi->cfdev.use_fcs = false;
-       cfspi->ndev = dev;
-}
-
-int cfspi_spi_probe(struct platform_device *pdev)
-{
-       struct cfspi *cfspi = NULL;
-       struct net_device *ndev;
-       struct cfspi_dev *dev;
-       int res;
-       dev = (struct cfspi_dev *)pdev->dev.platform_data;
-
-       ndev = alloc_netdev(sizeof(struct cfspi),
-                       "cfspi%d", cfspi_setup);
-       if (!ndev)
-               return -ENOMEM;
-
-       cfspi = netdev_priv(ndev);
-       netif_stop_queue(ndev);
-       cfspi->ndev = ndev;
-       cfspi->pdev = pdev;
 
        /* Set flow info. */
        cfspi->flow_off_sent = 0;
@@ -656,16 +619,11 @@ int cfspi_spi_probe(struct platform_device *pdev)
                cfspi->slave_talked = false;
        }
 
-       /* Assign the SPI device. */
-       cfspi->dev = dev;
-       /* Assign the device ifc to this SPI interface. */
-       dev->ifc = &cfspi->ifc;
-
        /* Allocate DMA buffers. */
-       cfspi->xfer.va_tx = dma_alloc(&cfspi->xfer.pa_tx);
-       if (!cfspi->xfer.va_tx) {
+       cfspi->xfer.va_tx[0] = dma_alloc(&cfspi->xfer.pa_tx[0]);
+       if (!cfspi->xfer.va_tx[0]) {
                res = -ENODEV;
-               goto err_dma_alloc_tx;
+               goto err_dma_alloc_tx_0;
        }
 
        cfspi->xfer.va_rx = dma_alloc(&cfspi->xfer.pa_rx);
@@ -714,6 +672,87 @@ int cfspi_spi_probe(struct platform_device *pdev)
        /* Schedule the work queue. */
        queue_work(cfspi->wq, &cfspi->work);
 
+       return 0;
+
+ err_create_wq:
+       dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
+ err_dma_alloc_rx:
+       dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
+ err_dma_alloc_tx_0:
+       return res;
+}
+
+static void cfspi_uninit(struct net_device *dev)
+{
+       struct cfspi *cfspi = netdev_priv(dev);
+
+       /* Remove from list. */
+       spin_lock(&cfspi_list_lock);
+       list_del(&cfspi->list);
+       spin_unlock(&cfspi_list_lock);
+
+       cfspi->ndev = NULL;
+       /* Free DMA buffers. */
+       dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
+       dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
+       set_bit(SPI_TERMINATE, &cfspi->state);
+       wake_up_interruptible(&cfspi->wait);
+       destroy_workqueue(cfspi->wq);
+       /* Destroy debugfs directory and files. */
+       dev_debugfs_rem(cfspi);
+       return;
+}
+
+static const struct net_device_ops cfspi_ops = {
+       .ndo_open = cfspi_open,
+       .ndo_stop = cfspi_close,
+       .ndo_init = cfspi_init,
+       .ndo_uninit = cfspi_uninit,
+       .ndo_start_xmit = cfspi_xmit
+};
+
+static void cfspi_setup(struct net_device *dev)
+{
+       struct cfspi *cfspi = netdev_priv(dev);
+       dev->features = 0;
+       dev->netdev_ops = &cfspi_ops;
+       dev->type = ARPHRD_CAIF;
+       dev->flags = IFF_NOARP | IFF_POINTOPOINT;
+       dev->tx_queue_len = 0;
+       dev->mtu = SPI_MAX_PAYLOAD_SIZE;
+       dev->destructor = free_netdev;
+       skb_queue_head_init(&cfspi->qhead);
+       skb_queue_head_init(&cfspi->chead);
+       cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
+       cfspi->cfdev.use_frag = false;
+       cfspi->cfdev.use_stx = false;
+       cfspi->cfdev.use_fcs = false;
+       cfspi->ndev = dev;
+}
+
+int cfspi_spi_probe(struct platform_device *pdev)
+{
+       struct cfspi *cfspi = NULL;
+       struct net_device *ndev;
+       struct cfspi_dev *dev;
+       int res;
+       dev = (struct cfspi_dev *)pdev->dev.platform_data;
+
+       ndev = alloc_netdev(sizeof(struct cfspi),
+                       "cfspi%d", cfspi_setup);
+       if (!dev)
+               return -ENODEV;
+
+       cfspi = netdev_priv(ndev);
+       netif_stop_queue(ndev);
+       cfspi->ndev = ndev;
+       cfspi->pdev = pdev;
+
+       /* Assign the SPI device. */
+       cfspi->dev = dev;
+       /* Assign the device ifc to this SPI interface. */
+       dev->ifc = &cfspi->ifc;
+
        /* Register network device. */
        res = register_netdev(ndev);
        if (res) {
@@ -723,15 +762,6 @@ int cfspi_spi_probe(struct platform_device *pdev)
        return res;
 
  err_net_reg:
-       dev_debugfs_rem(cfspi);
-       set_bit(SPI_TERMINATE, &cfspi->state);
-       wake_up_interruptible(&cfspi->wait);
-       destroy_workqueue(cfspi->wq);
- err_create_wq:
-       dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
- err_dma_alloc_rx:
-       dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx);
- err_dma_alloc_tx:
        free_netdev(ndev);
 
        return res;
@@ -739,34 +769,8 @@ int cfspi_spi_probe(struct platform_device *pdev)
 
 int cfspi_spi_remove(struct platform_device *pdev)
 {
-       struct list_head *list_node;
-       struct list_head *n;
-       struct cfspi *cfspi = NULL;
-       struct cfspi_dev *dev;
-
-       dev = (struct cfspi_dev *)pdev->dev.platform_data;
-       spin_lock(&cfspi_list_lock);
-       list_for_each_safe(list_node, n, &cfspi_list) {
-               cfspi = list_entry(list_node, struct cfspi, list);
-               /* Find the corresponding device. */
-               if (cfspi->dev == dev) {
-                       /* Remove from list. */
-                       list_del(list_node);
-                       /* Free DMA buffers. */
-                       dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
-                       dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx);
-                       set_bit(SPI_TERMINATE, &cfspi->state);
-                       wake_up_interruptible(&cfspi->wait);
-                       destroy_workqueue(cfspi->wq);
-                       /* Destroy debugfs directory and files. */
-                       dev_debugfs_rem(cfspi);
-                       unregister_netdev(cfspi->ndev);
-                       spin_unlock(&cfspi_list_lock);
-                       return 0;
-               }
-       }
-       spin_unlock(&cfspi_list_lock);
-       return -ENODEV;
+       /* Everything is done in cfspi_uninit(). */
+       return 0;
 }
 
 static void __exit cfspi_exit_module(void)
@@ -777,7 +781,7 @@ static void __exit cfspi_exit_module(void)
 
        list_for_each_safe(list_node, n, &cfspi_list) {
                cfspi = list_entry(list_node, struct cfspi, list);
-               platform_device_unregister(cfspi->pdev);
+               unregister_netdev(cfspi->ndev);
        }
 
        /* Destroy sysfs files. */
index f93e2d6..ea2d942 100644
@@ -63,7 +63,7 @@ MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
  * See Documentation/networking/can.txt for details.
  */
 
-static int echo; /* echo testing. Default: 0 (Off) */
+static bool echo; /* echo testing. Default: 0 (Off) */
 module_param(echo, bool, S_IRUGO);
 MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
 
index a446e25..cb4f38a 100644
@@ -607,7 +607,7 @@ static const struct ethtool_ops ethtool_ops;
 
 
 #ifdef VLAN_SUPPORT
-static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
        struct netdev_private *np = netdev_priv(dev);
 
@@ -617,9 +617,11 @@ static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
        set_bit(vid, np->active_vlans);
        set_rx_mode(dev);
        spin_unlock(&np->lock);
+
+       return 0;
 }
 
-static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
        struct netdev_private *np = netdev_priv(dev);
 
@@ -629,6 +631,8 @@ static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
        clear_bit(vid, np->active_vlans);
        set_rx_mode(dev);
        spin_unlock(&np->lock);
+
+       return 0;
 }
 #endif /* VLAN_SUPPORT */
 
index 5bbb53a..8baa352 100644
@@ -807,8 +807,8 @@ typedef enum {
 
 static int card_idx;
 static int speed_duplex[MAX_UNITS] = { 0, };
-static int coalesce[MAX_UNITS] = {1,1,1,1,1,1,1,1};
-static int dynamic_ipg[MAX_UNITS] = {0,0,0,0,0,0,0,0};
+static bool coalesce[MAX_UNITS] = { [ 0 ... MAX_UNITS-1] = true };
+static bool dynamic_ipg[MAX_UNITS] = { [ 0 ... MAX_UNITS-1] = false };
 static unsigned int chip_version;
 
 #endif /* _AMD8111E_H */
index 787e175..021fb81 100644
 #include "bnx2_fw.h"
 
 #define DRV_MODULE_NAME                "bnx2"
-#define DRV_MODULE_VERSION     "2.1.11"
-#define DRV_MODULE_RELDATE     "July 20, 2011"
-#define FW_MIPS_FILE_06                "bnx2/bnx2-mips-06-6.2.1.fw"
+#define DRV_MODULE_VERSION     "2.2.1"
+#define DRV_MODULE_RELDATE     "Dec 18, 2011"
+#define FW_MIPS_FILE_06                "bnx2/bnx2-mips-06-6.2.3.fw"
 #define FW_RV2P_FILE_06                "bnx2/bnx2-rv2p-06-6.0.15.fw"
-#define FW_MIPS_FILE_09                "bnx2/bnx2-mips-09-6.2.1a.fw"
+#define FW_MIPS_FILE_09                "bnx2/bnx2-mips-09-6.2.1b.fw"
 #define FW_RV2P_FILE_09_Ax     "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
 #define FW_RV2P_FILE_09                "bnx2/bnx2-rv2p-09-6.0.17.fw"
 
index 6c7bd63..8c73d34 100644
@@ -887,6 +887,8 @@ struct bnx2x_common {
 #define CHIP_PORT_MODE_NONE                    0x2
 #define CHIP_MODE(bp)                  (bp->common.chip_port_mode)
 #define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE)
+
+       u32                     boot_mode;
 };
 
 /* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */
@@ -1048,6 +1050,8 @@ struct bnx2x_slowpath {
 
        u32                             wb_comp;
        u32                             wb_data[4];
+
+       union drv_info_to_mcp           drv_info_to_mcp;
 };
 
 #define bnx2x_sp(bp, var)              (&bp->slowpath->var)
@@ -1128,18 +1132,21 @@ enum {
 enum {
        BNX2X_PORT_QUERY_IDX,
        BNX2X_PF_QUERY_IDX,
+       BNX2X_FCOE_QUERY_IDX,
        BNX2X_FIRST_QUEUE_QUERY_IDX,
 };
 
 struct bnx2x_fw_stats_req {
        struct stats_query_header hdr;
-       struct stats_query_entry query[STATS_QUERY_CMD_COUNT];
+       struct stats_query_entry query[FP_SB_MAX_E1x+
+               BNX2X_FIRST_QUEUE_QUERY_IDX];
 };
 
 struct bnx2x_fw_stats_data {
        struct stats_counter    storm_counters;
        struct per_port_stats   port;
        struct per_pf_stats     pf;
+       struct fcoe_statistics_params   fcoe;
        struct per_queue_stats  queue_stats[1];
 };
 
@@ -1266,6 +1273,7 @@ struct bnx2x {
 #define NO_ISCSI_OOO_FLAG              (1 << 13)
 #define NO_ISCSI_FLAG                  (1 << 14)
 #define NO_FCOE_FLAG                   (1 << 15)
+#define BC_SUPPORTS_PFC_STATS          (1 << 17)
 
 #define NO_ISCSI(bp)           ((bp)->flags & NO_ISCSI_FLAG)
 #define NO_ISCSI_OOO(bp)       ((bp)->flags & NO_ISCSI_OOO_FLAG)
index 477bc97..2b731b2 100644
@@ -731,22 +731,22 @@ reuse_rx:
                                bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
                                goto next_rx;
                        }
+               }
 
-                       skb_put(skb, len);
-                       skb->protocol = eth_type_trans(skb, bp->dev);
+               skb_put(skb, len);
+               skb->protocol = eth_type_trans(skb, bp->dev);
 
-                       /* Set Toeplitz hash for a none-LRO skb */
-                       skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);
+               /* Set Toeplitz hash for a none-LRO skb */
+               skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);
 
-                       skb_checksum_none_assert(skb);
+               skb_checksum_none_assert(skb);
 
-                       if (bp->dev->features & NETIF_F_RXCSUM) {
+               if (bp->dev->features & NETIF_F_RXCSUM) {
 
-                               if (likely(BNX2X_RX_CSUM_OK(cqe)))
-                                       skb->ip_summed = CHECKSUM_UNNECESSARY;
-                               else
-                                       fp->eth_q_stats.hw_csum_err++;
-                       }
+                       if (likely(BNX2X_RX_CSUM_OK(cqe)))
+                               skb->ip_summed = CHECKSUM_UNNECESSARY;
+                       else
+                               fp->eth_q_stats.hw_csum_err++;
                }
 
                skb_record_rx_queue(skb, fp->rx_queue);
@@ -1545,7 +1545,8 @@ static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
        if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
                for (i = 0; i < sizeof(ind_table); i++)
                        ind_table[i] =
-                               bp->fp->cl_id + (i % num_eth_queues);
+                               bp->fp->cl_id +
+                               ethtool_rxfh_indir_default(i, num_eth_queues);
        }
 
        /*
index 2891cdc..bf27c54 100644
@@ -1491,7 +1491,6 @@ static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
        return max_cfg;
 }
 
-#ifdef BCM_CNIC
 /**
  * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
  *
@@ -1499,7 +1498,6 @@ static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
  *
  */
 void bnx2x_get_iscsi_info(struct bnx2x *bp);
-#endif
 
 /* returns func by VN for current port */
 static inline int func_by_vn(struct bnx2x *bp, int vn)
index c679ed9..a688b9d 100644
@@ -107,6 +107,10 @@ static const struct {
                                4, STATS_FLAGS_PORT, "rx_filtered_packets" },
        { STATS_OFFSET32(mf_tag_discard),
                                4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
+       { STATS_OFFSET32(pfc_frames_received_hi),
+                               8, STATS_FLAGS_PORT, "pfc_frames_received" },
+       { STATS_OFFSET32(pfc_frames_sent_hi),
+                               8, STATS_FLAGS_PORT, "pfc_frames_sent" },
        { STATS_OFFSET32(brb_drop_hi),
                                8, STATS_FLAGS_PORT, "rx_brb_discard" },
        { STATS_OFFSET32(brb_truncate_hi),
@@ -1741,6 +1745,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
        u16 len;
        int rc = -ENODEV;
        u8 *data;
+       struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
 
        /* check the loopback mode */
        switch (loopback_mode) {
@@ -1795,6 +1800,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
        tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb);
        rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
 
+       netdev_tx_sent_queue(txq, skb->len);
+
        pkt_prod = txdata->tx_pkt_prod++;
        tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
        tx_buf->first_bd = txdata->tx_bd_prod;
@@ -2295,18 +2302,20 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
        }
 }
 
-static int bnx2x_get_rxfh_indir(struct net_device *dev,
-                               struct ethtool_rxfh_indir *indir)
+static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+
+       return (bp->multi_mode == ETH_RSS_MODE_DISABLED ?
+               0 : T_ETH_INDIRECTION_TABLE_SIZE);
+}
+
+static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
 {
        struct bnx2x *bp = netdev_priv(dev);
-       size_t copy_size =
-               min_t(size_t, indir->size, T_ETH_INDIRECTION_TABLE_SIZE);
        u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
        size_t i;
 
-       if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
-               return -EOPNOTSUPP;
-
        /* Get the current configuration of the RSS indirection table */
        bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table);
 
@@ -2319,33 +2328,19 @@ static int bnx2x_get_rxfh_indir(struct net_device *dev,
         * align the returned table to the Client ID of the leading RSS
         * queue.
         */
-       for (i = 0; i < copy_size; i++)
-               indir->ring_index[i] = ind_table[i] - bp->fp->cl_id;
-
-       indir->size = T_ETH_INDIRECTION_TABLE_SIZE;
+       for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
+               indir[i] = ind_table[i] - bp->fp->cl_id;
 
        return 0;
 }
 
-static int bnx2x_set_rxfh_indir(struct net_device *dev,
-                               const struct ethtool_rxfh_indir *indir)
+static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
 {
        struct bnx2x *bp = netdev_priv(dev);
        size_t i;
        u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
-       u32 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
-
-       if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
-               return -EOPNOTSUPP;
-
-       /* validate the size */
-       if (indir->size != T_ETH_INDIRECTION_TABLE_SIZE)
-               return -EINVAL;
 
        for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
-               /* validate the indices */
-               if (indir->ring_index[i] >= num_eth_queues)
-                       return -EINVAL;
                /*
                 * The same as in bnx2x_get_rxfh_indir: we can't use a memcpy()
                 * as an internal storage of an indirection table is a u8 array
@@ -2355,7 +2350,7 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev,
                 * align the received table to the Client ID of the leading RSS
                 * queue
                 */
-               ind_table[i] = indir->ring_index[i] + bp->fp->cl_id;
+               ind_table[i] = indir[i] + bp->fp->cl_id;
        }
 
        return bnx2x_config_rss_pf(bp, ind_table, false);
@@ -2388,6 +2383,7 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
        .set_phys_id            = bnx2x_set_phys_id,
        .get_ethtool_stats      = bnx2x_get_ethtool_stats,
        .get_rxnfc              = bnx2x_get_rxnfc,
+       .get_rxfh_indir_size    = bnx2x_get_rxfh_indir_size,
        .get_rxfh_indir         = bnx2x_get_rxfh_indir,
        .set_rxfh_indir         = bnx2x_set_rxfh_indir,
 };
index fc754cb..3e30c86 100644
@@ -1247,11 +1247,14 @@ struct drv_func_mb {
        #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL  0xa1000000
        #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL  0x00050234
        #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED   0x00070014
+       #define REQ_BC_VER_4_PFC_STATS_SUPPORTED        0x00070201
 
        #define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG         0xb0000000
        #define DRV_MSG_CODE_DCBX_PMF_DRV_OK            0xb2000000
 
        #define DRV_MSG_CODE_VF_DISABLED_DONE           0xc0000000
+       #define DRV_MSG_CODE_DRV_INFO_ACK               0xd8000000
+       #define DRV_MSG_CODE_DRV_INFO_NACK              0xd9000000
 
        #define DRV_MSG_CODE_SET_MF_BW                  0xe0000000
        #define REQ_BC_VER_4_SET_MF_BW                  0x00060202
@@ -1304,6 +1307,8 @@ struct drv_func_mb {
        #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG      0xa0200000
        #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED     0xa0300000
        #define FW_MSG_CODE_VF_DISABLED_DONE            0xb0000000
+       #define FW_MSG_CODE_DRV_INFO_ACK                0xd8100000
+       #define FW_MSG_CODE_DRV_INFO_NACK               0xd9100000
 
        #define FW_MSG_CODE_SET_MF_BW_SENT              0xe0000000
        #define FW_MSG_CODE_SET_MF_BW_DONE              0xe1000000
@@ -1360,6 +1365,7 @@ struct drv_func_mb {
 
        #define DRV_STATUS_DCBX_EVENT_MASK              0x000f0000
        #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS     0x00010000
+       #define DRV_STATUS_DRV_INFO_REQ                 0x04000000
 
        u32 virt_mac_upper;
        #define VIRT_MAC_SIGN_MASK                      0xffff0000
@@ -1964,9 +1970,38 @@ struct shmem2_region {
        u32 extended_dev_info_shared_addr;
        u32 ncsi_oem_data_addr;
 
-       u32 ocsd_host_addr;
-       u32 ocbb_host_addr;
-       u32 ocsd_req_update_interval;
+       u32 ocsd_host_addr; /* initialized by option ROM */
+       u32 ocbb_host_addr; /* initialized by option ROM */
+       u32 ocsd_req_update_interval; /* initialized by option ROM */
+       u32 temperature_in_half_celsius;
+       u32 glob_struct_in_host;
+
+       u32 dcbx_neg_res_ext_offset;
+#define SHMEM_DCBX_NEG_RES_EXT_NONE                    0x00000000
+
+       u32 drv_capabilities_flag[E2_FUNC_MAX];
+#define DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED 0x00000001
+#define DRV_FLAGS_CAPABILITIES_LOADED_L2        0x00000002
+#define DRV_FLAGS_CAPABILITIES_LOADED_FCOE      0x00000004
+#define DRV_FLAGS_CAPABILITIES_LOADED_ISCSI     0x00000008
+
+       u32 extended_dev_info_shared_cfg_size;
+
+       u32 dcbx_en[PORT_MAX];
+
+       /* The offset points to the multi threaded meta structure */
+       u32 multi_thread_data_offset;
+
+       /* address of DMAable host address holding values from the drivers */
+       u32 drv_info_host_addr_lo;
+       u32 drv_info_host_addr_hi;
+
+       /* general values written by the MFW (such as current version) */
+       u32 drv_info_control;
+#define DRV_INFO_CONTROL_VER_MASK          0x000000ff
+#define DRV_INFO_CONTROL_VER_SHIFT         0
+#define DRV_INFO_CONTROL_OP_CODE_MASK      0x0000ff00
+#define DRV_INFO_CONTROL_OP_CODE_SHIFT     8
 };
 
 
@@ -2501,14 +2536,18 @@ struct mac_stx {
 #define MAC_STX_IDX_MAX                     2
 
 struct host_port_stats {
-       u32            host_port_stats_start;
+       u32            host_port_stats_counter;
 
        struct mac_stx mac_stx[MAC_STX_IDX_MAX];
 
        u32            brb_drop_hi;
        u32            brb_drop_lo;
 
-       u32            host_port_stats_end;
+       u32            not_used; /* obsolete */
+       u32            pfc_frames_tx_hi;
+       u32            pfc_frames_tx_lo;
+       u32            pfc_frames_rx_hi;
+       u32            pfc_frames_rx_lo;
 };
 
 
@@ -2548,6 +2587,118 @@ struct host_func_stats {
 /* VIC definitions */
 #define VICSTATST_UIF_INDEX 2
 
+/* current drv_info version */
+#define DRV_INFO_CUR_VER 1
+
+/* drv_info op codes supported */
+enum drv_info_opcode {
+       ETH_STATS_OPCODE,
+       FCOE_STATS_OPCODE,
+       ISCSI_STATS_OPCODE
+};
+
+#define ETH_STAT_INFO_VERSION_LEN      12
+/*  Per PCI Function Ethernet Statistics required from the driver */
+struct eth_stats_info {
+       /* Function's Driver Version. padded to 12 */
+       u8 version[ETH_STAT_INFO_VERSION_LEN];
+       /* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */
+       u8 mac_local[8];
+       u8 mac_add1[8];         /* Additional Programmed MAC Addr 1. */
+       u8 mac_add2[8];         /* Additional Programmed MAC Addr 2. */
+       u32 mtu_size;           /* MTU Size. Note   : Negotiated MTU */
+       u32 feature_flags;      /* Feature_Flags. */
+#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK                0x01
+#define FEATURE_ETH_LSO_MASK                   0x02
+#define FEATURE_ETH_BOOTMODE_MASK              0x1C
+#define FEATURE_ETH_BOOTMODE_SHIFT             2
+#define FEATURE_ETH_BOOTMODE_NONE              (0x0 << 2)
+#define FEATURE_ETH_BOOTMODE_PXE               (0x1 << 2)
+#define FEATURE_ETH_BOOTMODE_ISCSI             (0x2 << 2)
+#define FEATURE_ETH_BOOTMODE_FCOE              (0x3 << 2)
+#define FEATURE_ETH_TOE_MASK                   0x20
+       u32 lso_max_size;       /* LSO MaxOffloadSize. */
+       u32 lso_min_seg_cnt;    /* LSO MinSegmentCount. */
+       /* Num Offloaded Connections TCP_IPv4. */
+       u32 ipv4_ofld_cnt;
+       /* Num Offloaded Connections TCP_IPv6. */
+       u32 ipv6_ofld_cnt;
+       u32 promiscuous_mode;   /* Promiscuous Mode. non-zero true */
+       u32 txq_size;           /* TX Descriptors Queue Size */
+       u32 rxq_size;           /* RX Descriptors Queue Size */
+       /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
+       u32 txq_avg_depth;
+       /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
+       u32 rxq_avg_depth;
+       /* IOV_Offload. 0=none; 1=MultiQueue, 2=VEB 3= VEPA*/
+       u32 iov_offload;
+       /* Number of NetQueue/VMQ Config'd. */
+       u32 netq_cnt;
+       u32 vf_cnt;             /* Num VF assigned to this PF. */
+};
+
+/*  Per PCI Function FCOE Statistics required from the driver */
+struct fcoe_stats_info {
+       u8 version[12];         /* Function's Driver Version. */
+       u8 mac_local[8];        /* Locally Admin Addr. */
+       u8 mac_add1[8];         /* Additional Programmed MAC Addr 1. */
+       u8 mac_add2[8];         /* Additional Programmed MAC Addr 2. */
+       /* QoS Priority (per 802.1p). 0-7255 */
+       u32 qos_priority;
+       u32 txq_size;           /* FCoE TX Descriptors Queue Size. */
+       u32 rxq_size;           /* FCoE RX Descriptors Queue Size. */
+       /* FCoE TX Descriptor Queue Avg Depth. */
+       u32 txq_avg_depth;
+       /* FCoE RX Descriptors Queue Avg Depth. */
+       u32 rxq_avg_depth;
+       u32 rx_frames_lo;       /* FCoE RX Frames received. */
+       u32 rx_frames_hi;       /* FCoE RX Frames received. */
+       u32 rx_bytes_lo;        /* FCoE RX Bytes received. */
+       u32 rx_bytes_hi;        /* FCoE RX Bytes received. */
+       u32 tx_frames_lo;       /* FCoE TX Frames sent. */
+       u32 tx_frames_hi;       /* FCoE TX Frames sent. */
+       u32 tx_bytes_lo;        /* FCoE TX Bytes sent. */
+       u32 tx_bytes_hi;        /* FCoE TX Bytes sent. */
+};
+
+/* Per PCI  Function iSCSI Statistics required from the driver*/
+struct iscsi_stats_info {
+       u8 version[12];         /* Function's Driver Version. */
+       u8 mac_local[8];        /* Locally Admin iSCSI MAC Addr. */
+       u8 mac_add1[8];         /* Additional Programmed MAC Addr 1. */
+       /* QoS Priority (per 802.1p). 0-7255 */
+       u32 qos_priority;
+       u8 initiator_name[64];  /* iSCSI Boot Initiator Node name. */
+       u8 ww_port_name[64];    /* iSCSI World wide port name */
+       u8 boot_target_name[64];/* iSCSI Boot Target Name. */
+       u8 boot_target_ip[16];  /* iSCSI Boot Target IP. */
+       u32 boot_target_portal; /* iSCSI Boot Target Portal. */
+       u8 boot_init_ip[16];    /* iSCSI Boot Initiator IP Address. */
+       u32 max_frame_size;     /* Max Frame Size. bytes */
+       u32 txq_size;           /* PDU TX Descriptors Queue Size. */
+       u32 rxq_size;           /* PDU RX Descriptors Queue Size. */
+       u32 txq_avg_depth;      /* PDU TX Descriptor Queue Avg Depth. */
+       u32 rxq_avg_depth;      /* PDU RX Descriptors Queue Avg Depth. */
+       u32 rx_pdus_lo;         /* iSCSI PDUs received. */
+       u32 rx_pdus_hi;         /* iSCSI PDUs received. */
+       u32 rx_bytes_lo;        /* iSCSI RX Bytes received. */
+       u32 rx_bytes_hi;        /* iSCSI RX Bytes received. */
+       u32 tx_pdus_lo;         /* iSCSI PDUs sent. */
+       u32 tx_pdus_hi;         /* iSCSI PDUs sent. */
+       u32 tx_bytes_lo;        /* iSCSI PDU TX Bytes sent. */
+       u32 tx_bytes_hi;        /* iSCSI PDU TX Bytes sent. */
+       u32 pcp_prior_map_tbl;  /* C-PCP to S-PCP Priority MapTable.
+                                * 9 nibbles, the position of each nibble
+                                * represents the C-PCP value, the value
+                                * of the nibble = S-PCP value.
+                                */
+};
+
+union drv_info_to_mcp {
+       struct eth_stats_info   ether_stat;
+       struct fcoe_stats_info  fcoe_stat;
+       struct iscsi_stats_info iscsi_stat;
+};
 #define BCM_5710_FW_MAJOR_VERSION                      7
 #define BCM_5710_FW_MINOR_VERSION                      0
 #define BCM_5710_FW_REVISION_VERSION           29
@@ -4161,8 +4312,62 @@ struct ustorm_eth_rx_producers {
 
 
 /*
- * cfc delete event data
+ * FCoE RX statistics parameters section#0
  */
+struct fcoe_rx_stat_params_section0 {
+       __le32 fcoe_rx_pkt_cnt;
+       __le32 fcoe_rx_byte_cnt;
+};
+
+
+/*
+ * FCoE RX statistics parameters section#1
+ */
+struct fcoe_rx_stat_params_section1 {
+       __le32 fcoe_ver_cnt;
+       __le32 fcoe_rx_drop_pkt_cnt;
+};
+
+
+/*
+ * FCoE RX statistics parameters section#2
+ */
+struct fcoe_rx_stat_params_section2 {
+       __le32 fc_crc_cnt;
+       __le32 eofa_del_cnt;
+       __le32 miss_frame_cnt;
+       __le32 seq_timeout_cnt;
+       __le32 drop_seq_cnt;
+       __le32 fcoe_rx_drop_pkt_cnt;
+       __le32 fcp_rx_pkt_cnt;
+       __le32 reserved0;
+};
+
+
+/*
+ * FCoE TX statistics parameters
+ */
+struct fcoe_tx_stat_params {
+       __le32 fcoe_tx_pkt_cnt;
+       __le32 fcoe_tx_byte_cnt;
+       __le32 fcp_tx_pkt_cnt;
+       __le32 reserved0;
+};
+
+/*
+ * FCoE statistics parameters
+ */
+struct fcoe_statistics_params {
+       struct fcoe_tx_stat_params tx_stat;
+       struct fcoe_rx_stat_params_section0 rx_stat0;
+       struct fcoe_rx_stat_params_section1 rx_stat1;
+       struct fcoe_rx_stat_params_section2 rx_stat2;
+};
+
+
+/*
+ * cfc delete event data
+*/
 struct cfc_del_event_data {
        u32 cid;
        u32 reserved0;
index 2213e0b..ffeaaa9 100644
@@ -2624,15 +2624,6 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
        return rc;
 }
 
-static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
-{
-#ifdef BCM_CNIC
-       /* Statistics are not supported for CNIC Clients at the moment */
-       if (IS_FCOE_FP(fp))
-               return false;
-#endif
-       return true;
-}
 
 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
 {
@@ -2676,11 +2667,11 @@ static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
         *  parent connection). The statistics are zeroed when the parent
         *  connection is initialized.
         */
-       if (stat_counter_valid(bp, fp)) {
-               __set_bit(BNX2X_Q_FLG_STATS, &flags);
-               if (zero_stats)
-                       __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
-       }
+
+       __set_bit(BNX2X_Q_FLG_STATS, &flags);
+       if (zero_stats)
+               __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+
 
        return flags;
 }
@@ -2921,6 +2912,143 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
         */
 }
 
+#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
+
+static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
+{
+       struct eth_stats_info *ether_stat =
+               &bp->slowpath->drv_info_to_mcp.ether_stat;
+
+       /* leave last char as NULL */
+       memcpy(ether_stat->version, DRV_MODULE_VERSION,
+              ETH_STAT_INFO_VERSION_LEN - 1);
+
+       bp->fp[0].mac_obj.get_n_elements(bp, &bp->fp[0].mac_obj,
+                                        DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
+                                        ether_stat->mac_local);
+
+       ether_stat->mtu_size = bp->dev->mtu;
+
+       if (bp->dev->features & NETIF_F_RXCSUM)
+               ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
+       if (bp->dev->features & NETIF_F_TSO)
+               ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
+       ether_stat->feature_flags |= bp->common.boot_mode;
+
+       ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
+
+       ether_stat->txq_size = bp->tx_ring_size;
+       ether_stat->rxq_size = bp->rx_ring_size;
+}
+
+static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+       struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
+       struct fcoe_stats_info *fcoe_stat =
+               &bp->slowpath->drv_info_to_mcp.fcoe_stat;
+
+       memcpy(fcoe_stat->mac_local, bp->fip_mac, ETH_ALEN);
+
+       fcoe_stat->qos_priority =
+               app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
+
+       /* insert FCoE stats from ramrod response */
+       if (!NO_FCOE(bp)) {
+               struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
+                       &bp->fw_stats_data->queue_stats[FCOE_IDX].
+                       tstorm_queue_statistics;
+
+               struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
+                       &bp->fw_stats_data->queue_stats[FCOE_IDX].
+                       xstorm_queue_statistics;
+
+               struct fcoe_statistics_params *fw_fcoe_stat =
+                       &bp->fw_stats_data->fcoe;
+
+               ADD_64(fcoe_stat->rx_bytes_hi, 0, fcoe_stat->rx_bytes_lo,
+                      fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
+
+               ADD_64(fcoe_stat->rx_bytes_hi,
+                      fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
+                      fcoe_stat->rx_bytes_lo,
+                      fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
+
+               ADD_64(fcoe_stat->rx_bytes_hi,
+                      fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
+                      fcoe_stat->rx_bytes_lo,
+                      fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
+
+               ADD_64(fcoe_stat->rx_bytes_hi,
+                      fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
+                      fcoe_stat->rx_bytes_lo,
+                      fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
+
+               ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+                      fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
+
+               ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+                      fcoe_q_tstorm_stats->rcv_ucast_pkts);
+
+               ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+                      fcoe_q_tstorm_stats->rcv_bcast_pkts);
+
+               ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+                      fcoe_q_tstorm_stats->rcv_mcast_pkts);
+
+               ADD_64(fcoe_stat->tx_bytes_hi, 0, fcoe_stat->tx_bytes_lo,
+                      fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
+
+               ADD_64(fcoe_stat->tx_bytes_hi,
+                      fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
+                      fcoe_stat->tx_bytes_lo,
+                      fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
+
+               ADD_64(fcoe_stat->tx_bytes_hi,
+                      fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
+                      fcoe_stat->tx_bytes_lo,
+                      fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
+
+               ADD_64(fcoe_stat->tx_bytes_hi,
+                      fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
+                      fcoe_stat->tx_bytes_lo,
+                      fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
+
+               ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+                      fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
+
+               ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+                      fcoe_q_xstorm_stats->ucast_pkts_sent);
+
+               ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+                      fcoe_q_xstorm_stats->bcast_pkts_sent);
+
+               ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+                      fcoe_q_xstorm_stats->mcast_pkts_sent);
+       }
+
+       /* ask L5 driver to add data to the struct */
+       bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
+#endif
+}
+
+static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+       struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
+       struct iscsi_stats_info *iscsi_stat =
+               &bp->slowpath->drv_info_to_mcp.iscsi_stat;
+
+       memcpy(iscsi_stat->mac_local, bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
+
+       iscsi_stat->qos_priority =
+               app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
+
+       /* ask L5 driver to add data to the struct */
+       bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
+#endif
+}
+
 /* called due to MCP event (on pmf):
  *     reread new bandwidth configuration
  *     configure FW
@@ -2941,6 +3069,50 @@ static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
        bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
 }
 
+static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
+{
+       enum drv_info_opcode op_code;
+       u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
+
+       /* if drv_info version supported by MFW doesn't match - send NACK */
+       if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
+               bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
+               return;
+       }
+
+       op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
+                 DRV_INFO_CONTROL_OP_CODE_SHIFT;
+
+       memset(&bp->slowpath->drv_info_to_mcp, 0,
+              sizeof(union drv_info_to_mcp));
+
+       switch (op_code) {
+       case ETH_STATS_OPCODE:
+               bnx2x_drv_info_ether_stat(bp);
+               break;
+       case FCOE_STATS_OPCODE:
+               bnx2x_drv_info_fcoe_stat(bp);
+               break;
+       case ISCSI_STATS_OPCODE:
+               bnx2x_drv_info_iscsi_stat(bp);
+               break;
+       default:
+               /* if op code isn't supported - send NACK */
+               bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
+               return;
+       }
+
+       /* if we got drv_info attn from MFW then these fields are defined in
+        * shmem2 for sure
+        */
+       SHMEM2_WR(bp, drv_info_host_addr_lo,
+               U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
+       SHMEM2_WR(bp, drv_info_host_addr_hi,
+               U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
+
+       bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
+}
+
 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
 {
        DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
@@ -3448,6 +3620,8 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
                        if (val & DRV_STATUS_SET_MF_BW)
                                bnx2x_set_mf_bw(bp);
 
+                       if (val & DRV_STATUS_DRV_INFO_REQ)
+                               bnx2x_handle_drv_info_req(bp);
                        if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
                                bnx2x_pmf_update(bp);
 
@@ -6848,13 +7022,16 @@ void bnx2x_free_mem(struct bnx2x *bp)
 static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
 {
        int num_groups;
+       int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
 
-       /* number of eth_queues */
-       u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp);
+       /* number of queues for statistics is number of eth queues + FCoE */
+       u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
 
        /* Total number of FW statistics requests =
-        * 1 for port stats + 1 for PF stats + num_eth_queues */
-       bp->fw_stats_num = 2 + num_queue_stats;
+        * 1 for port stats + 1 for PF stats + potential 1 for FCoE stats +
+        * num of queues
+        */
+       bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
 
 
        /* Request is built from stats_query_header and an array of
@@ -6862,8 +7039,8 @@ static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
         * STATS_QUERY_CMD_COUNT rules. The real number or requests is
         * configured in the stats_query_header.
         */
-       num_groups = (2 + num_queue_stats) / STATS_QUERY_CMD_COUNT +
-               (((2 + num_queue_stats) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
+       num_groups = ((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) +
+                    (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
 
        bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
                        num_groups * sizeof(struct stats_query_cmd_group);
@@ -6872,9 +7049,13 @@ static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
         *
         * stats_counter holds per-STORM counters that are incremented
         * when STORM has finished with the current request.
+        *
+        * memory for FCoE offloaded statistics are counted anyway,
+        * even if they will not be sent.
         */
        bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
                sizeof(struct per_pf_stats) +
+               sizeof(struct fcoe_statistics_params) +
                sizeof(struct per_queue_stats) * num_queue_stats +
                sizeof(struct stats_counter);
 
@@ -8718,7 +8899,7 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 
 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
 {
-       u32 val, val2, val3, val4, id;
+       u32 val, val2, val3, val4, id, boot_mode;
        u16 pmc;
 
        /* Get the chip revision id and number. */
@@ -8827,6 +9008,26 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
        bp->link_params.feature_config_flags |=
                (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
                FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
+       bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
+                       BC_SUPPORTS_PFC_STATS : 0;
+
+       boot_mode = SHMEM_RD(bp,
+                       dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
+                       PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
+       switch (boot_mode) {
+       case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
+               bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
+               break;
+       case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
+               bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
+               break;
+       case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
+               bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
+               break;
+       case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
+               bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
+               break;
+       }
 
        pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
        bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
@@ -9277,9 +9478,9 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
                                                        bp->common.shmem2_base);
 }
 
-#ifdef BCM_CNIC
 void bnx2x_get_iscsi_info(struct bnx2x *bp)
 {
+#ifdef BCM_CNIC
        int port = BP_PORT(bp);
 
        u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
@@ -9299,10 +9500,14 @@ void bnx2x_get_iscsi_info(struct bnx2x *bp)
         */
        if (!bp->cnic_eth_dev.max_iscsi_conn)
                bp->flags |= NO_ISCSI_FLAG;
+#else
+       bp->flags |= NO_ISCSI_FLAG;
+#endif
 }
 
 static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
 {
+#ifdef BCM_CNIC
        int port = BP_PORT(bp);
        int func = BP_ABS_FUNC(bp);
 
@@ -9369,6 +9574,9 @@ static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
         */
        if (!bp->cnic_eth_dev.max_fcoe_conn)
                bp->flags |= NO_FCOE_FLAG;
+#else
+       bp->flags |= NO_FCOE_FLAG;
+#endif
 }
 
 static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
@@ -9381,7 +9589,6 @@ static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
        bnx2x_get_iscsi_info(bp);
        bnx2x_get_fcoe_info(bp);
 }
-#endif
 
 static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
 {
@@ -9706,9 +9913,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
        /* Get MAC addresses */
        bnx2x_get_mac_hwinfo(bp);
 
-#ifdef BCM_CNIC
        bnx2x_get_cnic_info(bp);
-#endif
 
        /* Get current FW pulse sequence */
        if (!BP_NOMCP(bp)) {
@@ -9726,30 +9931,49 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
 {
        int cnt, i, block_end, rodi;
-       char vpd_data[BNX2X_VPD_LEN+1];
+       char vpd_start[BNX2X_VPD_LEN+1];
        char str_id_reg[VENDOR_ID_LEN+1];
        char str_id_cap[VENDOR_ID_LEN+1];
+       char *vpd_data;
+       char *vpd_extended_data = NULL;
        u8 len;
 
-       cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
+       cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
        memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
 
        if (cnt < BNX2X_VPD_LEN)
                goto out_not_found;
 
-       i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
+       /* VPD RO tag should be first tag after identifier string, hence
+        * we should be able to find it in first BNX2X_VPD_LEN chars
+        */
+       i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
                             PCI_VPD_LRDT_RO_DATA);
        if (i < 0)
                goto out_not_found;
 
-
        block_end = i + PCI_VPD_LRDT_TAG_SIZE +
-                   pci_vpd_lrdt_size(&vpd_data[i]);
+                   pci_vpd_lrdt_size(&vpd_start[i]);
 
        i += PCI_VPD_LRDT_TAG_SIZE;
 
-       if (block_end > BNX2X_VPD_LEN)
-               goto out_not_found;
+       if (block_end > BNX2X_VPD_LEN) {
+               vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
+               if (vpd_extended_data  == NULL)
+                       goto out_not_found;
+
+               /* read rest of vpd image into vpd_extended_data */
+               memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
+               cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
+                                  block_end - BNX2X_VPD_LEN,
+                                  vpd_extended_data + BNX2X_VPD_LEN);
+               if (cnt < (block_end - BNX2X_VPD_LEN))
+                       goto out_not_found;
+               vpd_data = vpd_extended_data;
+       } else
+               vpd_data = vpd_start;
+
+       /* now vpd_data holds full vpd content in both cases */
 
        rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
                                   PCI_VPD_RO_KEYWORD_MFR_ID);
@@ -9781,9 +10005,11 @@ static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
                                bp->fw_ver[len] = ' ';
                        }
                }
+               kfree(vpd_extended_data);
                return;
        }
 out_not_found:
+       kfree(vpd_extended_data);
        return;
 }
 
@@ -11550,6 +11776,38 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
                smp_mb__after_atomic_inc();
                break;
        }
+       case DRV_CTL_ULP_REGISTER_CMD: {
+               int ulp_type = ctl->data.ulp_type;
+
+               if (CHIP_IS_E3(bp)) {
+                       int idx = BP_FW_MB_IDX(bp);
+                       u32 cap;
+
+                       cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+                       if (ulp_type == CNIC_ULP_ISCSI)
+                               cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
+                       else if (ulp_type == CNIC_ULP_FCOE)
+                               cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
+                       SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+               }
+               break;
+       }
+       case DRV_CTL_ULP_UNREGISTER_CMD: {
+               int ulp_type = ctl->data.ulp_type;
+
+               if (CHIP_IS_E3(bp)) {
+                       int idx = BP_FW_MB_IDX(bp);
+                       u32 cap;
+
+                       cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+                       if (ulp_type == CNIC_ULP_ISCSI)
+                               cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
+                       else if (ulp_type == CNIC_ULP_FCOE)
+                               cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
+                       SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+               }
+               break;
+       }
 
        default:
                BNX2X_ERR("unknown command %x\n", ctl->cmd);
index a34362e..5ac6160 100644
@@ -30,6 +30,8 @@
 
 #define BNX2X_MAX_EMUL_MULTI           16
 
+#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
+
 /**** Exe Queue interfaces ****/
 
 /**
@@ -441,6 +443,36 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
        return true;
 }
 
+static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
+                               int n, u8 *buf)
+{
+       struct bnx2x_vlan_mac_registry_elem *pos;
+       u8 *next = buf;
+       int counter = 0;
+
+       /* traverse list */
+       list_for_each_entry(pos, &o->head, link) {
+               if (counter < n) {
+                       /* place leading zeroes in buffer */
+                       memset(next, 0, MAC_LEADING_ZERO_CNT);
+
+                       /* place mac after leading zeroes*/
+                       memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
+                              ETH_ALEN);
+
+                       /* calculate address of next element and
+                        * advance counter
+                        */
+                       counter++;
+                       next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32));
+
+                       DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
+                          counter, next, pos->u.mac.mac);
+               }
+       }
+       return counter * ETH_ALEN;
+}
+
 /* check_add() callbacks */
 static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
                               union bnx2x_classification_ramrod_data *data)
@@ -1886,6 +1918,7 @@ void bnx2x_init_mac_obj(struct bnx2x *bp,
                mac_obj->check_move        = bnx2x_check_move;
                mac_obj->ramrod_cmd        =
                        RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+               mac_obj->get_n_elements    = bnx2x_get_n_elements;
 
                /* Exe Queue */
                bnx2x_exe_queue_init(bp,
index 9a517c2..992308f 100644
@@ -285,6 +285,19 @@ struct bnx2x_vlan_mac_obj {
        /* RAMROD command to be used */
        int                             ramrod_cmd;
 
+       /* copy first n elements onto preallocated buffer
+        *
+        * @param n number of elements to get
+        * @param buf buffer preallocated by caller into which elements
+        *            will be copied. Note elements are 4-byte aligned
+        *            so buffer size must be able to accomodate the
+        *            aligned elements.
+        *
+        * @return number of copied bytes
+        */
+       int (*get_n_elements)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
+                             int n, u8 *buf);
+
        /**
         * Checks if ADD-ramrod with the given params may be performed.
         *
index 3034f0e..bc0121a 100644
@@ -39,6 +39,17 @@ static inline long bnx2x_hilo(u32 *hiref)
 #endif
 }
 
+static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
+{
+       u16 res = sizeof(struct host_port_stats) >> 2;
+
+       /* if PFC stats are not supported by the MFW, don't DMA them */
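+       /* (the four u32 PFC counters account for (sizeof(u32) * 4) >> 2 == 4 dwords) */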
+       if (!(bp->flags & BC_SUPPORTS_PFC_STATS))
+               res -= (sizeof(u32)*4) >> 2;
+
+       return res;
+}
+
 /*
  * Init service functions
  */
@@ -178,7 +189,8 @@ static void bnx2x_stats_pmf_update(struct bnx2x *bp)
                                   DMAE_LEN32_RD_MAX * 4);
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
                                   DMAE_LEN32_RD_MAX * 4);
-       dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
+       dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX;
+
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
        dmae->comp_val = DMAE_COMP_VAL;
@@ -217,7 +229,7 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
                dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
                dmae->dst_addr_lo = bp->port.port_stx >> 2;
                dmae->dst_addr_hi = 0;
-               dmae->len = sizeof(struct host_port_stats) >> 2;
+               dmae->len = bnx2x_get_port_stats_dma_len(bp);
                dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
                dmae->comp_addr_hi = 0;
                dmae->comp_val = 1;
@@ -540,6 +552,25 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
                UPDATE_STAT64(tx_stat_gterr,
                                tx_stat_dot3statsinternalmactransmiterrors);
                UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
+
+               /* collect PFC stats */
+               DIFF_64(diff.hi, new->tx_stat_gtpp_hi,
+                       pstats->pfc_frames_tx_hi,
+                       diff.lo, new->tx_stat_gtpp_lo,
+                       pstats->pfc_frames_tx_lo);
+               pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
+               pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;
+               ADD_64(pstats->pfc_frames_tx_hi, diff.hi,
+                       pstats->pfc_frames_tx_lo, diff.lo);
+
+               DIFF_64(diff.hi, new->rx_stat_grpp_hi,
+                       pstats->pfc_frames_rx_hi,
+                       diff.lo, new->rx_stat_grpp_lo,
+                       pstats->pfc_frames_rx_lo);
+               pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
+               pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
+               ADD_64(pstats->pfc_frames_rx_hi, diff.hi,
+                       pstats->pfc_frames_rx_lo, diff.lo);
        }
 
        estats->pause_frames_received_hi =
@@ -551,6 +582,15 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
                                pstats->mac_stx[1].tx_stat_outxoffsent_hi;
        estats->pause_frames_sent_lo =
                                pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+
+       estats->pfc_frames_received_hi =
+                               pstats->pfc_frames_rx_hi;
+       estats->pfc_frames_received_lo =
+                               pstats->pfc_frames_rx_lo;
+       estats->pfc_frames_sent_hi =
+                               pstats->pfc_frames_tx_hi;
+       estats->pfc_frames_sent_lo =
+                               pstats->pfc_frames_tx_lo;
 }
 
 static void bnx2x_mstat_stats_update(struct bnx2x *bp)
@@ -571,6 +611,11 @@ static void bnx2x_mstat_stats_update(struct bnx2x *bp)
        ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
        ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);
 
+       /* collect pfc stats */
+       ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
+               pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
+       ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
+               pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);
 
        ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
        ADD_STAT64(stats_tx.tx_gt127,
@@ -628,6 +673,15 @@ static void bnx2x_mstat_stats_update(struct bnx2x *bp)
                                pstats->mac_stx[1].tx_stat_outxoffsent_hi;
        estats->pause_frames_sent_lo =
                                pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+
+       estats->pfc_frames_received_hi =
+                               pstats->pfc_frames_rx_hi;
+       estats->pfc_frames_received_lo =
+                               pstats->pfc_frames_rx_lo;
+       estats->pfc_frames_sent_hi =
+                               pstats->pfc_frames_tx_hi;
+       estats->pfc_frames_sent_lo =
+                               pstats->pfc_frames_tx_lo;
 }
 
 static void bnx2x_emac_stats_update(struct bnx2x *bp)
@@ -740,7 +794,7 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
        estats->brb_drop_hi = pstats->brb_drop_hi;
        estats->brb_drop_lo = pstats->brb_drop_lo;
 
-       pstats->host_port_stats_start = ++pstats->host_port_stats_end;
+       pstats->host_port_stats_counter++;
 
        if (!BP_NOMCP(bp)) {
                u32 nig_timer_max =
@@ -1265,7 +1319,7 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp)
                dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
                dmae->dst_addr_lo = bp->port.port_stx >> 2;
                dmae->dst_addr_hi = 0;
-               dmae->len = sizeof(struct host_port_stats) >> 2;
+               dmae->len = bnx2x_get_port_stats_dma_len(bp);
                if (bp->func_stx) {
                        dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
                        dmae->comp_addr_hi = 0;
@@ -1382,7 +1436,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
        dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
        dmae->dst_addr_lo = bp->port.port_stx >> 2;
        dmae->dst_addr_hi = 0;
-       dmae->len = sizeof(struct host_port_stats) >> 2;
+       dmae->len = bnx2x_get_port_stats_dma_len(bp);
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
        dmae->comp_val = DMAE_COMP_VAL;
@@ -1459,6 +1513,7 @@ static void bnx2x_func_stats_base_update(struct bnx2x *bp)
 static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
 {
        int i;
+       int first_queue_query_index;
        struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;
 
        dma_addr_t cur_data_offset;
@@ -1514,14 +1569,40 @@ static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
        cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
        cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
 
+       /**** FCoE FW statistics data ****/
+       if (!NO_FCOE(bp)) {
+               cur_data_offset = bp->fw_stats_data_mapping +
+                       offsetof(struct bnx2x_fw_stats_data, fcoe);
+
+               cur_query_entry =
+                       &bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];
+
+               cur_query_entry->kind = STATS_TYPE_FCOE;
+               /* For FCoE the query index is a don't care */
+               cur_query_entry->index = BP_PORT(bp);
+               cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+               cur_query_entry->address.hi =
+                       cpu_to_le32(U64_HI(cur_data_offset));
+               cur_query_entry->address.lo =
+                       cpu_to_le32(U64_LO(cur_data_offset));
+       }
+
        /**** Clients' queries ****/
        cur_data_offset = bp->fw_stats_data_mapping +
                offsetof(struct bnx2x_fw_stats_data, queue_stats);
 
+       /* the first queue query index depends on whether an FCoE offload
+        * request will be included in the ramrod
+        */
+       if (!NO_FCOE(bp))
+               first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
+       else
+               first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;
+
        for_each_eth_queue(bp, i) {
                cur_query_entry =
                        &bp->fw_stats_req->
-                                       query[BNX2X_FIRST_QUEUE_QUERY_IDX + i];
+                                       query[first_queue_query_index + i];
 
                cur_query_entry->kind = STATS_TYPE_QUEUE;
                cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
@@ -1533,6 +1614,21 @@ static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
 
                cur_data_offset += sizeof(struct per_queue_stats);
        }
+
+       /* add FCoE queue query if needed */
+       if (!NO_FCOE(bp)) {
+               cur_query_entry =
+                       &bp->fw_stats_req->
+                                       query[first_queue_query_index + i];
+
+               cur_query_entry->kind = STATS_TYPE_QUEUE;
+               cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX]);
+               cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+               cur_query_entry->address.hi =
+                       cpu_to_le32(U64_HI(cur_data_offset));
+               cur_query_entry->address.lo =
+                       cpu_to_le32(U64_LO(cur_data_offset));
+       }
 }
 
 void bnx2x_stats_init(struct bnx2x *bp)
index 5d8ce2f..683deb0 100644
@@ -193,6 +193,12 @@ struct bnx2x_eth_stats {
        u32 total_tpa_aggregated_frames_lo;
        u32 total_tpa_bytes_hi;
        u32 total_tpa_bytes_lo;
+
+       /* PFC */
+       u32 pfc_frames_received_hi;
+       u32 pfc_frames_received_lo;
+       u32 pfc_frames_sent_hi;
+       u32 pfc_frames_sent_lo;
 };
 
 
index b336e55..4bcb67e 100644
@@ -250,6 +250,21 @@ static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
        return io->data;
 }
 
+static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       struct cnic_eth_dev *ethdev = cp->ethdev;
+       struct drv_ctl_info info;
+
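+       /* let the netdev driver (e.g. bnx2x, via its drv_ctl hook) know a ULP is (un)registering */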
+       if (reg)
+               info.cmd = DRV_CTL_ULP_REGISTER_CMD;
+       else
+               info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
+
+       info.data.ulp_type = ulp_type;
+       ethdev->drv_ctl(dev->netdev, &info);
+}
+
 static int cnic_in_use(struct cnic_sock *csk)
 {
        return test_bit(SK_F_INUSE, &csk->flags);
@@ -563,6 +578,8 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
 
        mutex_unlock(&cnic_lock);
 
+       cnic_ulp_ctl(dev, ulp_type, true);
+
        return 0;
 
 }
@@ -602,6 +619,8 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
        if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
                netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
 
+       cnic_ulp_ctl(dev, ulp_type, false);
+
        return 0;
 }
 EXPORT_SYMBOL(cnic_unregister_driver);
@@ -3052,9 +3071,26 @@ static void cnic_ulp_start(struct cnic_dev *dev)
        }
 }
 
+static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       struct cnic_ulp_ops *ulp_ops;
+       int rc;
+
+       mutex_lock(&cnic_lock);
+       ulp_ops = cnic_ulp_tbl_prot(ulp_type);
+       if (ulp_ops && ulp_ops->cnic_get_stats)
+               rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
+       else
+               rc = -ENODEV;
+       mutex_unlock(&cnic_lock);
+       return rc;
+}
+
 static int cnic_ctl(void *data, struct cnic_ctl_info *info)
 {
        struct cnic_dev *dev = data;
+       int ulp_type = CNIC_ULP_ISCSI;
 
        switch (info->cmd) {
        case CNIC_CTL_STOP_CMD:
@@ -3100,6 +3136,15 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
                }
                break;
        }
+       case CNIC_CTL_FCOE_STATS_GET_CMD:
+               ulp_type = CNIC_ULP_FCOE;
+               /* fall through */
+       case CNIC_CTL_ISCSI_STATS_GET_CMD:
+               cnic_hold(dev);
+               cnic_copy_ulp_stats(dev, ulp_type);
+               cnic_put(dev);
+               break;
+
        default:
                return -EINVAL;
        }
@@ -5288,6 +5333,8 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
        cdev->pcidev = pdev;
        cp->chip_id = ethdev->chip_id;
 
+       cdev->stats_addr = ethdev->addr_drv_info_to_mcp;
+
        if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
                cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
        if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
index 79443e0..d1f6456 100644
@@ -86,6 +86,8 @@ struct kcqe {
 #define CNIC_CTL_START_CMD             2
 #define CNIC_CTL_COMPLETION_CMD                3
 #define CNIC_CTL_STOP_ISCSI_CMD                4
+#define CNIC_CTL_FCOE_STATS_GET_CMD    5
+#define CNIC_CTL_ISCSI_STATS_GET_CMD   6
 
 #define DRV_CTL_IO_WR_CMD              0x101
 #define DRV_CTL_IO_RD_CMD              0x102
@@ -96,6 +98,8 @@ struct kcqe {
 #define DRV_CTL_STOP_L2_CMD            0x107
 #define DRV_CTL_RET_L2_SPQ_CREDIT_CMD  0x10c
 #define DRV_CTL_ISCSI_STOPPED_CMD      0x10d
+#define DRV_CTL_ULP_REGISTER_CMD       0x10e
+#define DRV_CTL_ULP_UNREGISTER_CMD     0x10f
 
 struct cnic_ctl_completion {
        u32     cid;
@@ -133,6 +137,7 @@ struct drv_ctl_info {
                struct drv_ctl_spq_credit credit;
                struct drv_ctl_io io;
                struct drv_ctl_l2_ring ring;
+               int ulp_type;
                char bytes[MAX_DRV_CTL_DATA];
        } data;
 };
@@ -201,6 +206,7 @@ struct cnic_eth_dev {
                                               struct kwqe_16 *[], u32);
        int             (*drv_ctl)(struct net_device *, struct drv_ctl_info *);
        unsigned long   reserved1[2];
+       union drv_info_to_mcp   *addr_drv_info_to_mcp;
 };
 
 struct cnic_sockaddr {
@@ -297,6 +303,8 @@ struct cnic_dev {
        int             max_fcoe_conn;
        int             max_rdma_conn;
 
+       union drv_info_to_mcp   *stats_addr;
+
        void            *cnic_priv;
 };
 
@@ -326,6 +334,7 @@ struct cnic_ulp_ops {
        void (*cm_remote_abort)(struct cnic_sock *);
        int (*iscsi_nl_send_msg)(void *ulp_ctx, u32 msg_type,
                                  char *data, u16 data_size);
+       int (*cnic_get_stats)(void *ulp_ctx);
        struct module *owner;
        atomic_t ref_count;
 };
index cf36312..076e02a 100644
@@ -89,10 +89,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 
 #define DRV_MODULE_NAME                "tg3"
 #define TG3_MAJ_NUM                    3
-#define TG3_MIN_NUM                    121
+#define TG3_MIN_NUM                    122
 #define DRV_MODULE_VERSION     \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE     "November 2, 2011"
+#define DRV_MODULE_RELDATE     "December 7, 2011"
 
 #define RESET_KIND_SHUTDOWN    0
 #define RESET_KIND_INIT                1
@@ -135,7 +135,6 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
 #define TG3_DEF_RX_JUMBO_RING_PENDING  100
-#define TG3_RSS_INDIR_TBL_SIZE         128
 
 /* Do not place this n-ring entries value into the tp struct itself,
  * we really want to expose these constants to GCC so that modulo et
@@ -199,7 +198,8 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 
 /* minimum number of free TX descriptors required to wake up TX process */
 #define TG3_TX_WAKEUP_THRESH(tnapi)            ((tnapi)->tx_pending / 4)
-#define TG3_TX_BD_DMA_MAX              4096
+#define TG3_TX_BD_DMA_MAX_2K           2048
+#define TG3_TX_BD_DMA_MAX_4K           4096
 
 #define TG3_RAW_IP_ALIGN 2
 
@@ -1670,22 +1670,6 @@ static void tg3_link_report(struct tg3 *tp)
        }
 }
 
-static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
-{
-       u16 miireg;
-
-       if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
-               miireg = ADVERTISE_PAUSE_CAP;
-       else if (flow_ctrl & FLOW_CTRL_TX)
-               miireg = ADVERTISE_PAUSE_ASYM;
-       else if (flow_ctrl & FLOW_CTRL_RX)
-               miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
-       else
-               miireg = 0;
-
-       return miireg;
-}
-
 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
 {
        u16 miireg;
@@ -1786,7 +1770,7 @@ static void tg3_adjust_link(struct net_device *dev)
                if (phydev->duplex == DUPLEX_HALF)
                        mac_mode |= MAC_MODE_HALF_DUPLEX;
                else {
-                       lcl_adv = tg3_advert_flowctrl_1000T(
+                       lcl_adv = mii_advertise_flowctrl(
                                  tp->link_config.flowctrl);
 
                        if (phydev->pause)
@@ -2154,7 +2138,7 @@ static void tg3_phy_eee_enable(struct tg3 *tp)
        if (tp->link_config.active_speed == SPEED_1000 &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
+            tg3_flag(tp, 57765_CLASS)) &&
            !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
                val = MII_TG3_DSP_TAP26_ALNOKO |
                      MII_TG3_DSP_TAP26_RMRXSTO;
@@ -2673,8 +2657,7 @@ static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
        bool need_vaux = false;
 
        /* The GPIOs do something completely different on 57765. */
-       if (!tg3_flag(tp, IS_NIC) ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+       if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
                return;
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
@@ -3589,24 +3572,23 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
 
        new_adv = ADVERTISE_CSMA;
        new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
-       new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
+       new_adv |= mii_advertise_flowctrl(flowctrl);
 
        err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
        if (err)
                goto done;
 
-       if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
-               goto done;
-
-       new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
+       if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+               new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
 
-       if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
-           tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
-               new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
+               if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
+                   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
+                       new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
 
-       err = tg3_writephy(tp, MII_CTRL1000, new_adv);
-       if (err)
-               goto done;
+               err = tg3_writephy(tp, MII_CTRL1000, new_adv);
+               if (err)
+                       goto done;
+       }
 
        if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
                goto done;
@@ -3632,6 +3614,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
                switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
                case ASIC_REV_5717:
                case ASIC_REV_57765:
+               case ASIC_REV_57766:
                case ASIC_REV_5719:
                        /* If we advertised any eee advertisements above... */
                        if (val)
@@ -3768,65 +3751,61 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp)
        return err;
 }
 
-static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
+static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
 {
-       u32 adv_reg, all_mask = 0;
+       u32 advmsk, tgtadv, advertising;
 
-       all_mask = ethtool_adv_to_mii_adv_t(mask) & ADVERTISE_ALL;
+       advertising = tp->link_config.advertising;
+       tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
 
-       if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
-               return 0;
+       advmsk = ADVERTISE_ALL;
+       if (tp->link_config.active_duplex == DUPLEX_FULL) {
+               tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
+               advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+       }
 
-       if ((adv_reg & ADVERTISE_ALL) != all_mask)
-               return 0;
+       if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
+               return false;
+
+       if ((*lcladv & advmsk) != tgtadv)
+               return false;
 
        if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
                u32 tg3_ctrl;
 
-               all_mask = ethtool_adv_to_mii_ctrl1000_t(mask);
+               tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
 
                if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
-                       return 0;
+                       return false;
 
                tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
-               if (tg3_ctrl != all_mask)
-                       return 0;
+               if (tg3_ctrl != tgtadv)
+                       return false;
        }
 
-       return 1;
+       return true;
 }
 
-static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
+static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
 {
-       u32 curadv, reqadv;
+       u32 lpeth = 0;
 
-       if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
-               return 1;
-
-       curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
-       reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
+       if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+               u32 val;
 
-       if (tp->link_config.active_duplex == DUPLEX_FULL) {
-               if (curadv != reqadv)
-                       return 0;
+               if (tg3_readphy(tp, MII_STAT1000, &val))
+                       return false;
 
-               if (tg3_flag(tp, PAUSE_AUTONEG))
-                       tg3_readphy(tp, MII_LPA, rmtadv);
-       } else {
-               /* Reprogram the advertisement register, even if it
-                * does not affect the current link.  If the link
-                * gets renegotiated in the future, we can save an
-                * additional renegotiation cycle by advertising
-                * it correctly in the first place.
-                */
-               if (curadv != reqadv) {
-                       *lcladv &= ~(ADVERTISE_PAUSE_CAP |
-                                    ADVERTISE_PAUSE_ASYM);
-                       tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
-               }
+               lpeth = mii_stat1000_to_ethtool_lpa_t(val);
        }
 
-       return 1;
+       if (tg3_readphy(tp, MII_LPA, rmtadv))
+               return false;
+
+       lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
+       tp->link_config.rmt_adv = lpeth;
+
+       return true;
 }
 
 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
@@ -3933,6 +3912,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
        current_speed = SPEED_INVALID;
        current_duplex = DUPLEX_INVALID;
        tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
+       tp->link_config.rmt_adv = 0;
 
        if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
                err = tg3_phy_auxctl_read(tp,
@@ -3988,12 +3968,9 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
 
                if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                        if ((bmcr & BMCR_ANENABLE) &&
-                           tg3_copper_is_advertising_all(tp,
-                                               tp->link_config.advertising)) {
-                               if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
-                                                                 &rmt_adv))
-                                       current_link_up = 1;
-                       }
+                           tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
+                           tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
+                               current_link_up = 1;
                } else {
                        if (!(bmcr & BMCR_ANENABLE) &&
                            tp->link_config.speed == current_speed &&
@@ -4629,6 +4606,9 @@ restart_autoneg:
                        if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;
 
+                       tp->link_config.rmt_adv =
+                                          mii_adv_to_ethtool_adv_x(remote_adv);
+
                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                        current_link_up = 1;
                        tp->serdes_counter = 0;
@@ -4700,6 +4680,9 @@ static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
                        if (rxflags & MR_LP_ADV_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;
 
+                       tp->link_config.rmt_adv =
+                                          mii_adv_to_ethtool_adv_x(remote_adv);
+
                        tg3_setup_flow_control(tp, local_adv, remote_adv);
 
                        current_link_up = 1;
@@ -4782,6 +4765,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
        udelay(40);
 
        current_link_up = 0;
+       tp->link_config.rmt_adv = 0;
        mac_status = tr32(MAC_STATUS);
 
        if (tg3_flag(tp, HW_AUTONEG))
@@ -4873,6 +4857,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
        current_link_up = 0;
        current_speed = SPEED_INVALID;
        current_duplex = DUPLEX_INVALID;
+       tp->link_config.rmt_adv = 0;
 
        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
@@ -4979,6 +4964,9 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
                                        current_duplex = DUPLEX_FULL;
                                else
                                        current_duplex = DUPLEX_HALF;
+
+                               tp->link_config.rmt_adv =
+                                          mii_adv_to_ethtool_adv_x(remote_adv);
                        } else if (!tg3_flag(tp, 5780_CLASS)) {
                                /* Link is up via parallel detect */
                        } else {
@@ -6437,25 +6425,25 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
        bool hwbug = false;
 
        if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
-               hwbug = 1;
+               hwbug = true;
 
        if (tg3_4g_overflow_test(map, len))
-               hwbug = 1;
+               hwbug = true;
 
        if (tg3_40bit_overflow_test(tp, map, len))
-               hwbug = 1;
+               hwbug = true;
 
-       if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
+       if (tp->dma_limit) {
                u32 prvidx = *entry;
                u32 tmp_flag = flags & ~TXD_FLAG_END;
-               while (len > TG3_TX_BD_DMA_MAX && *budget) {
-                       u32 frag_len = TG3_TX_BD_DMA_MAX;
-                       len -= TG3_TX_BD_DMA_MAX;
+               while (len > tp->dma_limit && *budget) {
+                       u32 frag_len = tp->dma_limit;
+                       len -= tp->dma_limit;
 
                        /* Avoid the 8byte DMA problem */
                        if (len <= 8) {
-                               len += TG3_TX_BD_DMA_MAX / 2;
-                               frag_len = TG3_TX_BD_DMA_MAX / 2;
+                               len += tp->dma_limit / 2;
+                               frag_len = tp->dma_limit / 2;
                        }
 
                        tnapi->tx_buffers[*entry].fragmented = true;
@@ -6476,7 +6464,7 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
                                *budget -= 1;
                                *entry = NEXT_TX(*entry);
                        } else {
-                               hwbug = 1;
+                               hwbug = true;
                                tnapi->tx_buffers[prvidx].fragmented = false;
                        }
                }
@@ -7588,8 +7576,6 @@ static int tg3_abort_hw(struct tg3 *tp, int silent)
                if (tnapi->hw_status)
                        memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
        }
-       if (tp->hw_stats)
-               memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
 
        return err;
 }
@@ -7905,6 +7891,11 @@ static int tg3_chip_reset(struct tg3 *tp)
        return 0;
 }
 
+static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
+                                                struct rtnl_link_stats64 *);
+static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *,
+                                               struct tg3_ethtool_stats *);
+
 /* tp->lock is held. */
 static int tg3_halt(struct tg3 *tp, int kind, int silent)
 {
@@ -7922,6 +7913,15 @@ static int tg3_halt(struct tg3 *tp, int kind, int silent)
        tg3_write_sig_legacy(tp, kind);
        tg3_write_sig_post_reset(tp, kind);
 
+       if (tp->hw_stats) {
+               /* Save the stats across chip resets... */
+               tg3_get_stats64(tp->dev, &tp->net_stats_prev);
+               tg3_get_estats(tp, &tp->estats_prev);
+
+               /* And make sure the next sample is new data */
+               memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
+       }
+
        if (err)
                return err;
 
@@ -8065,7 +8065,7 @@ static void tg3_rings_reset(struct tg3 *tp)
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
        else if (tg3_flag(tp, 5717_PLUS))
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
-       else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+       else if (tg3_flag(tp, 57765_CLASS))
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
        else
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
@@ -8082,7 +8082,7 @@ static void tg3_rings_reset(struct tg3 *tp)
        else if (!tg3_flag(tp, 5705_PLUS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
-                GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+                tg3_flag(tp, 57765_CLASS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
        else
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
@@ -8220,6 +8220,54 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
                tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
 }
 
+static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
+{
+       int i;
+
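+       /* default table: map entries round-robin across the rx rings
+        * (vector 0 only handles link interrupts, hence irq_cnt - 1)
+        */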
+       for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
+               tp->rss_ind_tbl[i] =
+                       ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
+}
+
+static void tg3_rss_check_indir_tbl(struct tg3 *tp)
+{
+       int i;
+
+       if (!tg3_flag(tp, SUPPORT_MSIX))
+               return;
+
+       if (tp->irq_cnt <= 2) {
+               memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
+               return;
+       }
+
+       /* Validate table against current IRQ count */
+       for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
+               if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
+                       break;
+       }
+
+       if (i != TG3_RSS_INDIR_TBL_SIZE)
+               tg3_rss_init_dflt_indir_tbl(tp);
+}
+
+static void tg3_rss_write_indir_tbl(struct tg3 *tp)
+{
+       int i = 0;
+       u32 reg = MAC_RSS_INDIR_TBL_0;
+
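+       /* each 32-bit register packs eight 4-bit indirection table entries */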
+       while (i < TG3_RSS_INDIR_TBL_SIZE) {
+               u32 val = tp->rss_ind_tbl[i];
+               i++;
+               for (; i % 8; i++) {
+                       val <<= 4;
+                       val |= tp->rss_ind_tbl[i];
+               }
+               tw32(reg, val);
+               reg += 4;
+       }
+}
+
 /* tp->lock is held. */
 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 {
@@ -8326,7 +8374,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                tw32(GRC_MODE, grc_mode);
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+       if (tg3_flag(tp, 57765_CLASS)) {
                if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
                        u32 grc_mode = tr32(GRC_MODE);
 
@@ -8414,7 +8462,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
                if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
                        val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
+               if (!tg3_flag(tp, 57765_CLASS) &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
                        val |= DMA_RWCTRL_TAGGED_STAT_WA;
                tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
@@ -8561,7 +8609,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                        tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
                             val | BDINFO_FLAGS_USE_EXT_RECV);
                        if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
-                           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+                           tg3_flag(tp, 57765_CLASS))
                                tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
                                     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
                } else {
@@ -8647,6 +8695,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        if (tg3_flag(tp, PCI_EXPRESS))
                rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
 
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+               rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
+
        if (tg3_flag(tp, HW_TSO_1) ||
            tg3_flag(tp, HW_TSO_2) ||
            tg3_flag(tp, HW_TSO_3))
@@ -8910,28 +8961,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        udelay(100);
 
        if (tg3_flag(tp, ENABLE_RSS)) {
-               int i = 0;
-               u32 reg = MAC_RSS_INDIR_TBL_0;
-
-               if (tp->irq_cnt == 2) {
-                       for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
-                               tw32(reg, 0x0);
-                               reg += 4;
-                       }
-               } else {
-                       u32 val;
-
-                       while (i < TG3_RSS_INDIR_TBL_SIZE) {
-                               val = i % (tp->irq_cnt - 1);
-                               i++;
-                               for (; i % 8; i++) {
-                                       val <<= 4;
-                                       val |= (i % (tp->irq_cnt - 1));
-                               }
-                               tw32(reg, val);
-                               reg += 4;
-                       }
-               }
+               tg3_rss_write_indir_tbl(tp);
 
                /* Setup the "secret" hash key. */
                tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
@@ -8988,7 +9018,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        /* Prevent chip from dropping frames when flow control
         * is enabled.
         */
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+       if (tg3_flag(tp, 57765_CLASS))
                val = 1;
        else
                val = 2;
@@ -9203,7 +9233,7 @@ static void tg3_timer(unsigned long __opaque)
        spin_lock(&tp->lock);
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+           tg3_flag(tp, 57765_CLASS))
                tg3_chk_missed_msi(tp);
 
        if (!tg3_flag(tp, TAGGED_STATUS)) {
@@ -9655,6 +9685,8 @@ static int tg3_open(struct net_device *dev)
         */
        tg3_ints_init(tp);
 
+       tg3_rss_check_indir_tbl(tp);
+
        /* The placement of this call is tied
         * to the setup and use of Host TX descriptors.
         */
@@ -9686,8 +9718,8 @@ static int tg3_open(struct net_device *dev)
                tg3_free_rings(tp);
        } else {
                if (tg3_flag(tp, TAGGED_STATUS) &&
-                       GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
-                       GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
+                   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+                   !tg3_flag(tp, 57765_CLASS))
                        tp->timer_offset = HZ;
                else
                        tp->timer_offset = HZ / 10;
@@ -9768,10 +9800,6 @@ err_out1:
        return err;
 }
 
-static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
-                                                struct rtnl_link_stats64 *);
-static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
-
 static int tg3_close(struct net_device *dev)
 {
        int i;
@@ -9803,10 +9831,9 @@ static int tg3_close(struct net_device *dev)
 
        tg3_ints_fini(tp);
 
-       tg3_get_stats64(tp->dev, &tp->net_stats_prev);
-
-       memcpy(&tp->estats_prev, tg3_get_estats(tp),
-              sizeof(tp->estats_prev));
+       /* Clear stats across close / open calls */
+       memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
+       memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
 
        tg3_napi_fini(tp);
 
@@ -9854,9 +9881,9 @@ static u64 calc_crc_errors(struct tg3 *tp)
        estats->member =        old_estats->member + \
                                get_stat64(&hw_stats->member)
 
-static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
+static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp,
+                                              struct tg3_ethtool_stats *estats)
 {
-       struct tg3_ethtool_stats *estats = &tp->estats;
        struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;
 
@@ -10304,9 +10331,10 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                        cmd->advertising |= ADVERTISED_Asym_Pause;
                }
        }
-       if (netif_running(dev)) {
+       if (netif_running(dev) && netif_carrier_ok(dev)) {
                ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
                cmd->duplex = tp->link_config.active_duplex;
+               cmd->lp_advertising = tp->link_config.rmt_adv;
                if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
                        if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
                                cmd->eth_tp_mdix = ETH_TP_MDI_X;
@@ -10583,12 +10611,12 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam
 
        epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
 
-       if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
+       if (tp->link_config.flowctrl & FLOW_CTRL_RX)
                epause->rx_pause = 1;
        else
                epause->rx_pause = 0;
 
-       if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
+       if (tp->link_config.flowctrl & FLOW_CTRL_TX)
                epause->tx_pause = 1;
        else
                epause->tx_pause = 0;
@@ -10708,6 +10736,78 @@ static int tg3_get_sset_count(struct net_device *dev, int sset)
        }
 }
 
+static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+                        u32 *rules __always_unused)
+{
+       struct tg3 *tp = netdev_priv(dev);
+
+       if (!tg3_flag(tp, SUPPORT_MSIX))
+               return -EOPNOTSUPP;
+
+       switch (info->cmd) {
+       case ETHTOOL_GRXRINGS:
+               if (netif_running(tp->dev))
+                       info->data = tp->irq_cnt;
+               else {
+                       info->data = num_online_cpus();
+                       if (info->data > TG3_IRQ_MAX_VECS_RSS)
+                               info->data = TG3_IRQ_MAX_VECS_RSS;
+               }
+
+               /* The first interrupt vector only
+                * handles link interrupts.
+                */
+               info->data -= 1;
+               return 0;
+
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
+{
+       u32 size = 0;
+       struct tg3 *tp = netdev_priv(dev);
+
+       if (tg3_flag(tp, SUPPORT_MSIX))
+               size = TG3_RSS_INDIR_TBL_SIZE;
+
+       return size;
+}
+
+static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
+{
+       struct tg3 *tp = netdev_priv(dev);
+       int i;
+
+       for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
+               indir[i] = tp->rss_ind_tbl[i];
+
+       return 0;
+}
+
+static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
+{
+       struct tg3 *tp = netdev_priv(dev);
+       size_t i;
+
+       for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
+               tp->rss_ind_tbl[i] = indir[i];
+
+       if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
+               return 0;
+
+       /* It is legal to write the indirection
+        * table while the device is running.
+        */
+       tg3_full_lock(tp, 0);
+       tg3_rss_write_indir_tbl(tp);
+       tg3_full_unlock(tp);
+
+       return 0;
+}
+
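/* Usage note (not part of this patch): these callbacks back the ethtool
 * RSS indirection interface, e.g. "ethtool -x ethX" to read the table and
 * "ethtool -X ethX ..." to rewrite it while the interface is up.
 */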
 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 {
        switch (stringset) {
@@ -10762,7 +10862,8 @@ static void tg3_get_ethtool_stats(struct net_device *dev,
                                   struct ethtool_stats *estats, u64 *tmp_stats)
 {
        struct tg3 *tp = netdev_priv(dev);
-       memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
+
+       tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
 }
 
 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
@@ -11345,7 +11446,7 @@ static int tg3_test_memory(struct tg3 *tp)
 
        if (tg3_flag(tp, 5717_PLUS))
                mem_tbl = mem_tbl_5717;
-       else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+       else if (tg3_flag(tp, 57765_CLASS))
                mem_tbl = mem_tbl_57765;
        else if (tg3_flag(tp, 5755_PLUS))
                mem_tbl = mem_tbl_5755;
@@ -11937,6 +12038,10 @@ static const struct ethtool_ops tg3_ethtool_ops = {
        .get_coalesce           = tg3_get_coalesce,
        .set_coalesce           = tg3_set_coalesce,
        .get_sset_count         = tg3_get_sset_count,
+       .get_rxnfc              = tg3_get_rxnfc,
+       .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
+       .get_rxfh_indir         = tg3_get_rxfh_indir,
+       .set_rxfh_indir         = tg3_set_rxfh_indir,
 };
 
 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
@@ -12606,7 +12711,7 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                        tg3_get_5906_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
-                        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+                        tg3_flag(tp, 57765_CLASS))
                        tg3_get_57780_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
@@ -13315,7 +13420,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
        if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
            !tg3_flag(tp, ENABLE_APE) &&
            !tg3_flag(tp, ENABLE_ASF)) {
-               u32 bmsr, mask;
+               u32 bmsr, dummy;
 
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
@@ -13328,10 +13433,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
 
                tg3_phy_set_wirespeed(tp);
 
-               mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
-                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
-                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
-               if (!tg3_copper_is_advertising_all(tp, mask)) {
+               if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
                        tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
                                            tp->link_config.flowctrl);
 
@@ -13453,6 +13555,17 @@ out_no_vpd:
                        strcpy(tp->board_part_number, "BCM57795");
                else
                        goto nomatch;
+       } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
+               if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
+                       strcpy(tp->board_part_number, "BCM57762");
+               else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
+                       strcpy(tp->board_part_number, "BCM57766");
+               else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
+                       strcpy(tp->board_part_number, "BCM57782");
+               else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
+                       strcpy(tp->board_part_number, "BCM57786");
+               else
+                       goto nomatch;
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                strcpy(tp->board_part_number, "BCM95906");
        } else {
@@ -13791,7 +13904,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
-                        tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
+                        tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
+                        tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
+                        tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
+                        tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
+                        tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
                        pci_read_config_dword(tp->pdev,
                                              TG3PCI_GEN15_PRODID_ASICREV,
                                              &prod_id_asic_rev);
@@ -13938,7 +14055,10 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                tg3_flag_set(tp, 5717_PLUS);
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
-           tg3_flag(tp, 5717_PLUS))
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+               tg3_flag_set(tp, 57765_CLASS);
+
+       if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
                tg3_flag_set(tp, 57765_PLUS);
 
        /* Intentionally exclude ASIC_REV_5906 */
@@ -14024,6 +14144,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                if (tg3_flag(tp, 57765_PLUS)) {
                        tg3_flag_set(tp, SUPPORT_MSIX);
                        tp->irq_max = TG3_IRQ_MAX_VECS;
+                       tg3_rss_init_dflt_indir_tbl(tp);
                }
        }
 
@@ -14031,7 +14152,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                tg3_flag_set(tp, SHORT_DMA_BUG);
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
-               tg3_flag_set(tp, 4K_FIFO_LIMIT);
+               tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
+       else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+               tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
@@ -14315,7 +14438,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+           tg3_flag(tp, 57765_CLASS))
                tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
 
        if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
index 9cc10a8..aea8f72 100644
@@ -31,6 +31,8 @@
 #define TG3_RX_RET_MAX_SIZE_5705       512
 #define TG3_RX_RET_MAX_SIZE_5717       4096
 
+#define TG3_RSS_INDIR_TBL_SIZE         128
+
 /* First 256 bytes are a mirror of PCI config space. */
 #define TG3PCI_VENDOR                  0x00000000
 #define  TG3PCI_VENDOR_BROADCOM                 0x14e4
 #define  TG3PCI_DEVICE_TIGON3_57795     0x16b6
 #define  TG3PCI_DEVICE_TIGON3_5719      0x1657
 #define  TG3PCI_DEVICE_TIGON3_5720      0x165f
+#define  TG3PCI_DEVICE_TIGON3_57762     0x1682
+#define  TG3PCI_DEVICE_TIGON3_57766     0x1686
+#define  TG3PCI_DEVICE_TIGON3_57786     0x16b3
+#define  TG3PCI_DEVICE_TIGON3_57782     0x16b7
 /* 0x04 --> 0x2c unused */
 #define TG3PCI_SUBVENDOR_ID_BROADCOM           PCI_VENDOR_ID_BROADCOM
 #define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6   0x1644
 #define   ASIC_REV_57765                0x57785
 #define   ASIC_REV_5719                         0x5719
 #define   ASIC_REV_5720                         0x5720
+#define   ASIC_REV_57766                0x57766
 #define  GET_CHIP_REV(CHIP_REV_ID)     ((CHIP_REV_ID) >> 8)
 #define   CHIPREV_5700_AX               0x70
 #define   CHIPREV_5700_BX               0x71
 #define  RDMAC_MODE_MBUF_SBD_CRPT_ENAB  0x00002000
 #define  RDMAC_MODE_FIFO_SIZE_128       0x00020000
 #define  RDMAC_MODE_FIFO_LONG_BURST     0x00030000
+#define  RDMAC_MODE_JMB_2K_MMRR                 0x00800000
 #define  RDMAC_MODE_MULT_DMA_RD_DIS     0x01000000
 #define  RDMAC_MODE_IPV4_LSO_EN                 0x08000000
 #define  RDMAC_MODE_IPV6_LSO_EN                 0x10000000
@@ -2698,6 +2706,7 @@ struct tg3_link_config {
 #define DUPLEX_INVALID         0xff
 #define AUTONEG_INVALID                0xff
        u16                             active_speed;
+       u32                             rmt_adv;
 
        /* When we go in and out of low power mode we need
         * to swap with this state.
@@ -2873,6 +2882,8 @@ enum TG3_FLAGS {
        TG3_FLAG_NVRAM_BUFFERED,
        TG3_FLAG_SUPPORT_MSI,
        TG3_FLAG_SUPPORT_MSIX,
+       TG3_FLAG_USING_MSI,
+       TG3_FLAG_USING_MSIX,
        TG3_FLAG_PCIX_MODE,
        TG3_FLAG_PCI_HIGH_SPEED,
        TG3_FLAG_PCI_32BIT,
@@ -2888,7 +2899,6 @@ enum TG3_FLAGS {
        TG3_FLAG_CHIP_RESETTING,
        TG3_FLAG_INIT_COMPLETE,
        TG3_FLAG_TSO_BUG,
-       TG3_FLAG_IS_5788,
        TG3_FLAG_MAX_RXPEND_64,
        TG3_FLAG_TSO_CAPABLE,
        TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */
@@ -2897,14 +2907,9 @@ enum TG3_FLAGS {
        TG3_FLAG_IS_NIC,
        TG3_FLAG_FLASH,
        TG3_FLAG_HW_TSO_1,
-       TG3_FLAG_5705_PLUS,
-       TG3_FLAG_5750_PLUS,
+       TG3_FLAG_HW_TSO_2,
        TG3_FLAG_HW_TSO_3,
-       TG3_FLAG_USING_MSI,
-       TG3_FLAG_USING_MSIX,
        TG3_FLAG_ICH_WORKAROUND,
-       TG3_FLAG_5780_CLASS,
-       TG3_FLAG_HW_TSO_2,
        TG3_FLAG_1SHOT_MSI,
        TG3_FLAG_NO_FWARE_REPORTED,
        TG3_FLAG_NO_NVRAM_ADDR_TRANS,
@@ -2918,18 +2923,23 @@ enum TG3_FLAGS {
        TG3_FLAG_RGMII_EXT_IBND_RX_EN,
        TG3_FLAG_RGMII_EXT_IBND_TX_EN,
        TG3_FLAG_CLKREQ_BUG,
-       TG3_FLAG_5755_PLUS,
        TG3_FLAG_NO_NVRAM,
        TG3_FLAG_ENABLE_RSS,
        TG3_FLAG_ENABLE_TSS,
        TG3_FLAG_SHORT_DMA_BUG,
        TG3_FLAG_USE_JUMBO_BDFLAG,
        TG3_FLAG_L1PLLPD_EN,
-       TG3_FLAG_57765_PLUS,
        TG3_FLAG_APE_HAS_NCSI,
-       TG3_FLAG_5717_PLUS,
        TG3_FLAG_4K_FIFO_LIMIT,
        TG3_FLAG_RESET_TASK_PENDING,
+       TG3_FLAG_5705_PLUS,
+       TG3_FLAG_IS_5788,
+       TG3_FLAG_5750_PLUS,
+       TG3_FLAG_5780_CLASS,
+       TG3_FLAG_5755_PLUS,
+       TG3_FLAG_57765_PLUS,
+       TG3_FLAG_57765_CLASS,
+       TG3_FLAG_5717_PLUS,
 
        /* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
        TG3_FLAG_NUMBER_OF_FLAGS,       /* Last entry in enum TG3_FLAGS */
@@ -2993,6 +3003,7 @@ struct tg3 {
        /* begin "tx thread" cacheline section */
        void                            (*write32_tx_mbox) (struct tg3 *, u32,
                                                            u32);
+       u32                             dma_limit;
 
        /* begin "rx thread" cacheline section */
        struct tg3_napi                 napi[TG3_IRQ_MAX_VECS];
@@ -3013,7 +3024,6 @@ struct tg3 {
        unsigned long                   rx_dropped;
        unsigned long                   tx_dropped;
        struct rtnl_link_stats64        net_stats_prev;
-       struct tg3_ethtool_stats        estats;
        struct tg3_ethtool_stats        estats_prev;
 
        DECLARE_BITMAP(tg3_flags, TG3_FLAG_NUMBER_OF_FLAGS);
@@ -3144,6 +3154,7 @@ struct tg3 {
        u32                             led_ctrl;
        u32                             phy_otp;
        u32                             setlpicnt;
+       u8                              rss_ind_tbl[TG3_RSS_INDIR_TBL_SIZE];
 
 #define TG3_BPN_SIZE                   24
        char                            board_part_number[TG3_BPN_SIZE];
index 74d3abc..6027302 100644
@@ -5,7 +5,7 @@
 
 obj-$(CONFIG_BNA) += bna.o
 
-bna-objs := bnad.o bnad_ethtool.o bna_enet.o bna_tx_rx.o
+bna-objs := bnad.o bnad_ethtool.o bnad_debugfs.o bna_enet.o bna_tx_rx.o
 bna-objs += bfa_msgq.o bfa_ioc.o bfa_ioc_ct.o bfa_cee.o
 bna-objs += cna_fwimg.o
 
index 8e62718..29f284f 100644
@@ -184,6 +184,41 @@ bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
                (dma_kva + bfa_cee_attr_meminfo());
 }
 
+/**
+ * bfa_cee_get_attr()
+ *
+ * @brief      Send the request to the f/w to fetch CEE attributes.
+ *
+ * @param[in]  cee     Pointer to the CEE module data structure.
+ * @param[in]  attr    Buffer to receive the fetched CEE attributes.
+ * @param[in]  cbfn    Completion callback.
+ * @param[in]  cbarg   Argument passed to the completion callback.
+ *
+ * @return     Status
+ */
+enum bfa_status
+bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
+                   bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
+{
+       struct bfi_cee_get_req *cmd;
+
+       BUG_ON(!((cee != NULL) && (cee->ioc != NULL)));
+       if (!bfa_nw_ioc_is_operational(cee->ioc))
+               return BFA_STATUS_IOC_FAILURE;
+
+       if (cee->get_attr_pending)
+               return BFA_STATUS_DEVBUSY;
+
+       cee->get_attr_pending = true;
+       cmd = (struct bfi_cee_get_req *) cee->get_cfg_mb.msg;
+       cee->attr = attr;
+       cee->cbfn.get_attr_cbfn = cbfn;
+       cee->cbfn.get_attr_cbarg = cbarg;
+       bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
+                   bfa_ioc_portid(cee->ioc));
+       bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
+       bfa_nw_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb, NULL, NULL);
+
+       return BFA_STATUS_OK;
+}
+
 /**
  * bfa_cee_isrs()
  *
index 58d54e9..93fde63 100644
@@ -59,5 +59,7 @@ u32 bfa_nw_cee_meminfo(void);
 void bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva,
        u64 dma_pa);
 void bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev);
-
+enum bfa_status bfa_nw_cee_get_attr(struct bfa_cee *cee,
+                               struct bfa_cee_attr *attr,
+                               bfa_cee_get_attr_cbfn_t cbfn, void *cbarg);
 #endif /* __BFA_CEE_H__ */
index 2f12d68..871c630 100644
@@ -219,41 +219,39 @@ enum {
  * All numerical fields are in big-endian format.
  */
 struct bfa_mfg_block {
-       u8              version;        /*!< manufacturing block version */
-       u8              mfg_sig[3];     /*!< characters 'M', 'F', 'G' */
-       u16     mfgsize;        /*!< mfg block size */
-       u16     u16_chksum;     /*!< old u16 checksum */
-       char            brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
-       char            brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
-       u8              mfg_day;        /*!< manufacturing day */
-       u8              mfg_month;      /*!< manufacturing month */
-       u16     mfg_year;       /*!< manufacturing year */
-       u64             mfg_wwn;        /*!< wwn base for this adapter */
-       u8              num_wwn;        /*!< number of wwns assigned */
-       u8              mfg_speeds;     /*!< speeds allowed for this adapter */
-       u8              rsv[2];
-       char            supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
-       char            supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
-       char
-               supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
-       char
-               supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
-       mac_t           mfg_mac;        /*!< mac address */
-       u8              num_mac;        /*!< number of mac addresses */
-       u8              rsv2;
-       u32             card_type;      /*!< card type */
-       char            cap_nic;        /*!< capability nic */
-       char            cap_cna;        /*!< capability cna */
-       char            cap_hba;        /*!< capability hba */
-       char            cap_fc16g;      /*!< capability fc 16g */
-       char            cap_sriov;      /*!< capability sriov */
-       char            cap_mezz;       /*!< capability mezz */
-       u8              rsv3;
-       u8              mfg_nports;     /*!< number of ports */
-       char            media[8];       /*!< xfi/xaui */
-       char            initial_mode[8];/*!< initial mode: hba/cna/nic */
-       u8              rsv4[84];
-       u8              md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */
+       u8      version;        /* manufacturing block version */
+       u8      mfg_sig[3];     /* characters 'M', 'F', 'G' */
+       u16     mfgsize;        /* mfg block size */
+       u16     u16_chksum;     /* old u16 checksum */
+       char    brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
+       char    brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
+       u8      mfg_day;        /* manufacturing day */
+       u8      mfg_month;      /* manufacturing month */
+       u16     mfg_year;       /* manufacturing year */
+       u64     mfg_wwn;        /* wwn base for this adapter */
+       u8      num_wwn;        /* number of wwns assigned */
+       u8      mfg_speeds;     /* speeds allowed for this adapter */
+       u8      rsv[2];
+       char    supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
+       char    supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
+       char    supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
+       char    supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
+       mac_t   mfg_mac;        /* base mac address */
+       u8      num_mac;        /* number of mac addresses */
+       u8      rsv2;
+       u32     card_type;      /* card type          */
+       char    cap_nic;        /* capability nic     */
+       char    cap_cna;        /* capability cna     */
+       char    cap_hba;        /* capability hba     */
+       char    cap_fc16g;      /* capability fc 16g      */
+       char    cap_sriov;      /* capability sriov       */
+       char    cap_mezz;       /* capability mezz        */
+       u8      rsv3;
+       u8      mfg_nports;     /* number of ports        */
+       char    media[8];       /* xfi/xaui           */
+       char    initial_mode[8]; /* initial mode: hba/cna/nic */
+       u8      rsv4[84];
+       u8      md5_chksum[BFA_MFG_CHKSUM_SIZE]; /* md5 checksum */
 };
 
 #pragma pack()
@@ -293,4 +291,34 @@ enum bfa_mode {
        BFA_MODE_NIC            = 3
 };
 
+/*
+ *     Flash module specific
+ */
+#define BFA_FLASH_PART_ENTRY_SIZE      32      /* partition entry size */
+#define BFA_FLASH_PART_MAX             32      /* maximal # of partitions */
+#define BFA_TOTAL_FLASH_SIZE           0x400000
+#define BFA_FLASH_PART_MFG             7
+
+/*
+ * flash partition attributes
+ */
+struct bfa_flash_part_attr {
+       u32     part_type;      /* partition type */
+       u32     part_instance;  /* partition instance */
+       u32     part_off;       /* partition offset */
+       u32     part_size;      /* partition size */
+       u32     part_len;       /* partition content length */
+       u32     part_status;    /* partition status */
+       char    rsv[BFA_FLASH_PART_ENTRY_SIZE - 24];
+};
+
+/*
+ * flash attributes
+ */
+struct bfa_flash_attr {
+       u32     status; /* flash overall status */
+       u32     npart;  /* num of partitions */
+       struct bfa_flash_part_attr part[BFA_FLASH_PART_MAX];
+};
+
 #endif /* __BFA_DEFS_H__ */
index b0307a0..abfad27 100644 (file)
@@ -74,6 +74,7 @@ static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
 static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
 static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
 static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
+static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc);
 static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
 static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
 static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
@@ -997,6 +998,7 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
 static void
 bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
 {
+       bfa_nw_ioc_debug_save_ftrc(iocpf->ioc);
        bfa_ioc_hw_sem_get(iocpf->ioc);
 }
 
@@ -1743,6 +1745,114 @@ bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
                bfa_q_deq(&mod->cmd_q, &cmd);
 }
 
+/**
+ * Read data from SMEM to host through PCI memmap
+ *
+ * @param[in]  ioc     memory for IOC
+ * @param[in]  tbuf    app memory to store data from smem
+ * @param[in]  soff    smem offset
+ * @param[in]  sz      size of smem in bytes
+ */
+static int
+bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
+{
+       u32 pgnum, loff, r32;
+       int i, len;
+       u32 *buf = tbuf;
+
+       pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
+       loff = PSS_SMEM_PGOFF(soff);
+
+       /*
+        * Hold semaphore to serialize pll init and fwtrc.
+        */
+       if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == 0)
+               return 1;
+
+       writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+       len = sz/sizeof(u32);
+       for (i = 0; i < len; i++) {
+               r32 = swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
+               buf[i] = be32_to_cpu(r32);
+               loff += sizeof(u32);
+
+               /**
+                * handle page offset wrap around
+                */
+               loff = PSS_SMEM_PGOFF(loff);
+               if (loff == 0) {
+                       pgnum++;
+                       writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+               }
+       }
+
+       writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
+              ioc->ioc_regs.host_page_num_fn);
+
+       /*
+        * release semaphore
+        */
+       readl(ioc->ioc_regs.ioc_init_sem_reg);
+       writel(1, ioc->ioc_regs.ioc_init_sem_reg);
+       return 0;
+}
+
+/**
+ * Retrieve the current firmware trace from SMEM.
+ */
+int
+bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
+{
+       u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id;
+       int tlen, status = 0;
+
+       tlen = *trclen;
+       if (tlen > BNA_DBG_FWTRC_LEN)
+               tlen = BNA_DBG_FWTRC_LEN;
+
+       status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen);
+       *trclen = tlen;
+       return status;
+}
+
+/**
+ * Save firmware trace if configured.
+ */
+static void
+bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
+{
+       int tlen;
+
+       if (ioc->dbg_fwsave_once) {
+               ioc->dbg_fwsave_once = 0;
+               if (ioc->dbg_fwsave_len) {
+                       tlen = ioc->dbg_fwsave_len;
+                       bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
+               }
+       }
+}
+
+/**
+ * Retrieve saved firmware trace from a prior IOC failure.
+ */
+int
+bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
+{
+       int tlen;
+
+       if (ioc->dbg_fwsave_len == 0)
+               return BFA_STATUS_ENOFSAVE;
+
+       tlen = *trclen;
+       if (tlen > ioc->dbg_fwsave_len)
+               tlen = ioc->dbg_fwsave_len;
+
+       memcpy(trcdata, ioc->dbg_fwsave, tlen);
+       *trclen = tlen;
+       return BFA_STATUS_OK;
+}
+
 static void
 bfa_ioc_fail_notify(struct bfa_ioc *ioc)
 {
@@ -1751,6 +1861,7 @@ bfa_ioc_fail_notify(struct bfa_ioc *ioc)
         */
        ioc->cbfn->hbfail_cbfn(ioc->bfa);
        bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
+       bfa_nw_ioc_debug_save_ftrc(ioc);
 }
 
 /**
@@ -2058,6 +2169,16 @@ bfa_nw_ioc_disable(struct bfa_ioc *ioc)
        bfa_fsm_send_event(ioc, IOC_E_DISABLE);
 }
 
+/**
+ * Initialize memory for saving firmware trace.
+ */
+void
+bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave)
+{
+       ioc->dbg_fwsave = dbg_fwsave;
+       ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? BNA_DBG_FWTRC_LEN : 0;
+}
+
 static u32
 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
 {
@@ -2171,6 +2292,15 @@ bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
                bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
 }
 
+/**
+ * Return true if the IOC is operational.
+ */
+bool
+bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
+{
+       return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
+}
+
 /**
  * Add to IOC heartbeat failure notification queue. To be used by common
  * modules such as cee, port, diag.
@@ -2471,3 +2601,366 @@ bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
                        msecs_to_jiffies(BFA_IOC_POLL_TOV));
        }
 }
+
+/*
+ *     Flash module specific
+ */
+
+/*
+ * The flash DMA buffer should be big enough to hold both the MFG block and
+ * the ASIC block (64k) at the same time, and should be 2k aligned so that
+ * a write segment never crosses a sector boundary.
+ */
+#define BFA_FLASH_SEG_SZ       2048
+#define BFA_FLASH_DMA_BUF_SZ   \
+       roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ)
+
+static void
+bfa_flash_cb(struct bfa_flash *flash)
+{
+       flash->op_busy = 0;
+       if (flash->cbfn)
+               flash->cbfn(flash->cbarg, flash->status);
+}
+
+static void
+bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
+{
+       struct bfa_flash *flash = cbarg;
+
+       switch (event) {
+       case BFA_IOC_E_DISABLED:
+       case BFA_IOC_E_FAILED:
+               if (flash->op_busy) {
+                       flash->status = BFA_STATUS_IOC_FAILURE;
+                       flash->cbfn(flash->cbarg, flash->status);
+                       flash->op_busy = 0;
+               }
+               break;
+       default:
+               break;
+       }
+}
+
+/*
+ * Send flash write request.
+ *
+ * @param[in] flash - flash structure
+ */
+static void
+bfa_flash_write_send(struct bfa_flash *flash)
+{
+       struct bfi_flash_write_req *msg =
+                       (struct bfi_flash_write_req *) flash->mb.msg;
+       u32     len;
+
+       msg->type = be32_to_cpu(flash->type);
+       msg->instance = flash->instance;
+       msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
+       len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
+              flash->residue : BFA_FLASH_DMA_BUF_SZ;
+       msg->length = be32_to_cpu(len);
+
+       /* indicate if it's the last msg of the whole write operation */
+       msg->last = (len == flash->residue) ? 1 : 0;
+
+       bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
+                   bfa_ioc_portid(flash->ioc));
+       bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
+       memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
+       bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
+
+       flash->residue -= len;
+       flash->offset += len;
+}
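+
+/*
+ * Note on chunking (illustrative, derived from the code above): each write
+ * request carries min(residue, BFA_FLASH_DMA_BUF_SZ) bytes, so an update
+ * larger than the DMA buffer is split across several mailbox requests.
+ * 'last' is set only on the request where len == residue; the response
+ * handler (bfa_flash_intr below) re-issues bfa_flash_write_send() until
+ * residue drops to zero.
+ */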
+
+/*
+ * Send flash read request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_read_send(void *cbarg)
+{
+       struct bfa_flash *flash = cbarg;
+       struct bfi_flash_read_req *msg =
+                       (struct bfi_flash_read_req *) flash->mb.msg;
+       u32     len;
+
+       msg->type = be32_to_cpu(flash->type);
+       msg->instance = flash->instance;
+       msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
+       len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
+              flash->residue : BFA_FLASH_DMA_BUF_SZ;
+       msg->length = be32_to_cpu(len);
+       bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
+                   bfa_ioc_portid(flash->ioc));
+       bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
+       bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
+}
+
+/*
+ * Process flash response messages upon receiving interrupts.
+ *
+ * @param[in] flasharg - flash structure
+ * @param[in] msg - message structure
+ */
+static void
+bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
+{
+       struct bfa_flash *flash = flasharg;
+       u32     status;
+
+       union {
+               struct bfi_flash_query_rsp *query;
+               struct bfi_flash_write_rsp *write;
+               struct bfi_flash_read_rsp *read;
+               struct bfi_mbmsg   *msg;
+       } m;
+
+       m.msg = msg;
+
+       /* receiving response after ioc failure */
+       if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT)
+               return;
+
+       switch (msg->mh.msg_id) {
+       case BFI_FLASH_I2H_QUERY_RSP:
+               status = be32_to_cpu(m.query->status);
+               if (status == BFA_STATUS_OK) {
+                       u32     i;
+                       struct bfa_flash_attr *attr, *f;
+
+                       attr = (struct bfa_flash_attr *) flash->ubuf;
+                       f = (struct bfa_flash_attr *) flash->dbuf_kva;
+                       attr->status = be32_to_cpu(f->status);
+                       attr->npart = be32_to_cpu(f->npart);
+                       for (i = 0; i < attr->npart; i++) {
+                               attr->part[i].part_type =
+                                       be32_to_cpu(f->part[i].part_type);
+                               attr->part[i].part_instance =
+                                       be32_to_cpu(f->part[i].part_instance);
+                               attr->part[i].part_off =
+                                       be32_to_cpu(f->part[i].part_off);
+                               attr->part[i].part_size =
+                                       be32_to_cpu(f->part[i].part_size);
+                               attr->part[i].part_len =
+                                       be32_to_cpu(f->part[i].part_len);
+                               attr->part[i].part_status =
+                                       be32_to_cpu(f->part[i].part_status);
+                       }
+               }
+               flash->status = status;
+               bfa_flash_cb(flash);
+               break;
+       case BFI_FLASH_I2H_WRITE_RSP:
+               status = be32_to_cpu(m.write->status);
+               if (status != BFA_STATUS_OK || flash->residue == 0) {
+                       flash->status = status;
+                       bfa_flash_cb(flash);
+               } else
+                       bfa_flash_write_send(flash);
+               break;
+       case BFI_FLASH_I2H_READ_RSP:
+               status = be32_to_cpu(m.read->status);
+               if (status != BFA_STATUS_OK) {
+                       flash->status = status;
+                       bfa_flash_cb(flash);
+               } else {
+                       u32 len = be32_to_cpu(m.read->length);
+                       memcpy(flash->ubuf + flash->offset,
+                              flash->dbuf_kva, len);
+                       flash->residue -= len;
+                       flash->offset += len;
+                       if (flash->residue == 0) {
+                               flash->status = status;
+                               bfa_flash_cb(flash);
+                       } else
+                               bfa_flash_read_send(flash);
+               }
+               break;
+       case BFI_FLASH_I2H_BOOT_VER_RSP:
+       case BFI_FLASH_I2H_EVENT:
+               break;
+       default:
+               WARN_ON(1);
+       }
+}
+
+/*
+ * Flash memory info API.
+ */
+u32
+bfa_nw_flash_meminfo(void)
+{
+       return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Flash attach API.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] ioc  - ioc structure
+ * @param[in] dev  - device structure
+ */
+void
+bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
+{
+       flash->ioc = ioc;
+       flash->cbfn = NULL;
+       flash->cbarg = NULL;
+       flash->op_busy = 0;
+
+       bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
+       bfa_q_qe_init(&flash->ioc_notify);
+       bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
+       list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
+}
+
+/*
+ * Claim memory for flash
+ *
+ * @param[in] flash - flash structure
+ * @param[in] dm_kva - pointer to virtual memory address
+ * @param[in] dm_pa - physical memory address
+ */
+void
+bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
+{
+       flash->dbuf_kva = dm_kva;
+       flash->dbuf_pa = dm_pa;
+       memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
+       dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+       dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Get flash attribute.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] attr - flash attribute structure
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+enum bfa_status
+bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
+                     bfa_cb_flash cbfn, void *cbarg)
+{
+       struct bfi_flash_query_req *msg =
+                       (struct bfi_flash_query_req *) flash->mb.msg;
+
+       if (!bfa_nw_ioc_is_operational(flash->ioc))
+               return BFA_STATUS_IOC_NON_OP;
+
+       if (flash->op_busy)
+               return BFA_STATUS_DEVBUSY;
+
+       flash->op_busy = 1;
+       flash->cbfn = cbfn;
+       flash->cbarg = cbarg;
+       flash->ubuf = (u8 *) attr;
+
+       bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
+                   bfa_ioc_portid(flash->ioc));
+       bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa);
+       bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
+
+       return BFA_STATUS_OK;
+}
+
+/*
+ * Update flash partition.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] type - flash partition type
+ * @param[in] instance - flash partition instance
+ * @param[in] buf - update data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to the partition starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+enum bfa_status
+bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance,
+                        void *buf, u32 len, u32 offset,
+                        bfa_cb_flash cbfn, void *cbarg)
+{
+       if (!bfa_nw_ioc_is_operational(flash->ioc))
+               return BFA_STATUS_IOC_NON_OP;
+
+       /*
+        * 'len' must be in word (4-byte) boundary
+        */
+       if (!len || (len & 0x03))
+               return BFA_STATUS_FLASH_BAD_LEN;
+
+       if (type == BFA_FLASH_PART_MFG)
+               return BFA_STATUS_EINVAL;
+
+       if (flash->op_busy)
+               return BFA_STATUS_DEVBUSY;
+
+       flash->op_busy = 1;
+       flash->cbfn = cbfn;
+       flash->cbarg = cbarg;
+       flash->type = type;
+       flash->instance = instance;
+       flash->residue = len;
+       flash->offset = 0;
+       flash->addr_off = offset;
+       flash->ubuf = buf;
+
+       bfa_flash_write_send(flash);
+
+       return BFA_STATUS_OK;
+}
+
+/*
+ * Read flash partition.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] type - flash partition type
+ * @param[in] instance - flash partition instance
+ * @param[in] buf - read data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to the partition starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+enum bfa_status
+bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance,
+                      void *buf, u32 len, u32 offset,
+                      bfa_cb_flash cbfn, void *cbarg)
+{
+       if (!bfa_nw_ioc_is_operational(flash->ioc))
+               return BFA_STATUS_IOC_NON_OP;
+
+       /*
+        * 'len' must be in word (4-byte) boundary
+        */
+       if (!len || (len & 0x03))
+               return BFA_STATUS_FLASH_BAD_LEN;
+
+       if (flash->op_busy)
+               return BFA_STATUS_DEVBUSY;
+
+       flash->op_busy = 1;
+       flash->cbfn = cbfn;
+       flash->cbarg = cbarg;
+       flash->type = type;
+       flash->instance = instance;
+       flash->residue = len;
+       flash->offset = 0;
+       flash->addr_off = offset;
+       flash->ubuf = buf;
+
+       bfa_flash_read_send(flash);
+
+       return BFA_STATUS_OK;
+}
index ca158d1..3b4460f 100644 (file)
@@ -27,6 +27,8 @@
 #define BFA_IOC_HWSEM_TOV      500     /* msecs */
 #define BFA_IOC_HB_TOV         500     /* msecs */
 #define BFA_IOC_POLL_TOV       200     /* msecs */
+#define BNA_DBG_FWTRC_LEN      (BFI_IOC_TRC_ENTS * BFI_IOC_TRC_ENT_SZ + \
+                               BFI_IOC_TRC_HDR_SZ)
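+/* With the BFI_IOC_TRC_* values from bfi.h (256 entries of 16 bytes plus a
+ * 32-byte header) this works out to 4128 bytes per saved trace buffer.
+ */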
 
 /**
  * PCI device information required by IOC
@@ -68,6 +70,16 @@ __bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
        dma_addr->a32.addr_hi = (u32) htonl(upper_32_bits(pa));
 }
 
+#define bfa_alen_set(__alen, __len, __pa)      \
+       __bfa_alen_set(__alen, __len, (u64)__pa)
+
+static inline void
+__bfa_alen_set(struct bfi_alen *alen, u32 len, u64 pa)
+{
+       alen->al_len = cpu_to_be32(len);
+       bfa_dma_be_addr_set(alen->al_addr, pa);
+}
+
 struct bfa_ioc_regs {
        void __iomem *hfn_mbox_cmd;
        void __iomem *hfn_mbox;
@@ -296,6 +308,7 @@ void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
 
 void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
 bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc);
+bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc);
 void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
 void bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
        struct bfa_ioc_notify *notify);
@@ -307,6 +320,9 @@ void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc,
 bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc,
                        struct bfi_ioc_image_hdr *fwhdr);
 mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc);
+void bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave);
+int bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen);
+int bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen);
 
 /*
  * Timeout APIs
@@ -322,4 +338,42 @@ void bfa_nw_iocpf_sem_timeout(void *ioc);
 u32 *bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off);
 u32 bfa_cb_image_get_size(enum bfi_asic_gen asic_gen);
 
+/*
+ *     Flash module specific
+ */
+typedef void   (*bfa_cb_flash) (void *cbarg, enum bfa_status status);
+
+struct bfa_flash {
+       struct bfa_ioc *ioc;            /* back pointer to ioc */
+       u32             type;           /* partition type */
+       u8              instance;       /* partition instance */
+       u8              rsv[3];
+       u32             op_busy;        /*  operation busy flag */
+       u32             residue;        /*  residual length */
+       u32             offset;         /*  offset */
+       enum bfa_status status;         /*  status */
+       u8              *dbuf_kva;      /*  dma buf virtual address */
+       u64             dbuf_pa;        /*  dma buf physical address */
+       bfa_cb_flash    cbfn;           /*  user callback function */
+       void            *cbarg;         /*  user callback arg */
+       u8              *ubuf;          /*  user supplied buffer */
+       u32             addr_off;       /*  partition address offset */
+       struct bfa_mbox_cmd mb;         /*  mailbox */
+       struct bfa_ioc_notify ioc_notify; /*  ioc event notify */
+};
+
+enum bfa_status bfa_nw_flash_get_attr(struct bfa_flash *flash,
+                       struct bfa_flash_attr *attr,
+                       bfa_cb_flash cbfn, void *cbarg);
+enum bfa_status bfa_nw_flash_update_part(struct bfa_flash *flash,
+                       u32 type, u8 instance, void *buf, u32 len, u32 offset,
+                       bfa_cb_flash cbfn, void *cbarg);
+enum bfa_status bfa_nw_flash_read_part(struct bfa_flash *flash,
+                       u32 type, u8 instance, void *buf, u32 len, u32 offset,
+                       bfa_cb_flash cbfn, void *cbarg);
+u32    bfa_nw_flash_meminfo(void);
+void   bfa_nw_flash_attach(struct bfa_flash *flash,
+                           struct bfa_ioc *ioc, void *dev);
+void   bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa);
+
 #endif /* __BFA_IOC_H__ */
index 7a1393a..0d9df69 100644 (file)
@@ -83,6 +83,14 @@ union bfi_addr_u {
        } a32;
 };
 
+/**
+ * Generic DMA addr-len pair.
+ */
+struct bfi_alen {
+       union bfi_addr_u        al_addr;        /* DMA addr of buffer   */
+       u32                     al_len;         /* length of buffer */
+};
+
 /*
  * Large Message structure - 128 Bytes size Msgs
  */
@@ -249,6 +257,8 @@ struct bfi_ioc_getattr_reply {
  */
 #define BFI_IOC_TRC_OFF                (0x4b00)
 #define BFI_IOC_TRC_ENTS       256
+#define BFI_IOC_TRC_ENT_SZ     16
+#define BFI_IOC_TRC_HDR_SZ     32
 
 #define BFI_IOC_FW_SIGNATURE   (0xbfadbfad)
 #define BFI_IOC_MD5SUM_SZ      4
@@ -476,6 +486,93 @@ struct bfi_msgq_i2h_cmdq_copy_req {
        u16     len;
 };
 
+/*
+ *      FLASH module specific
+ */
+enum bfi_flash_h2i_msgs {
+       BFI_FLASH_H2I_QUERY_REQ = 1,
+       BFI_FLASH_H2I_ERASE_REQ = 2,
+       BFI_FLASH_H2I_WRITE_REQ = 3,
+       BFI_FLASH_H2I_READ_REQ = 4,
+       BFI_FLASH_H2I_BOOT_VER_REQ = 5,
+};
+
+enum bfi_flash_i2h_msgs {
+       BFI_FLASH_I2H_QUERY_RSP = BFA_I2HM(1),
+       BFI_FLASH_I2H_ERASE_RSP = BFA_I2HM(2),
+       BFI_FLASH_I2H_WRITE_RSP = BFA_I2HM(3),
+       BFI_FLASH_I2H_READ_RSP = BFA_I2HM(4),
+       BFI_FLASH_I2H_BOOT_VER_RSP = BFA_I2HM(5),
+       BFI_FLASH_I2H_EVENT = BFA_I2HM(127),
+};
+
+/*
+ * Flash query request
+ */
+struct bfi_flash_query_req {
+       struct bfi_mhdr mh;   /* Common msg header */
+       struct bfi_alen alen;
+};
+
+/*
+ * Flash write request
+ */
+struct bfi_flash_write_req {
+       struct bfi_mhdr mh;     /* Common msg header */
+       struct bfi_alen alen;
+       u32     type;   /* partition type */
+       u8      instance; /* partition instance */
+       u8      last;
+       u8      rsv[2];
+       u32     offset;
+       u32     length;
+};
+
+/*
+ * Flash read request
+ */
+struct bfi_flash_read_req {
+       struct bfi_mhdr mh;     /* Common msg header */
+       u32     type;           /* partition type */
+       u8      instance;       /* partition instance */
+       u8      rsv[3];
+       u32     offset;
+       u32     length;
+       struct bfi_alen alen;
+};
+
+/*
+ * Flash query response
+ */
+struct bfi_flash_query_rsp {
+       struct bfi_mhdr mh;     /* Common msg header */
+       u32     status;
+};
+
+/*
+ * Flash read response
+ */
+struct bfi_flash_read_rsp {
+       struct bfi_mhdr mh;     /* Common msg header */
+       u32     type;           /* partition type */
+       u8      instance;       /* partition instance */
+       u8      rsv[3];
+       u32     status;
+       u32     length;
+};
+
+/*
+ * Flash write response
+ */
+struct bfi_flash_write_rsp {
+       struct bfi_mhdr mh;     /* Common msg header */
+       u32     type;           /* partition type */
+       u8      instance;       /* partition instance */
+       u8      rsv[3];
+       u32     status;
+       u32     length;
+};
+
 #pragma pack()
 
 #endif /* __BFI_H__ */
index 26f5c5a..9ccc586 100644 (file)
@@ -1727,6 +1727,7 @@ bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
        bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);
 
        kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
+       bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva);
 
        /**
         * Attach common modules (Diag, SFP, CEE, Port) and claim respective
@@ -1740,6 +1741,11 @@ bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
        kva += bfa_nw_cee_meminfo();
        dma += bfa_nw_cee_meminfo();
 
+       bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna);
+       bfa_nw_flash_memclaim(&bna->flash, kva, dma);
+       kva += bfa_nw_flash_meminfo();
+       dma += bfa_nw_flash_meminfo();
+
        bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
        bfa_msgq_memclaim(&bna->msgq, kva, dma);
        bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
@@ -1892,7 +1898,8 @@ bna_res_req(struct bna_res_info *res_info)
        res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
        res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
                                (bfa_nw_cee_meminfo() +
-                               bfa_msgq_meminfo()), PAGE_SIZE);
+                                bfa_nw_flash_meminfo() +
+                                bfa_msgq_meminfo()), PAGE_SIZE);
 
        /* DMA memory for retrieving IOC attributes */
        res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
@@ -1904,8 +1911,8 @@ bna_res_req(struct bna_res_info *res_info)
        /* Virtual memory for retreiving fw_trc */
        res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
        res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
-       res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0;
-       res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0;
+       res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1;
+       res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN;
 
        /* DMA memory for retreiving stats */
        res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
index d090fbf..8e57fc5 100644 (file)
@@ -966,6 +966,7 @@ struct bna {
 
        struct bna_ioceth ioceth;
        struct bfa_cee cee;
+       struct bfa_flash flash;
        struct bfa_msgq msgq;
 
        struct bna_ethport ethport;
index 7f3091e..2eddbaa 100644 (file)
@@ -44,11 +44,18 @@ static uint bnad_ioc_auto_recover = 1;
 module_param(bnad_ioc_auto_recover, uint, 0444);
 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
 
+static uint bna_debugfs_enable = 1;
+module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
+                " Range[false:0|true:1]");
+
 /*
  * Global variables
  */
 u32 bnad_rxqs_per_cq = 2;
-
+u32 bna_id;
+struct mutex bnad_list_mutex;
+LIST_HEAD(bnad_list);
 static const u8 bnad_bcast_addr[] =  {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 
 /*
@@ -75,6 +82,23 @@ do {                                                         \
 
 #define BNAD_TXRX_SYNC_MDELAY  250     /* 250 msecs */
 
+static void
+bnad_add_to_list(struct bnad *bnad)
+{
+       mutex_lock(&bnad_list_mutex);
+       list_add_tail(&bnad->list_entry, &bnad_list);
+       bnad->id = bna_id++;
+       mutex_unlock(&bnad_list_mutex);
+}
+
+static void
+bnad_remove_from_list(struct bnad *bnad)
+{
+       mutex_lock(&bnad_list_mutex);
+       list_del(&bnad->list_entry);
+       mutex_unlock(&bnad_list_mutex);
+}
+
 /*
  * Reinitialize completions in CQ, once Rx is taken down
  */
@@ -723,7 +747,7 @@ void
 bnad_cb_ethport_link_status(struct bnad *bnad,
                        enum bna_link_status link_status)
 {
-       bool link_up = 0;
+       bool link_up = false;
 
        link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
 
@@ -1084,6 +1108,16 @@ bnad_cb_enet_mtu_set(struct bnad *bnad)
        complete(&bnad->bnad_completions.mtu_comp);
 }
 
+void
+bnad_cb_completion(void *arg, enum bfa_status status)
+{
+       struct bnad_iocmd_comp *iocmd_comp =
+                       (struct bnad_iocmd_comp *)arg;
+
+       iocmd_comp->comp_status = (u32) status;
+       complete(&iocmd_comp->comp);
+}
+
 /* Resource allocation, free functions */
 
 static void
@@ -2968,7 +3002,7 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu)
        return err;
 }
 
-static void
+static int
 bnad_vlan_rx_add_vid(struct net_device *netdev,
                                 unsigned short vid)
 {
@@ -2976,7 +3010,7 @@ bnad_vlan_rx_add_vid(struct net_device *netdev,
        unsigned long flags;
 
        if (!bnad->rx_info[0].rx)
-               return;
+               return 0;
 
        mutex_lock(&bnad->conf_mutex);
 
@@ -2986,9 +3020,11 @@ bnad_vlan_rx_add_vid(struct net_device *netdev,
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
        mutex_unlock(&bnad->conf_mutex);
+
+       return 0;
 }
 
-static void
+static int
 bnad_vlan_rx_kill_vid(struct net_device *netdev,
                                  unsigned short vid)
 {
@@ -2996,7 +3032,7 @@ bnad_vlan_rx_kill_vid(struct net_device *netdev,
        unsigned long flags;
 
        if (!bnad->rx_info[0].rx)
-               return;
+               return 0;
 
        mutex_lock(&bnad->conf_mutex);
 
@@ -3006,6 +3042,8 @@ bnad_vlan_rx_kill_vid(struct net_device *netdev,
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
        mutex_unlock(&bnad->conf_mutex);
+
+       return 0;
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3163,12 +3201,14 @@ bnad_lock_init(struct bnad *bnad)
 {
        spin_lock_init(&bnad->bna_lock);
        mutex_init(&bnad->conf_mutex);
+       mutex_init(&bnad_list_mutex);
 }
 
 static void
 bnad_lock_uninit(struct bnad *bnad)
 {
        mutex_destroy(&bnad->conf_mutex);
+       mutex_destroy(&bnad_list_mutex);
 }
 
 /* PCI Initialization */
@@ -3186,7 +3226,7 @@ bnad_pci_init(struct bnad *bnad,
                goto disable_device;
        if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
            !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
-               *using_dac = 1;
+               *using_dac = true;
        } else {
                err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
@@ -3195,7 +3235,7 @@ bnad_pci_init(struct bnad *bnad,
                        if (err)
                                goto release_regions;
                }
-               *using_dac = 0;
+               *using_dac = false;
        }
        pci_set_master(pdev);
        return 0;
@@ -3249,8 +3289,8 @@ bnad_pci_probe(struct pci_dev *pdev,
                return err;
        }
        bnad = netdev_priv(netdev);
-
        bnad_lock_init(bnad);
+       bnad_add_to_list(bnad);
 
        mutex_lock(&bnad->conf_mutex);
        /*
@@ -3277,6 +3317,10 @@ bnad_pci_probe(struct pci_dev *pdev,
        /* Set link to down state */
        netif_carrier_off(netdev);
 
+       /* Setup the debugfs node for this bnad */
+       if (bna_debugfs_enable)
+               bnad_debugfs_init(bnad);
+
        /* Get resource requirement form bna */
        spin_lock_irqsave(&bnad->bna_lock, flags);
        bna_res_req(&bnad->res_info[0]);
@@ -3398,11 +3442,15 @@ disable_ioceth:
 res_free:
        bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
 drv_uninit:
+       /* Remove the debugfs node for this bnad */
+       kfree(bnad->regdata);
+       bnad_debugfs_uninit(bnad);
        bnad_uninit(bnad);
 pci_uninit:
        bnad_pci_uninit(pdev);
 unlock_mutex:
        mutex_unlock(&bnad->conf_mutex);
+       bnad_remove_from_list(bnad);
        bnad_lock_uninit(bnad);
        free_netdev(netdev);
        return err;
@@ -3441,7 +3489,11 @@ bnad_pci_remove(struct pci_dev *pdev)
        bnad_disable_msix(bnad);
        bnad_pci_uninit(pdev);
        mutex_unlock(&bnad->conf_mutex);
+       bnad_remove_from_list(bnad);
        bnad_lock_uninit(bnad);
+       /* Remove the debugfs node for this bnad */
+       kfree(bnad->regdata);
+       bnad_debugfs_uninit(bnad);
        bnad_uninit(bnad);
        free_netdev(netdev);
 }
index 5487ca4..c975ce6 100644 (file)
@@ -124,6 +124,12 @@ enum bnad_link_state {
        BNAD_LS_UP              = 1
 };
 
+struct bnad_iocmd_comp {
+       struct bnad             *bnad;
+       struct completion       comp;
+       int                     comp_status;
+};
+
 struct bnad_completion {
        struct completion       ioc_comp;
        struct completion       ucast_comp;
@@ -251,6 +257,8 @@ struct bnad_unmap_q {
 
 struct bnad {
        struct net_device       *netdev;
+       u32                     id;
+       struct list_head        list_entry;
 
        /* Data path */
        struct bnad_tx_info tx_info[BNAD_MAX_TX];
@@ -320,6 +328,20 @@ struct bnad {
        char                    adapter_name[BNAD_NAME_LEN];
        char                    port_name[BNAD_NAME_LEN];
        char                    mbox_irq_name[BNAD_NAME_LEN];
+
+       /* debugfs specific data */
+       char    *regdata;
+       u32     reglen;
+       struct dentry *bnad_dentry_files[5];
+       struct dentry *port_debugfs_root;
+};
+
+struct bnad_drvinfo {
+       struct bfa_ioc_attr  ioc_attr;
+       struct bfa_cee_attr  cee_attr;
+       struct bfa_flash_attr flash_attr;
+       u32     cee_status;
+       u32     flash_status;
 };
 
 /*
@@ -340,6 +362,7 @@ extern int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr);
 extern int bnad_enable_default_bcast(struct bnad *bnad);
 extern void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
 extern void bnad_set_ethtool_ops(struct net_device *netdev);
+extern void bnad_cb_completion(void *arg, enum bfa_status status);
 
 /* Configuration & setup */
 extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
@@ -359,6 +382,10 @@ extern void bnad_netdev_qstats_fill(struct bnad *bnad,
 extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
                struct rtnl_link_stats64 *stats);
 
+/* Debugfs */
+void   bnad_debugfs_init(struct bnad *bnad);
+void   bnad_debugfs_uninit(struct bnad *bnad);
+
 /**
  * MACROS
  */
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
new file mode 100644 (file)
index 0000000..592ad39
--- /dev/null
@@ -0,0 +1,623 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include "bnad.h"
+
+/*
+ * BNA debufs interface
+ *
+ * To access the interface, debugfs file system should be mounted
+ * if not already mounted using:
+ *     mount -t debugfs none /sys/kernel/debug
+ *
+ * BNA Hierarchy:
+ *     - bna/pci_dev:<pci_name>
+ * where the pci_name corresponds to the one under /sys/bus/pci/drivers/bna
+ *
+ * Debugging services available per pci_dev:
+ *     fwtrc:  To collect the current firmware trace.
+ *     fwsave: To collect the last saved fw trace after a firmware crash.
+ *     regwr:  To write one word to a chip register.
+ *     regrd:  To read one or more words from a chip register.
+ *     drvinfo: To collect driver, IOC, CEE and flash attributes.
+ */
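+
+/*
+ * Example usage (illustrative only; the pci_dev directory name depends on
+ * the PCI address of the port, and the offsets below are placeholders):
+ *
+ *     cd /sys/kernel/debug/bna/pci_dev:<pci_name>
+ *     cat fwtrc > /tmp/fwtrc              # dump the current firmware trace
+ *     echo "100:4" > regrd; cat regrd     # read 4 words from offset 0x100
+ *     echo "<offset>:<value>" > regwr     # write one word (both in hex)
+ *
+ * regrd takes a hex "offset:count" pair and regwr a hex "offset:value"
+ * pair, matching the sscanf() formats used below.
+ */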
+
+struct bnad_debug_info {
+       char *debug_buffer;
+       void *i_private;
+       int buffer_len;
+};
+
+static int
+bnad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
+{
+       struct bnad *bnad = inode->i_private;
+       struct bnad_debug_info *fw_debug;
+       unsigned long flags;
+       int rc;
+
+       fw_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
+       if (!fw_debug)
+               return -ENOMEM;
+
+       fw_debug->buffer_len = BNA_DBG_FWTRC_LEN;
+
+       fw_debug->debug_buffer = kzalloc(fw_debug->buffer_len, GFP_KERNEL);
+       if (!fw_debug->debug_buffer) {
+               kfree(fw_debug);
+               fw_debug = NULL;
+               pr_warn("bna %s: Failed to allocate fwtrc buffer\n",
+                       pci_name(bnad->pcidev));
+               return -ENOMEM;
+       }
+
+       spin_lock_irqsave(&bnad->bna_lock, flags);
+       rc = bfa_nw_ioc_debug_fwtrc(&bnad->bna.ioceth.ioc,
+                       fw_debug->debug_buffer,
+                       &fw_debug->buffer_len);
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+       if (rc != BFA_STATUS_OK) {
+               kfree(fw_debug->debug_buffer);
+               fw_debug->debug_buffer = NULL;
+               kfree(fw_debug);
+               fw_debug = NULL;
+               pr_warn("bnad %s: Failed to collect fwtrc\n",
+                       pci_name(bnad->pcidev));
+               return -ENOMEM;
+       }
+
+       file->private_data = fw_debug;
+
+       return 0;
+}
+
+static int
+bnad_debugfs_open_fwsave(struct inode *inode, struct file *file)
+{
+       struct bnad *bnad = inode->i_private;
+       struct bnad_debug_info *fw_debug;
+       unsigned long flags;
+       int rc;
+
+       fw_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
+       if (!fw_debug)
+               return -ENOMEM;
+
+       fw_debug->buffer_len = BNA_DBG_FWTRC_LEN;
+
+       fw_debug->debug_buffer = kzalloc(fw_debug->buffer_len, GFP_KERNEL);
+       if (!fw_debug->debug_buffer) {
+               kfree(fw_debug);
+               fw_debug = NULL;
+               pr_warn("bna %s: Failed to allocate fwsave buffer\n",
+                       pci_name(bnad->pcidev));
+               return -ENOMEM;
+       }
+
+       spin_lock_irqsave(&bnad->bna_lock, flags);
+       rc = bfa_nw_ioc_debug_fwsave(&bnad->bna.ioceth.ioc,
+                       fw_debug->debug_buffer,
+                       &fw_debug->buffer_len);
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+       if (rc != BFA_STATUS_OK && rc != BFA_STATUS_ENOFSAVE) {
+               kfree(fw_debug->debug_buffer);
+               fw_debug->debug_buffer = NULL;
+               kfree(fw_debug);
+               fw_debug = NULL;
+               pr_warn("bna %s: Failed to collect fwsave\n",
+                       pci_name(bnad->pcidev));
+               return -ENOMEM;
+       }
+
+       file->private_data = fw_debug;
+
+       return 0;
+}
+
+static int
+bnad_debugfs_open_reg(struct inode *inode, struct file *file)
+{
+       struct bnad_debug_info *reg_debug;
+
+       reg_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
+       if (!reg_debug)
+               return -ENOMEM;
+
+       reg_debug->i_private = inode->i_private;
+
+       file->private_data = reg_debug;
+
+       return 0;
+}
+
+static int
+bnad_get_debug_drvinfo(struct bnad *bnad, void *buffer, u32 len)
+{
+       struct bnad_drvinfo *drvinfo = (struct bnad_drvinfo *) buffer;
+       struct bnad_iocmd_comp fcomp;
+       unsigned long flags = 0;
+       int ret = BFA_STATUS_FAILED;
+
+       /* Get IOC info */
+       spin_lock_irqsave(&bnad->bna_lock, flags);
+       bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, &drvinfo->ioc_attr);
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+       /* Retrieve CEE related info */
+       fcomp.bnad = bnad;
+       fcomp.comp_status = 0;
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bnad->bna_lock, flags);
+       ret = bfa_nw_cee_get_attr(&bnad->bna.cee, &drvinfo->cee_attr,
+                               bnad_cb_completion, &fcomp);
+       if (ret != BFA_STATUS_OK) {
+               spin_unlock_irqrestore(&bnad->bna_lock, flags);
+               goto out;
+       }
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+       wait_for_completion(&fcomp.comp);
+       drvinfo->cee_status = fcomp.comp_status;
+
+       /* Retrieve flash partition info */
+       fcomp.comp_status = 0;
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bnad->bna_lock, flags);
+       ret = bfa_nw_flash_get_attr(&bnad->bna.flash, &drvinfo->flash_attr,
+                               bnad_cb_completion, &fcomp);
+       if (ret != BFA_STATUS_OK) {
+               spin_unlock_irqrestore(&bnad->bna_lock, flags);
+               goto out;
+       }
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+       wait_for_completion(&fcomp.comp);
+       drvinfo->flash_status = fcomp.comp_status;
+out:
+       return ret;
+}
+
+static int
+bnad_debugfs_open_drvinfo(struct inode *inode, struct file *file)
+{
+       struct bnad *bnad = inode->i_private;
+       struct bnad_debug_info *drv_info;
+       int rc;
+
+       drv_info = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
+       if (!drv_info)
+               return -ENOMEM;
+
+       drv_info->buffer_len = sizeof(struct bnad_drvinfo);
+
+       drv_info->debug_buffer = kzalloc(drv_info->buffer_len, GFP_KERNEL);
+       if (!drv_info->debug_buffer) {
+               kfree(drv_info);
+               drv_info = NULL;
+               pr_warn("bna %s: Failed to allocate drv info buffer\n",
+                       pci_name(bnad->pcidev));
+               return -ENOMEM;
+       }
+
+       mutex_lock(&bnad->conf_mutex);
+       rc = bnad_get_debug_drvinfo(bnad, drv_info->debug_buffer,
+                               drv_info->buffer_len);
+       mutex_unlock(&bnad->conf_mutex);
+       if (rc != BFA_STATUS_OK) {
+               kfree(drv_info->debug_buffer);
+               drv_info->debug_buffer = NULL;
+               kfree(drv_info);
+               drv_info = NULL;
+               pr_warn("bna %s: Failed to collect drvinfo\n",
+                       pci_name(bnad->pcidev));
+               return -ENOMEM;
+       }
+
+       file->private_data = drv_info;
+
+       return 0;
+}
+
+/* Changes the current file position */
+static loff_t
+bnad_debugfs_lseek(struct file *file, loff_t offset, int orig)
+{
+       loff_t pos = file->f_pos;
+       struct bnad_debug_info *debug = file->private_data;
+
+       if (!debug)
+               return -EINVAL;
+
+       switch (orig) {
+       case 0:
+               file->f_pos = offset;
+               break;
+       case 1:
+               file->f_pos += offset;
+               break;
+       case 2:
+               file->f_pos = debug->buffer_len - offset;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (file->f_pos < 0 || file->f_pos > debug->buffer_len) {
+               file->f_pos = pos;
+               return -EINVAL;
+       }
+
+       return file->f_pos;
+}
+
+static ssize_t
+bnad_debugfs_read(struct file *file, char __user *buf,
+                 size_t nbytes, loff_t *pos)
+{
+       struct bnad_debug_info *debug = file->private_data;
+
+       if (!debug || !debug->debug_buffer)
+               return 0;
+
+       return simple_read_from_buffer(buf, nbytes, pos,
+                               debug->debug_buffer, debug->buffer_len);
+}
+
+#define BFA_REG_CT_ADDRSZ      (0x40000)
+#define BFA_REG_CB_ADDRSZ      (0x20000)
+#define BFA_REG_ADDRSZ(__ioc)  \
+       ((u32)(bfa_asic_id_ctc(bfa_ioc_devid(__ioc)) ?  \
+        BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ))
+#define BFA_REG_ADDRMSK(__ioc) (BFA_REG_ADDRSZ(__ioc) - 1)
+
+/*
+ * Function to check if the register offset passed is valid.
+ */
+static int
+bna_reg_offset_check(struct bfa_ioc *ioc, u32 offset, u32 len)
+{
+       u8 area;
+
+       /* check area bits [17:15] */
+       area = (offset >> 15) & 0x7;
+       if (area == 0) {
+               /* PCIe core register */
+               if ((offset + (len<<2)) > 0x8000)       /* 8k dwords or 32KB */
+                       return BFA_STATUS_EINVAL;
+       } else if (area == 0x1) {
+               /* CB 32 KB memory page */
+               if ((offset + (len<<2)) > 0x10000)      /* 16k dwords or 64KB */
+                       return BFA_STATUS_EINVAL;
+       } else {
+               /* CB register space 64KB */
+               if ((offset + (len<<2)) > BFA_REG_ADDRMSK(ioc))
+                       return BFA_STATUS_EINVAL;
+       }
+       return BFA_STATUS_OK;
+}
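+
+/*
+ * Worked example (illustrative): writing "0:2000" to the regrd file asks
+ * for 0x2000 words starting at offset 0, i.e. 0x8000 bytes.  Area bits
+ * [17:15] are 0 (PCIe core space) and offset + (len << 2) == 0x8000, the
+ * largest request that still passes the 0x8000-byte bound above.
+ */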
+
+static ssize_t
+bnad_debugfs_read_regrd(struct file *file, char __user *buf,
+                       size_t nbytes, loff_t *pos)
+{
+       struct bnad_debug_info *regrd_debug = file->private_data;
+       struct bnad *bnad = (struct bnad *)regrd_debug->i_private;
+       ssize_t rc;
+
+       if (!bnad->regdata)
+               return 0;
+
+       rc = simple_read_from_buffer(buf, nbytes, pos,
+                       bnad->regdata, bnad->reglen);
+
+       if ((*pos + nbytes) >= bnad->reglen) {
+               kfree(bnad->regdata);
+               bnad->regdata = NULL;
+               bnad->reglen = 0;
+       }
+
+       return rc;
+}
+
+static ssize_t
+bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
+               size_t nbytes, loff_t *ppos)
+{
+       struct bnad_debug_info *regrd_debug = file->private_data;
+       struct bnad *bnad = (struct bnad *)regrd_debug->i_private;
+       struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc;
+       int addr, len, rc, i;
+       u32 *regbuf;
+       void __iomem *rb, *reg_addr;
+       unsigned long flags;
+       void *kern_buf;
+
+       /* Allocate memory to store the user space buf */
+       kern_buf = kzalloc(nbytes, GFP_KERNEL);
+       if (!kern_buf) {
+               pr_warn("bna %s: Failed to allocate user buffer\n",
+                       pci_name(bnad->pcidev));
+               return -ENOMEM;
+       }
+
+       if (copy_from_user(kern_buf, (void  __user *)buf, nbytes)) {
+               kfree(kern_buf);
+               return -ENOMEM;
+       }
+
+       rc = sscanf(kern_buf, "%x:%x", &addr, &len);
+       if (rc < 2) {
+               pr_warn("bna %s: Failed to read user buffer\n",
+                       pci_name(bnad->pcidev));
+               kfree(kern_buf);
+               return -EINVAL;
+       }
+
+       kfree(kern_buf);
+       kfree(bnad->regdata);
+       bnad->regdata = NULL;
+       bnad->reglen = 0;
+
+       bnad->regdata = kzalloc(len << 2, GFP_KERNEL);
+       if (!bnad->regdata) {
+               pr_warn("bna %s: Failed to allocate regrd buffer\n",
+                       pci_name(bnad->pcidev));
+               return -ENOMEM;
+       }
+
+       bnad->reglen = len << 2;
+       rb = bfa_ioc_bar0(ioc);
+       addr &= BFA_REG_ADDRMSK(ioc);
+
+       /* offset and len sanity check */
+       rc = bna_reg_offset_check(ioc, addr, len);
+       if (rc) {
+               pr_warn("bna %s: Failed reg offset check\n",
+                       pci_name(bnad->pcidev));
+               kfree(bnad->regdata);
+               bnad->regdata = NULL;
+               bnad->reglen = 0;
+               return -EINVAL;
+       }
+
+       reg_addr = rb + addr;
+       regbuf =  (u32 *)bnad->regdata;
+       spin_lock_irqsave(&bnad->bna_lock, flags);
+       for (i = 0; i < len; i++) {
+               *regbuf = readl(reg_addr);
+               regbuf++;
+               reg_addr += sizeof(u32);
+       }
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+       return nbytes;
+}
+
+static ssize_t
+bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
+               size_t nbytes, loff_t *ppos)
+{
+       struct bnad_debug_info *debug = file->private_data;
+       struct bnad *bnad = (struct bnad *)debug->i_private;
+       struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc;
+       int addr, val, rc;
+       void __iomem *reg_addr;
+       unsigned long flags;
+       void *kern_buf;
+
+       /* Allocate memory to store the user space buf */
+       kern_buf = kzalloc(nbytes, GFP_KERNEL);
+       if (!kern_buf) {
+               pr_warn("bna %s: Failed to allocate user buffer\n",
+                       pci_name(bnad->pcidev));
+               return -ENOMEM;
+       }
+
+       if (copy_from_user(kern_buf, (void  __user *)buf, nbytes)) {
+               kfree(kern_buf);
+               return -ENOMEM;
+       }
+
+       rc = sscanf(kern_buf, "%x:%x", &addr, &val);
+       if (rc < 2) {
+               pr_warn("bna %s: Failed to read user buffer\n",
+                       pci_name(bnad->pcidev));
+               kfree(kern_buf);
+               return -EINVAL;
+       }
+       kfree(kern_buf);
+
+       addr &= BFA_REG_ADDRMSK(ioc); /* offset only 17 bit and word align */
+
+       /* offset and len sanity check */
+       rc = bna_reg_offset_check(ioc, addr, 1);
+       if (rc) {
+               pr_warn("bna %s: Failed reg offset check\n",
+                       pci_name(bnad->pcidev));
+               return -EINVAL;
+       }
+
+       reg_addr = (bfa_ioc_bar0(ioc)) + addr;
+       spin_lock_irqsave(&bnad->bna_lock, flags);
+       writel(val, reg_addr);
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+       return nbytes;
+}
+
+static int
+bnad_debugfs_release(struct inode *inode, struct file *file)
+{
+       struct bnad_debug_info *debug = file->private_data;
+
+       if (!debug)
+               return 0;
+
+       file->private_data = NULL;
+       kfree(debug);
+       return 0;
+}
+
+static int
+bnad_debugfs_buffer_release(struct inode *inode, struct file *file)
+{
+       struct bnad_debug_info *debug = file->private_data;
+
+       if (!debug)
+               return 0;
+
+       kfree(debug->debug_buffer);
+
+       file->private_data = NULL;
+       kfree(debug);
+       debug = NULL;
+       return 0;
+}
+
+static const struct file_operations bnad_debugfs_op_fwtrc = {
+       .owner          =       THIS_MODULE,
+       .open           =       bnad_debugfs_open_fwtrc,
+       .llseek         =       bnad_debugfs_lseek,
+       .read           =       bnad_debugfs_read,
+       .release        =       bnad_debugfs_buffer_release,
+};
+
+static const struct file_operations bnad_debugfs_op_fwsave = {
+       .owner          =       THIS_MODULE,
+       .open           =       bnad_debugfs_open_fwsave,
+       .llseek         =       bnad_debugfs_lseek,
+       .read           =       bnad_debugfs_read,
+       .release        =       bnad_debugfs_buffer_release,
+};
+
+static const struct file_operations bnad_debugfs_op_regrd = {
+       .owner          =       THIS_MODULE,
+       .open           =       bnad_debugfs_open_reg,
+       .llseek         =       bnad_debugfs_lseek,
+       .read           =       bnad_debugfs_read_regrd,
+       .write          =       bnad_debugfs_write_regrd,
+       .release        =       bnad_debugfs_release,
+};
+
+static const struct file_operations bnad_debugfs_op_regwr = {
+       .owner          =       THIS_MODULE,
+       .open           =       bnad_debugfs_open_reg,
+       .llseek         =       bnad_debugfs_lseek,
+       .write          =       bnad_debugfs_write_regwr,
+       .release        =       bnad_debugfs_release,
+};
+
+static const struct file_operations bnad_debugfs_op_drvinfo = {
+       .owner          =       THIS_MODULE,
+       .open           =       bnad_debugfs_open_drvinfo,
+       .llseek         =       bnad_debugfs_lseek,
+       .read           =       bnad_debugfs_read,
+       .release        =       bnad_debugfs_buffer_release,
+};
+
+struct bnad_debugfs_entry {
+       const char *name;
+       mode_t  mode;
+       const struct file_operations *fops;
+};
+
+static const struct bnad_debugfs_entry bnad_debugfs_files[] = {
+       { "fwtrc",  S_IFREG|S_IRUGO, &bnad_debugfs_op_fwtrc, },
+       { "fwsave", S_IFREG|S_IRUGO, &bnad_debugfs_op_fwsave, },
+       { "regrd",  S_IFREG|S_IRUGO|S_IWUSR, &bnad_debugfs_op_regrd, },
+       { "regwr",  S_IFREG|S_IWUSR, &bnad_debugfs_op_regwr, },
+       { "drvinfo", S_IFREG|S_IRUGO, &bnad_debugfs_op_drvinfo, },
+};
+
+static struct dentry *bna_debugfs_root;
+static atomic_t bna_debugfs_port_count;
+
+/* Initialize debugfs interface for BNA */
+void
+bnad_debugfs_init(struct bnad *bnad)
+{
+       const struct bnad_debugfs_entry *file;
+       char name[64];
+       int i;
+
+       /* Setup the BNA debugfs root directory */
+       if (!bna_debugfs_root) {
+               bna_debugfs_root = debugfs_create_dir("bna", NULL);
+               atomic_set(&bna_debugfs_port_count, 0);
+               if (!bna_debugfs_root) {
+                       pr_warn("BNA: debugfs root dir creation failed\n");
+                       return;
+               }
+       }
+
+       /* Setup the pci_dev debugfs directory for the port */
+       snprintf(name, sizeof(name), "pci_dev:%s", pci_name(bnad->pcidev));
+       if (!bnad->port_debugfs_root) {
+               bnad->port_debugfs_root =
+                       debugfs_create_dir(name, bna_debugfs_root);
+               if (!bnad->port_debugfs_root) {
+                       pr_warn("bna pci_dev %s: root dir creation failed\n",
+                               pci_name(bnad->pcidev));
+                       return;
+               }
+
+               atomic_inc(&bna_debugfs_port_count);
+
+               for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) {
+                       file = &bnad_debugfs_files[i];
+                       bnad->bnad_dentry_files[i] =
+                                       debugfs_create_file(file->name,
+                                                       file->mode,
+                                                       bnad->port_debugfs_root,
+                                                       bnad,
+                                                       file->fops);
+                       if (!bnad->bnad_dentry_files[i]) {
+                               pr_warn(
+                                    "BNA pci_dev:%s: create %s entry failed\n",
+                                    pci_name(bnad->pcidev), file->name);
+                               return;
+                       }
+               }
+       }
+}
+
+/* Uninitialize debugfs interface for BNA */
+void
+bnad_debugfs_uninit(struct bnad *bnad)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) {
+               if (bnad->bnad_dentry_files[i]) {
+                       debugfs_remove(bnad->bnad_dentry_files[i]);
+                       bnad->bnad_dentry_files[i] = NULL;
+               }
+       }
+
+       /* Remove the pci_dev debugfs directory for the port */
+       if (bnad->port_debugfs_root) {
+               debugfs_remove(bnad->port_debugfs_root);
+               bnad->port_debugfs_root = NULL;
+               atomic_dec(&bna_debugfs_port_count);
+       }
+
+       /* Remove the BNA debugfs root directory */
+       if (atomic_read(&bna_debugfs_port_count) == 0) {
+               debugfs_remove(bna_debugfs_root);
+               bna_debugfs_root = NULL;
+       }
+}
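
The debugfs support added above is table-driven: each exported file is one {name, mode, fops} row in bnad_debugfs_files[], the per-port directory is created under a shared "bna" root, and bna_debugfs_port_count keeps the root alive until the last port goes away. A minimal, self-contained sketch of the same table-driven pattern (all demo_* names are illustrative, not part of the bna driver):

    #include <linux/debugfs.h>
    #include <linux/fs.h>
    #include <linux/module.h>
    #include <linux/uaccess.h>

    static int demo_open(struct inode *inode, struct file *file)
    {
            file->private_data = inode->i_private;  /* per-device context */
            return 0;
    }

    static ssize_t demo_read(struct file *file, char __user *buf,
                             size_t len, loff_t *pos)
    {
            static const char msg[] = "hello from debugfs\n";

            return simple_read_from_buffer(buf, len, pos, msg, sizeof(msg) - 1);
    }

    static const struct file_operations demo_fops = {
            .owner  = THIS_MODULE,
            .open   = demo_open,
            .read   = demo_read,
            .llseek = default_llseek,
    };

    struct demo_debugfs_entry {
            const char *name;
            mode_t mode;
            const struct file_operations *fops;
    };

    static const struct demo_debugfs_entry demo_files[] = {
            { "stats", S_IFREG | S_IRUGO, &demo_fops },
            { "ctrl",  S_IFREG | S_IRUGO, &demo_fops },
    };

    static struct dentry *demo_root;

    static int __init demo_init(void)
    {
            int i;

            demo_root = debugfs_create_dir("demo", NULL);
            for (i = 0; i < ARRAY_SIZE(demo_files); i++)
                    debugfs_create_file(demo_files[i].name, demo_files[i].mode,
                                        demo_root, NULL, demo_files[i].fops);
            return 0;
    }

    static void __exit demo_exit(void)
    {
            debugfs_remove_recursive(demo_root);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
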
index 38d5c66..5f7be5a 100644
@@ -935,6 +935,143 @@ bnad_get_sset_count(struct net_device *netdev, int sset)
        }
 }
 
+static u32
+bnad_get_flash_partition_by_offset(struct bnad *bnad, u32 offset,
+                               u32 *base_offset)
+{
+       struct bfa_flash_attr *flash_attr;
+       struct bnad_iocmd_comp fcomp;
+       u32 i, flash_part = 0, ret;
+       unsigned long flags = 0;
+
+       flash_attr = kzalloc(sizeof(struct bfa_flash_attr), GFP_KERNEL);
+       if (!flash_attr)
+               return -ENOMEM;
+
+       fcomp.bnad = bnad;
+       fcomp.comp_status = 0;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bnad->bna_lock, flags);
+       ret = bfa_nw_flash_get_attr(&bnad->bna.flash, flash_attr,
+                               bnad_cb_completion, &fcomp);
+       if (ret != BFA_STATUS_OK) {
+               spin_unlock_irqrestore(&bnad->bna_lock, flags);
+               kfree(flash_attr);
+               goto out_err;
+       }
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+       wait_for_completion(&fcomp.comp);
+       ret = fcomp.comp_status;
+
+       /* Check for the flash type & base offset value */
+       if (ret == BFA_STATUS_OK) {
+               for (i = 0; i < flash_attr->npart; i++) {
+                       if (offset >= flash_attr->part[i].part_off &&
+                           offset < (flash_attr->part[i].part_off +
+                                     flash_attr->part[i].part_size)) {
+                               flash_part = flash_attr->part[i].part_type;
+                               *base_offset = flash_attr->part[i].part_off;
+                               break;
+                       }
+               }
+       }
+       kfree(flash_attr);
+       return flash_part;
+out_err:
+       return -EINVAL;
+}
+
+static int
+bnad_get_eeprom_len(struct net_device *netdev)
+{
+       return BFA_TOTAL_FLASH_SIZE;
+}
+
+static int
+bnad_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+               u8 *bytes)
+{
+       struct bnad *bnad = netdev_priv(netdev);
+       struct bnad_iocmd_comp fcomp;
+       u32 flash_part = 0, base_offset = 0;
+       unsigned long flags = 0;
+       int ret = 0;
+
+       /* Check if the flash read request is valid */
+       if (eeprom->magic != (bnad->pcidev->vendor |
+                            (bnad->pcidev->device << 16)))
+               return -EFAULT;
+
+       /* Query the flash partition based on the offset */
+       flash_part = bnad_get_flash_partition_by_offset(bnad,
+                               eeprom->offset, &base_offset);
+       if (flash_part <= 0)
+               return -EFAULT;
+
+       fcomp.bnad = bnad;
+       fcomp.comp_status = 0;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bnad->bna_lock, flags);
+       ret = bfa_nw_flash_read_part(&bnad->bna.flash, flash_part,
+                               bnad->id, bytes, eeprom->len,
+                               eeprom->offset - base_offset,
+                               bnad_cb_completion, &fcomp);
+       if (ret != BFA_STATUS_OK) {
+               spin_unlock_irqrestore(&bnad->bna_lock, flags);
+               goto done;
+       }
+
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+       wait_for_completion(&fcomp.comp);
+       ret = fcomp.comp_status;
+done:
+       return ret;
+}
+
+static int
+bnad_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+               u8 *bytes)
+{
+       struct bnad *bnad = netdev_priv(netdev);
+       struct bnad_iocmd_comp fcomp;
+       u32 flash_part = 0, base_offset = 0;
+       unsigned long flags = 0;
+       int ret = 0;
+
+       /* Check if the flash update request is valid */
+       if (eeprom->magic != (bnad->pcidev->vendor |
+                            (bnad->pcidev->device << 16)))
+               return -EINVAL;
+
+       /* Query the flash partition based on the offset */
+       flash_part = bnad_get_flash_partition_by_offset(bnad,
+                               eeprom->offset, &base_offset);
+       if (flash_part <= 0)
+               return -EFAULT;
+
+       fcomp.bnad = bnad;
+       fcomp.comp_status = 0;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bnad->bna_lock, flags);
+       ret = bfa_nw_flash_update_part(&bnad->bna.flash, flash_part,
+                               bnad->id, bytes, eeprom->len,
+                               eeprom->offset - base_offset,
+                               bnad_cb_completion, &fcomp);
+       if (ret != BFA_STATUS_OK) {
+               spin_unlock_irqrestore(&bnad->bna_lock, flags);
+               goto done;
+       }
+
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+       wait_for_completion(&fcomp.comp);
+       ret = fcomp.comp_status;
+done:
+       return ret;
+}
+
 static struct ethtool_ops bnad_ethtool_ops = {
        .get_settings = bnad_get_settings,
        .set_settings = bnad_set_settings,
@@ -949,7 +1086,10 @@ static struct ethtool_ops bnad_ethtool_ops = {
        .set_pauseparam = bnad_set_pauseparam,
        .get_strings = bnad_get_strings,
        .get_ethtool_stats = bnad_get_ethtool_stats,
-       .get_sset_count = bnad_get_sset_count
+       .get_sset_count = bnad_get_sset_count,
+       .get_eeprom_len = bnad_get_eeprom_len,
+       .get_eeprom = bnad_get_eeprom,
+       .set_eeprom = bnad_set_eeprom,
 };
 
 void
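
bnad_get_eeprom() and bnad_set_eeprom() expose the adapter flash through the standard ethtool EEPROM hooks: the request is checked against eeprom->magic (PCI vendor/device IDs), mapped to a flash partition by offset, and then issued as an asynchronous firmware command that is waited for with a completion. A sketch of that issue-and-wait shape, using placeholder fw_* names rather than real bna APIs:

    #include <linux/completion.h>
    #include <linux/spinlock.h>

    struct fw_comp {
            struct completion comp;
            int status;
    };

    /* Callback the (hypothetical) firmware layer invokes when the command ends. */
    static void fw_cmd_done(void *arg, int status)
    {
            struct fw_comp *c = arg;

            c->status = status;
            complete(&c->comp);
    }

    /* Issue an async command under the adapter lock, then sleep until it completes. */
    static int fw_cmd_sync(spinlock_t *lock,
                           int (*issue)(void *cb_arg, void (*cb)(void *, int)))
    {
            struct fw_comp c;
            unsigned long flags;
            int ret;

            init_completion(&c.comp);
            spin_lock_irqsave(lock, flags);
            ret = issue(&c, fw_cmd_done);
            spin_unlock_irqrestore(lock, flags);
            if (ret)
                    return ret;

            wait_for_completion(&c.comp);
            return c.status;
    }
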
index 7f7882d..65e4b28 100644
@@ -969,7 +969,7 @@ static int nb_callback(struct notifier_block *self, unsigned long event,
        case (NETEVENT_REDIRECT):{
                struct netevent_redirect *nr = ctx;
                cxgb_redirect(nr->old, nr->new);
-               cxgb_neigh_update(dst_get_neighbour(nr->new));
+               cxgb_neigh_update(dst_get_neighbour_noref(nr->new));
                break;
        }
        default:
@@ -1072,8 +1072,11 @@ static int is_offloading(struct net_device *dev)
 
 static void cxgb_neigh_update(struct neighbour *neigh)
 {
-       struct net_device *dev = neigh->dev;
+       struct net_device *dev;
 
+       if (!neigh)
+               return;
+       dev = neigh->dev;
        if (dev && (is_offloading(dev))) {
                struct t3cdev *tdev = dev2t3cdev(dev);
 
@@ -1107,6 +1110,7 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
 static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
 {
        struct net_device *olddev, *newdev;
+       struct neighbour *n;
        struct tid_info *ti;
        struct t3cdev *tdev;
        u32 tid;
@@ -1114,8 +1118,16 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
        struct l2t_entry *e;
        struct t3c_tid_entry *te;
 
-       olddev = dst_get_neighbour(old)->dev;
-       newdev = dst_get_neighbour(new)->dev;
+       n = dst_get_neighbour_noref(old);
+       if (!n)
+               return;
+       olddev = n->dev;
+
+       n = dst_get_neighbour_noref(new);
+       if (!n)
+               return;
+       newdev = n->dev;
+
        if (!is_offloading(olddev))
                return;
        if (!is_offloading(newdev)) {
@@ -1132,7 +1144,7 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
        }
 
        /* Add new L2T entry */
-       e = t3_l2t_get(tdev, dst_get_neighbour(new), newdev);
+       e = t3_l2t_get(tdev, new, newdev);
        if (!e) {
                printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
                       __func__);
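
The cxgb3 changes above switch to dst_get_neighbour_noref(), which hands back the dst's cached neighbour without taking a reference and may return NULL, so every caller now checks the result (and t3_l2t_get() takes the dst itself, doing its lookup under rcu_read_lock()). A small sketch of the NULL-safe lookup, assuming the same dst API as this tree:

    #include <linux/netdevice.h>
    #include <linux/rcupdate.h>
    #include <net/dst.h>
    #include <net/neighbour.h>

    /* Return the ifindex behind a route's neighbour, or -1 if none is cached. */
    static int dst_neigh_ifindex(struct dst_entry *dst)
    {
            struct neighbour *n;
            int ifindex = -1;

            rcu_read_lock();
            n = dst_get_neighbour_noref(dst);
            if (n)
                    ifindex = n->dev->ifindex;
            rcu_read_unlock();

            return ifindex;
    }
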
index 70fec8b..3fa3c88 100644
@@ -298,18 +298,31 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
        spin_unlock(&e->lock);
 }
 
-struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
+struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
                             struct net_device *dev)
 {
        struct l2t_entry *e = NULL;
+       struct neighbour *neigh;
+       struct port_info *p;
        struct l2t_data *d;
        int hash;
-       u32 addr = *(u32 *) neigh->primary_key;
-       int ifidx = neigh->dev->ifindex;
-       struct port_info *p = netdev_priv(dev);
-       int smt_idx = p->port_id;
+       u32 addr;
+       int ifidx;
+       int smt_idx;
 
        rcu_read_lock();
+       neigh = dst_get_neighbour_noref(dst);
+       if (!neigh)
+               goto done_rcu;
+
+       addr = *(u32 *) neigh->primary_key;
+       ifidx = neigh->dev->ifindex;
+
+       if (!dev)
+               dev = neigh->dev;
+       p = netdev_priv(dev);
+       smt_idx = p->port_id;
+
        d = L2DATA(cdev);
        if (!d)
                goto done_rcu;
@@ -323,7 +336,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
                        l2t_hold(d, e);
                        if (atomic_read(&e->refcnt) == 1)
                                reuse_entry(e, neigh);
-                       goto done;
+                       goto done_unlock;
                }
 
        /* Need to allocate a new entry */
@@ -344,7 +357,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
                        e->vlan = VLAN_NONE;
                spin_unlock(&e->lock);
        }
-done:
+done_unlock:
        write_unlock_bh(&d->lock);
 done_rcu:
        rcu_read_unlock();
index c5f5479..c4e8643 100644
@@ -109,7 +109,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb,
 
 void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
 void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
-struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
+struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
                             struct net_device *dev);
 int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
                     struct l2t_entry *e);
index a34e7ce..7b6b43d 100644
@@ -243,7 +243,7 @@ module_param_array(intr_cnt, uint, NULL, 0644);
 MODULE_PARM_DESC(intr_cnt,
                 "thresholds 1..3 for queue interrupt packet counters");
 
-static int vf_acls;
+static bool vf_acls;
 
 #ifdef CONFIG_PCI_IOV
 module_param(vf_acls, bool, 0644);
@@ -1871,30 +1871,30 @@ static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
        return err;
 }
 
-static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
+static u32 get_rss_table_size(struct net_device *dev)
 {
        const struct port_info *pi = netdev_priv(dev);
-       unsigned int n = min_t(unsigned int, p->size, pi->rss_size);
 
-       p->size = pi->rss_size;
+       return pi->rss_size;
+}
+
+static int get_rss_table(struct net_device *dev, u32 *p)
+{
+       const struct port_info *pi = netdev_priv(dev);
+       unsigned int n = pi->rss_size;
+
        while (n--)
-               p->ring_index[n] = pi->rss[n];
+               p[n] = pi->rss[n];
        return 0;
 }
 
-static int set_rss_table(struct net_device *dev,
-                        const struct ethtool_rxfh_indir *p)
+static int set_rss_table(struct net_device *dev, const u32 *p)
 {
        unsigned int i;
        struct port_info *pi = netdev_priv(dev);
 
-       if (p->size != pi->rss_size)
-               return -EINVAL;
-       for (i = 0; i < p->size; i++)
-               if (p->ring_index[i] >= pi->nqsets)
-                       return -EINVAL;
-       for (i = 0; i < p->size; i++)
-               pi->rss[i] = p->ring_index[i];
+       for (i = 0; i < pi->rss_size; i++)
+               pi->rss[i] = p[i];
        if (pi->adapter->flags & FULL_INIT_DONE)
                return write_rss(pi, pi->rss);
        return 0;
@@ -1989,6 +1989,7 @@ static struct ethtool_ops cxgb_ethtool_ops = {
        .get_wol           = get_wol,
        .set_wol           = set_wol,
        .get_rxnfc         = get_rxnfc,
+       .get_rxfh_indir_size = get_rss_table_size,
        .get_rxfh_indir    = get_rss_table,
        .set_rxfh_indir    = set_rss_table,
        .flash_device      = set_flash,
@@ -3448,7 +3449,7 @@ static int __devinit init_rss(struct adapter *adap)
                if (!pi->rss)
                        return -ENOMEM;
                for (j = 0; j < pi->rss_size; j++)
-                       pi->rss[j] = j % pi->nqsets;
+                       pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
        }
        return 0;
 }
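
cxgb4 is converted here to the reworked ethtool RSS indirection interface: the core queries the table size through .get_rxfh_indir_size and then passes a bare u32 array of that size to .get/.set_rxfh_indir, while ethtool_rxfh_indir_default() supplies the even spread across RX queues used at init time. A sketch of the new-style hooks (the DEMO_* names are illustrative):

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>
    #include <linux/string.h>

    #define DEMO_RSS_SIZE   128
    static u32 demo_rss[DEMO_RSS_SIZE];

    static u32 demo_get_rxfh_indir_size(struct net_device *dev)
    {
            return DEMO_RSS_SIZE;
    }

    static int demo_get_rxfh_indir(struct net_device *dev, u32 *indir)
    {
            memcpy(indir, demo_rss, sizeof(demo_rss));
            return 0;
    }

    static int demo_set_rxfh_indir(struct net_device *dev, const u32 *indir)
    {
            memcpy(demo_rss, indir, sizeof(demo_rss));
            return 0;
    }

    /* Default table: spread entries evenly across the active RX queues. */
    static void demo_init_rss(unsigned int nqueues)
    {
            unsigned int i;

            for (i = 0; i < DEMO_RSS_SIZE; i++)
                    demo_rss[i] = ethtool_rxfh_indir_default(i, nqueues);
    }
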
index fd6247b..bf0fc56 100644
@@ -212,23 +212,29 @@ int enic_dev_deinit_done(struct enic *enic, int *status)
 }
 
 /* rtnl lock is held */
-void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct enic *enic = netdev_priv(netdev);
+       int err;
 
        spin_lock(&enic->devcmd_lock);
-       enic_add_vlan(enic, vid);
+       err = enic_add_vlan(enic, vid);
        spin_unlock(&enic->devcmd_lock);
+
+       return err;
 }
 
 /* rtnl lock is held */
-void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct enic *enic = netdev_priv(netdev);
+       int err;
 
        spin_lock(&enic->devcmd_lock);
-       enic_del_vlan(enic, vid);
+       err = enic_del_vlan(enic, vid);
        spin_unlock(&enic->devcmd_lock);
+
+       return err;
 }
 
 int enic_dev_enable2(struct enic *enic, int active)
index 1f83a47..da1cba3 100644
@@ -46,8 +46,8 @@ int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
        int broadcast, int promisc, int allmulti);
 int enic_dev_add_addr(struct enic *enic, u8 *addr);
 int enic_dev_del_addr(struct enic *enic, u8 *addr);
-void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
-void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
 int enic_dev_notify_unset(struct enic *enic);
 int enic_dev_hang_notify(struct enic *enic);
 int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
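
enic's VLAN filter hooks now return int, matching the ndo_vlan_rx_add_vid/ndo_vlan_rx_kill_vid signatures used throughout this series so that device-command failures propagate back to the VLAN core instead of being silently dropped. A minimal sketch of the int-returning hooks wired into net_device_ops (demo_* names are illustrative):

    #include <linux/netdevice.h>

    static int demo_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
    {
            /* program the hardware VLAN filter; return a negative errno on failure */
            return 0;
    }

    static int demo_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
    {
            /* remove the filter entry; again, errors are reported, not swallowed */
            return 0;
    }

    static const struct net_device_ops demo_netdev_ops = {
            .ndo_vlan_rx_add_vid    = demo_vlan_rx_add_vid,
            .ndo_vlan_rx_kill_vid   = demo_vlan_rx_kill_vid,
    };
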
index 871bcaa..4d71f5a 100644
@@ -2127,14 +2127,9 @@ srom_search(struct net_device *dev, struct pci_dev *pdev)
     u_long iobase = 0;                     /* Clear upper 32 bits in Alphas */
     int i, j;
     struct de4x5_private *lp = netdev_priv(dev);
-    struct list_head *walk;
-
-    list_for_each(walk, &pdev->bus_list) {
-       struct pci_dev *this_dev = pci_dev_b(walk);
-
-       /* Skip the pci_bus list entry */
-       if (list_entry(walk, struct pci_bus, devices) == pdev->bus) continue;
+    struct pci_dev *this_dev;
 
+    list_for_each_entry(this_dev, &pdev->bus->devices, bus_list) {
        vendor = this_dev->vendor;
        device = this_dev->device << 8;
        if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) continue;
@@ -5196,7 +5191,7 @@ de4x5_parse_params(struct net_device *dev)
     struct de4x5_private *lp = netdev_priv(dev);
     char *p, *q, t;
 
-    lp->params.fdx = 0;
+    lp->params.fdx = false;
     lp->params.autosense = AUTO;
 
     if (args == NULL) return;
@@ -5206,7 +5201,7 @@ de4x5_parse_params(struct net_device *dev)
        t = *q;
        *q = '\0';
 
-       if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = 1;
+       if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true;
 
        if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
            if (strstr(p, "TP")) {
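
The de4x5 SROM scan drops the open-coded list walk (and the pci_dev_b()/list_entry gymnastics needed to skip the bus entry) in favour of list_for_each_entry() over pdev->bus->devices, which only ever visits struct pci_dev nodes. A small sketch of the same iteration:

    #include <linux/pci.h>

    /* Count the other devices that share a PCI bus with pdev. */
    static int count_bus_siblings(struct pci_dev *pdev)
    {
            struct pci_dev *sibling;
            int count = 0;

            list_for_each_entry(sibling, &pdev->bus->devices, bus_list) {
                    if (sibling != pdev)
                            count++;
            }

            return count;
    }
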
index 23a6539..c24fab1 100644
@@ -59,7 +59,7 @@ static const char version[] = "de600.c: $Revision: 1.41-2.5 $,  Bjorn Ekwall (bj
 
 #include "de600.h"
 
-static unsigned int check_lost = 1;
+static bool check_lost = true;
 module_param(check_lost, bool, 0);
 MODULE_PARM_DESC(check_lost, "If set then check for unplugged de600");
 
index 34f162d..995198d 100644
@@ -40,6 +40,7 @@
 #define OC_NAME                        "Emulex OneConnect 10Gbps NIC"
 #define OC_NAME_BE             OC_NAME "(be3)"
 #define OC_NAME_LANCER         OC_NAME "(Lancer)"
+#define OC_NAME_SH             OC_NAME "(Skyhawk)"
 #define DRV_DESC               "ServerEngines BladeEngine 10Gbps NIC Driver"
 
 #define BE_VENDOR_ID           0x19a2
@@ -50,6 +51,7 @@
 #define OC_DEVICE_ID2          0x710   /* Device Id for BE3 cards */
 #define OC_DEVICE_ID3          0xe220  /* Device id for Lancer cards */
 #define OC_DEVICE_ID4           0xe228   /* Device id for VF in Lancer */
+#define OC_DEVICE_ID5          0x720   /* Device Id for Skyhawk cards */
 
 static inline char *nic_name(struct pci_dev *pdev)
 {
@@ -63,6 +65,8 @@ static inline char *nic_name(struct pci_dev *pdev)
                return OC_NAME_LANCER;
        case BE_DEVICE_ID2:
                return BE3_NAME;
+       case OC_DEVICE_ID5:
+               return OC_NAME_SH;
        default:
                return BE_NAME;
        }
@@ -288,11 +292,11 @@ struct be_drv_stats {
 };
 
 struct be_vf_cfg {
-       unsigned char vf_mac_addr[ETH_ALEN];
-       int vf_if_handle;
-       int vf_pmac_id;
-       u16 vf_vlan_tag;
-       u32 vf_tx_rate;
+       unsigned char mac_addr[ETH_ALEN];
+       int if_handle;
+       int pmac_id;
+       u16 vlan_tag;
+       u32 tx_rate;
 };
 
 struct be_adapter {
@@ -368,16 +372,20 @@ struct be_adapter {
        u32 flash_status;
        struct completion flash_compl;
 
-       bool be3_native;
-       bool sriov_enabled;
-       struct be_vf_cfg *vf_cfg;
+       u32 num_vfs;
        u8 is_virtfn;
+       struct be_vf_cfg *vf_cfg;
+       bool be3_native;
        u32 sli_family;
        u8 hba_port_num;
        u16 pvid;
 };
 
 #define be_physfn(adapter) (!adapter->is_virtfn)
+#define        sriov_enabled(adapter)          (adapter->num_vfs > 0)
+#define for_all_vfs(adapter, vf_cfg, i)                                        \
+       for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
+               i++, vf_cfg++)
 
 /* BladeEngine Generation numbers */
 #define BE_GEN2 2
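
be2net replaces the sriov_enabled flag with the actual VF count, so sriov_enabled() becomes a predicate on adapter->num_vfs and for_all_vfs() walks the per-VF config array in one place instead of every loop re-deriving indices. A cut-down sketch of how such an iteration macro is used (fields and demo_* names are illustrative):

    #include <linux/types.h>

    struct demo_vf_cfg {
            int if_handle;
            int pmac_id;
    };

    struct demo_adapter {
            u32 num_vfs;
            struct demo_vf_cfg *vf_cfg;
    };

    #define demo_sriov_enabled(ad)  ((ad)->num_vfs > 0)
    #define demo_for_all_vfs(ad, cfg, i)                                    \
            for (i = 0, cfg = &(ad)->vf_cfg[i]; i < (ad)->num_vfs;          \
                 i++, cfg++)

    static void demo_vf_setup_init(struct demo_adapter *adapter)
    {
            struct demo_vf_cfg *vf_cfg;
            u32 vf;

            demo_for_all_vfs(adapter, vf_cfg, vf) {
                    vf_cfg->if_handle = -1;
                    vf_cfg->pmac_id = -1;
            }
    }
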
index 575c783..6ba2dc6 100644
@@ -520,16 +520,13 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
        return 0;
 }
 
-static void
-be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+static void be_get_ringparam(struct net_device *netdev,
+                            struct ethtool_ringparam *ring)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
-       ring->rx_max_pending = adapter->rx_obj[0].q.len;
-       ring->tx_max_pending = adapter->tx_obj[0].q.len;
-
-       ring->rx_pending = atomic_read(&adapter->rx_obj[0].q.used);
-       ring->tx_pending = atomic_read(&adapter->tx_obj[0].q.used);
+       ring->rx_max_pending = ring->rx_pending = adapter->rx_obj[0].q.len;
+       ring->tx_max_pending = ring->tx_pending = adapter->tx_obj[0].q.len;
 }
 
 static void
index 7236280..76f3a98 100644
@@ -27,13 +27,14 @@ MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
 MODULE_AUTHOR("ServerEngines Corporation");
 MODULE_LICENSE("GPL");
 
-static ushort rx_frag_size = 2048;
 static unsigned int num_vfs;
-module_param(rx_frag_size, ushort, S_IRUGO);
 module_param(num_vfs, uint, S_IRUGO);
-MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
 
+static ushort rx_frag_size = 2048;
+module_param(rx_frag_size, ushort, S_IRUGO);
+MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
+
 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
@@ -41,6 +42,7 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
+       { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
        { 0 }
 };
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -316,6 +318,8 @@ static void populate_be3_stats(struct be_adapter *adapter)
        struct be_drv_stats *drvs = &adapter->drv_stats;
 
        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
+       drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
+       drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
@@ -550,11 +554,26 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
 }
 
+static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
+                                       struct sk_buff *skb)
+{
+       u8 vlan_prio;
+       u16 vlan_tag;
+
+       vlan_tag = vlan_tx_tag_get(skb);
+       vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+       /* If vlan priority provided by OS is NOT in available bmap */
+       if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
+               vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
+                               adapter->recommended_prio;
+
+       return vlan_tag;
+}
+
 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
 {
-       u8 vlan_prio = 0;
-       u16 vlan_tag = 0;
+       u16 vlan_tag;
 
        memset(hdr, 0, sizeof(*hdr));
 
@@ -585,12 +604,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
 
        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
-               vlan_tag = vlan_tx_tag_get(skb);
-               vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
-               /* If vlan priority provided by OS is NOT in available bmap */
-               if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
-                       vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
-                                       adapter->recommended_prio;
+               vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }
 
@@ -693,6 +707,25 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;
 
+       /* For vlan tagged pkts, BE
+        * 1) calculates checksum even when CSO is not requested
+        * 2) calculates checksum wrongly for padded pkt less than
+        * 60 bytes long.
+        * As a workaround disable TX vlan offloading in such cases.
+        */
+       if (unlikely(vlan_tx_tag_present(skb) &&
+                    (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
+               skb = skb_share_check(skb, GFP_ATOMIC);
+               if (unlikely(!skb))
+                       goto tx_drop;
+
+               skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
+               if (unlikely(!skb))
+                       goto tx_drop;
+
+               skb->vlan_tci = 0;
+       }
+
        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
 
        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
@@ -720,6 +753,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
+tx_drop:
        return NETDEV_TX_OK;
 }
 
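
The be_xmit() hunk above works around hardware that miscomputes checksums for VLAN-tagged frames that are short or not using checksum offload: the tag is inserted in software and vlan_tci cleared so the NIC transmits a plain frame. A sketch of that fallback, using the same __vlan_put_tag()/skb_share_check() calls as the patch:

    #include <linux/gfp.h>
    #include <linux/if_vlan.h>
    #include <linux/skbuff.h>

    /* Insert the VLAN tag into the payload instead of relying on TX offload. */
    static struct sk_buff *demo_soft_vlan_tag(struct sk_buff *skb, u16 vlan_tag)
    {
            skb = skb_share_check(skb, GFP_ATOMIC);
            if (unlikely(!skb))
                    return NULL;

            skb = __vlan_put_tag(skb, vlan_tag);
            if (unlikely(!skb))
                    return NULL;

            skb->vlan_tci = 0;      /* tag is now in the frame itself */
            return skb;
    }
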
@@ -747,15 +781,15 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
  */
 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
 {
+       struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
-       u32 if_handle;
 
        if (vf) {
-               if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
-               vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
-               status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
+               vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
+               status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
+                                           1, 1, 0);
        }
 
        /* No need to further configure vids if in promiscuous mode */
@@ -780,31 +814,35 @@ static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
        return status;
 }
 
-static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
+static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
        adapter->vlans_added++;
        if (!be_physfn(adapter))
-               return;
+               return 0;
 
        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
+
+       return 0;
 }
 
-static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
+static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
        adapter->vlans_added--;
 
        if (!be_physfn(adapter))
-               return;
+               return 0;
 
        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
+
+       return 0;
 }
 
 static void be_set_rx_mode(struct net_device *netdev)
@@ -841,31 +879,30 @@ done:
 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
+       struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;
 
-       if (!adapter->sriov_enabled)
+       if (!sriov_enabled(adapter))
                return -EPERM;
 
-       if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
+       if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;
 
        if (lancer_chip(adapter)) {
                status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
        } else {
-               status = be_cmd_pmac_del(adapter,
-                               adapter->vf_cfg[vf].vf_if_handle,
-                               adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
+               status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
+                                        vf_cfg->pmac_id, vf + 1);
 
-               status = be_cmd_pmac_add(adapter, mac,
-                               adapter->vf_cfg[vf].vf_if_handle,
-                               &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
+               status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
+                                        &vf_cfg->pmac_id, vf + 1);
        }
 
        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
-               memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
+               memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
 
        return status;
 }
@@ -874,18 +911,19 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
+       struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
 
-       if (!adapter->sriov_enabled)
+       if (!sriov_enabled(adapter))
                return -EPERM;
 
-       if (vf >= num_vfs)
+       if (vf >= adapter->num_vfs)
                return -EINVAL;
 
        vi->vf = vf;
-       vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
-       vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
+       vi->tx_rate = vf_cfg->tx_rate;
+       vi->vlan = vf_cfg->vlan_tag;
        vi->qos = 0;
-       memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
+       memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
 
        return 0;
 }
@@ -896,17 +934,17 @@ static int be_set_vf_vlan(struct net_device *netdev,
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;
 
-       if (!adapter->sriov_enabled)
+       if (!sriov_enabled(adapter))
                return -EPERM;
 
-       if ((vf >= num_vfs) || (vlan > 4095))
+       if (vf >= adapter->num_vfs || vlan > 4095)
                return -EINVAL;
 
        if (vlan) {
-               adapter->vf_cfg[vf].vf_vlan_tag = vlan;
+               adapter->vf_cfg[vf].vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
-               adapter->vf_cfg[vf].vf_vlan_tag = 0;
+               adapter->vf_cfg[vf].vlan_tag = 0;
                adapter->vlans_added--;
        }
 
@@ -924,16 +962,16 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;
 
-       if (!adapter->sriov_enabled)
+       if (!sriov_enabled(adapter))
                return -EPERM;
 
-       if ((vf >= num_vfs) || (rate < 0))
+       if (vf >= adapter->num_vfs || rate < 0)
                return -EINVAL;
 
        if (rate > 10000)
                rate = 10000;
 
-       adapter->vf_cfg[vf].vf_tx_rate = rate;
+       adapter->vf_cfg[vf].tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
 
        if (status)
@@ -1649,8 +1687,7 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
 
 static int be_num_txqs_want(struct be_adapter *adapter)
 {
-       if ((num_vfs && adapter->sriov_enabled) ||
-               be_is_mc(adapter) ||
+       if (sriov_enabled(adapter) || be_is_mc(adapter) ||
                lancer_chip(adapter) || !be_physfn(adapter) ||
                adapter->generation == BE_GEN2)
                return 1;
@@ -1732,8 +1769,8 @@ static void be_rx_queues_destroy(struct be_adapter *adapter)
 static u32 be_num_rxqs_want(struct be_adapter *adapter)
 {
        if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
-               !adapter->sriov_enabled && be_physfn(adapter) &&
-               !be_is_mc(adapter)) {
+            !sriov_enabled(adapter) && be_physfn(adapter) &&
+            !be_is_mc(adapter)) {
                return 1 + MAX_RSS_QS; /* one default non-RSS queue */
        } else {
                dev_warn(&adapter->pdev->dev,
@@ -1933,6 +1970,7 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
        struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
        struct be_adapter *adapter =
                container_of(tx_eq, struct be_adapter, tx_eq);
+       struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
        struct be_tx_obj *txo;
        struct be_eth_tx_compl *txcp;
        int tx_compl, mcc_compl, status = 0;
@@ -1969,12 +2007,19 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
        mcc_compl = be_process_mcc(adapter, &status);
 
        if (mcc_compl) {
-               struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
                be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
        }
 
        napi_complete(napi);
 
+       /* Arm CQ again to regenerate EQEs for Lancer in INTx mode */
+       if (lancer_chip(adapter) && !msix_enabled(adapter)) {
+               for_all_tx_queues(adapter, txo, i)
+                       be_cq_notify(adapter, txo->cq.id, true, 0);
+
+               be_cq_notify(adapter, mcc_obj->cq.id, true, 0);
+       }
+
        be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
        adapter->drv_stats.tx_events++;
        return 1;
@@ -2080,27 +2125,28 @@ done:
 static int be_sriov_enable(struct be_adapter *adapter)
 {
        be_check_sriov_fn_type(adapter);
+
 #ifdef CONFIG_PCI_IOV
        if (be_physfn(adapter) && num_vfs) {
                int status, pos;
-               u16 nvfs;
+               u16 dev_vfs;
 
                pos = pci_find_ext_capability(adapter->pdev,
                                                PCI_EXT_CAP_ID_SRIOV);
                pci_read_config_word(adapter->pdev,
-                                       pos + PCI_SRIOV_TOTAL_VF, &nvfs);
+                                    pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);
 
-               if (num_vfs > nvfs) {
+               adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
+               if (adapter->num_vfs != num_vfs)
                        dev_info(&adapter->pdev->dev,
-                                       "Device supports %d VFs and not %d\n",
-                                       nvfs, num_vfs);
-                       num_vfs = nvfs;
-               }
+                                "Device supports %d VFs and not %d\n",
+                                adapter->num_vfs, num_vfs);
 
-               status = pci_enable_sriov(adapter->pdev, num_vfs);
-               adapter->sriov_enabled = status ? false : true;
+               status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+               if (status)
+                       adapter->num_vfs = 0;
 
-               if (adapter->sriov_enabled) {
+               if (adapter->num_vfs) {
                        adapter->vf_cfg = kcalloc(num_vfs,
                                                sizeof(struct be_vf_cfg),
                                                GFP_KERNEL);
@@ -2115,10 +2161,10 @@ static int be_sriov_enable(struct be_adapter *adapter)
 static void be_sriov_disable(struct be_adapter *adapter)
 {
 #ifdef CONFIG_PCI_IOV
-       if (adapter->sriov_enabled) {
+       if (sriov_enabled(adapter)) {
                pci_disable_sriov(adapter->pdev);
                kfree(adapter->vf_cfg);
-               adapter->sriov_enabled = false;
+               adapter->num_vfs = 0;
        }
 #endif
 }
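
be_sriov_enable() now clamps the requested VF count to the device's TotalVFs before calling pci_enable_sriov(), and records the count actually enabled in adapter->num_vfs (zero on failure) so the rest of the driver has a single source of truth. A sketch of that clamp-and-enable sequence, independent of the be2net structures:

    #include <linux/errno.h>
    #include <linux/pci.h>

    static int demo_enable_sriov(struct pci_dev *pdev, u16 requested, u16 *enabled)
    {
            u16 total_vfs = 0;
            int pos, err;

            *enabled = 0;

            pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
            if (!pos)
                    return -ENODEV;

            /* Never ask for more VFs than the device advertises. */
            pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &total_vfs);
            if (requested > total_vfs)
                    requested = total_vfs;

            err = pci_enable_sriov(pdev, requested);
            if (!err)
                    *enabled = requested;

            return err;
    }
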
@@ -2430,24 +2476,24 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
        u32 vf;
        int status = 0;
        u8 mac[ETH_ALEN];
+       struct be_vf_cfg *vf_cfg;
 
        be_vf_eth_addr_generate(adapter, mac);
 
-       for (vf = 0; vf < num_vfs; vf++) {
+       for_all_vfs(adapter, vf_cfg, vf) {
                if (lancer_chip(adapter)) {
                        status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
                } else {
                        status = be_cmd_pmac_add(adapter, mac,
-                                       adapter->vf_cfg[vf].vf_if_handle,
-                                       &adapter->vf_cfg[vf].vf_pmac_id,
-                                       vf + 1);
+                                                vf_cfg->if_handle,
+                                                &vf_cfg->pmac_id, vf + 1);
                }
 
                if (status)
                        dev_err(&adapter->pdev->dev,
                        "Mac address assignment failed for VF %d\n", vf);
                else
-                       memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
+                       memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
 
                mac[5] += 1;
        }
@@ -2456,25 +2502,23 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
 
 static void be_vf_clear(struct be_adapter *adapter)
 {
+       struct be_vf_cfg *vf_cfg;
        u32 vf;
 
-       for (vf = 0; vf < num_vfs; vf++) {
+       for_all_vfs(adapter, vf_cfg, vf) {
                if (lancer_chip(adapter))
                        be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
                else
-                       be_cmd_pmac_del(adapter,
-                                       adapter->vf_cfg[vf].vf_if_handle,
-                                       adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
-       }
+                       be_cmd_pmac_del(adapter, vf_cfg->if_handle,
+                                       vf_cfg->pmac_id, vf + 1);
 
-       for (vf = 0; vf < num_vfs; vf++)
-               be_cmd_if_destroy(adapter, adapter->vf_cfg[vf].vf_if_handle,
-                               vf + 1);
+               be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
+       }
 }
 
 static int be_clear(struct be_adapter *adapter)
 {
-       if (be_physfn(adapter) && adapter->sriov_enabled)
+       if (sriov_enabled(adapter))
                be_vf_clear(adapter);
 
        be_cmd_if_destroy(adapter, adapter->if_handle,  0);
@@ -2490,16 +2534,18 @@ static int be_clear(struct be_adapter *adapter)
 
 static void be_vf_setup_init(struct be_adapter *adapter)
 {
+       struct be_vf_cfg *vf_cfg;
        int vf;
 
-       for (vf = 0; vf < num_vfs; vf++) {
-               adapter->vf_cfg[vf].vf_if_handle = -1;
-               adapter->vf_cfg[vf].vf_pmac_id = -1;
+       for_all_vfs(adapter, vf_cfg, vf) {
+               vf_cfg->if_handle = -1;
+               vf_cfg->pmac_id = -1;
        }
 }
 
 static int be_vf_setup(struct be_adapter *adapter)
 {
+       struct be_vf_cfg *vf_cfg;
        u32 cap_flags, en_flags, vf;
        u16 lnk_speed;
        int status;
@@ -2508,11 +2554,9 @@ static int be_vf_setup(struct be_adapter *adapter)
 
        cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
                                BE_IF_FLAGS_MULTICAST;
-
-       for (vf = 0; vf < num_vfs; vf++) {
+       for_all_vfs(adapter, vf_cfg, vf) {
                status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
-                                       &adapter->vf_cfg[vf].vf_if_handle,
-                                       NULL, vf+1);
+                                         &vf_cfg->if_handle, NULL, vf + 1);
                if (status)
                        goto err;
        }
@@ -2521,12 +2565,12 @@ static int be_vf_setup(struct be_adapter *adapter)
        if (status)
                goto err;
 
-       for (vf = 0; vf < num_vfs; vf++) {
+       for_all_vfs(adapter, vf_cfg, vf) {
                status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
-                               vf + 1);
+                                                 vf + 1);
                if (status)
                        goto err;
-               adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
+               vf_cfg->tx_rate = lnk_speed * 10;
        }
        return 0;
 err:
@@ -2654,7 +2698,7 @@ static int be_setup(struct be_adapter *adapter)
 
        pcie_set_readrq(adapter->pdev, 4096);
 
-       if (be_physfn(adapter) && adapter->sriov_enabled) {
+       if (sriov_enabled(adapter)) {
                status = be_vf_setup(adapter);
                if (status)
                        goto err;
@@ -2666,6 +2710,19 @@ err:
        return status;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void be_netpoll(struct net_device *netdev)
+{
+       struct be_adapter *adapter = netdev_priv(netdev);
+       struct be_rx_obj *rxo;
+       int i;
+
+       event_handle(adapter, &adapter->tx_eq, false);
+       for_all_rx_queues(adapter, rxo, i)
+               event_handle(adapter, &rxo->rx_eq, true);
+}
+#endif
+
 #define FW_FILE_HDR_SIGN       "ServerEngines Corp. "
 static bool be_flash_redboot(struct be_adapter *adapter,
                        const u8 *p, u32 img_start, int image_size,
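
be_netpoll() gives the driver a .ndo_poll_controller hook so netconsole/netpoll can drain events when normal interrupt delivery cannot be relied on; here it simply drives the TX and RX event queues by hand. A generic sketch of the hook, where kicking the driver's NAPI context stands in for be2net's event_handle() calls:

    #include <linux/netdevice.h>

    struct demo_priv {
            struct napi_struct napi;
    };

    #ifdef CONFIG_NET_POLL_CONTROLLER
    static void demo_netpoll(struct net_device *netdev)
    {
            struct demo_priv *priv = netdev_priv(netdev);

            /* Process pending work as the interrupt handler would have. */
            napi_schedule(&priv->napi);
    }
    #endif
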
@@ -3014,7 +3071,10 @@ static struct net_device_ops be_netdev_ops = {
        .ndo_set_vf_mac         = be_set_vf_mac,
        .ndo_set_vf_vlan        = be_set_vf_vlan,
        .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
-       .ndo_get_vf_config      = be_get_vf_config
+       .ndo_get_vf_config      = be_get_vf_config,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = be_netpoll,
+#endif
 };
 
 static void be_netdev_init(struct net_device *netdev)
@@ -3261,6 +3321,7 @@ static int be_dev_family_check(struct be_adapter *adapter)
                break;
        case BE_DEVICE_ID2:
        case OC_DEVICE_ID2:
+       case OC_DEVICE_ID5:
                adapter->generation = BE_GEN3;
                break;
        case OC_DEVICE_ID3:
index 5272f9d..820de8b 100644
@@ -21,7 +21,7 @@ config NET_VENDOR_FREESCALE
 if NET_VENDOR_FREESCALE
 
 config FEC
-       bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
+       tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
        depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
                   ARCH_MXC || ARCH_MXS)
        default ARCH_MXC || ARCH_MXS if ARM
index 1124ce0..4ea2bdc 100644
@@ -99,7 +99,7 @@ static struct platform_device_id fec_devtype[] = {
 MODULE_DEVICE_TABLE(platform, fec_devtype);
 
 enum imx_fec_type {
-       IMX25_FEC = 1,  /* runs on i.mx25/50/53 */
+       IMX25_FEC = 1,  /* runs on i.mx25/50/53 */
        IMX27_FEC,      /* runs on i.mx27/35/51 */
        IMX28_FEC,
        IMX6Q_FEC,
@@ -132,7 +132,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 #elif defined (CONFIG_M5272C3)
 #define        FEC_FLASHMAC    (0xffe04000 + 4)
 #elif defined(CONFIG_MOD5272)
-#define FEC_FLASHMAC   0xffc0406b
+#define FEC_FLASHMAC   0xffc0406b
 #else
 #define        FEC_FLASHMAC    0
 #endif
@@ -232,6 +232,7 @@ struct fec_enet_private {
        struct  platform_device *pdev;
 
        int     opened;
+       int     dev_id;
 
        /* Phylib and MDIO interface */
        struct  mii_bus *mii_bus;
@@ -259,6 +260,8 @@ struct fec_enet_private {
 /* Transmitter timeout */
 #define TX_TIMEOUT (2 * HZ)
 
+static int mii_cnt;
+
 static void *swap_buffer(void *bufaddr, int len)
 {
        int i;
@@ -515,6 +518,7 @@ fec_stop(struct net_device *ndev)
        struct fec_enet_private *fep = netdev_priv(ndev);
        const struct platform_device_id *id_entry =
                                platform_get_device_id(fep->pdev);
+       u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
 
        /* We cannot expect a graceful transmit stop without link !!! */
        if (fep->link) {
@@ -531,8 +535,10 @@ fec_stop(struct net_device *ndev)
        writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
 
        /* We have to keep ENET enabled to have MII interrupt stay working */
-       if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+       if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
                writel(2, fep->hwp + FEC_ECNTRL);
+               writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
+       }
 }
 
 
@@ -818,7 +824,7 @@ static void __inline__ fec_get_mac(struct net_device *ndev)
                        iap = (unsigned char *)FEC_FLASHMAC;
 #else
                if (pdata)
-                       memcpy(iap, pdata->mac, ETH_ALEN);
+                       iap = (unsigned char *)&pdata->mac;
 #endif
        }
 
@@ -837,7 +843,7 @@ static void __inline__ fec_get_mac(struct net_device *ndev)
 
        /* Adjust MAC if using macaddr */
        if (iap == macaddr)
-                ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
+                ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
 }
 
 /* ------------------------------------------------------------------------- */
@@ -865,6 +871,8 @@ static void fec_enet_adjust_link(struct net_device *ndev)
        if (phy_dev->link) {
                if (fep->full_duplex != phy_dev->duplex) {
                        fec_restart(ndev, phy_dev->duplex);
+                       /* prevent unnecessary second fec_restart() below */
+                       fep->link = phy_dev->link;
                        status_change = 1;
                }
        }
@@ -953,7 +961,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
        char mdio_bus_id[MII_BUS_ID_SIZE];
        char phy_name[MII_BUS_ID_SIZE + 3];
        int phy_id;
-       int dev_id = fep->pdev->id;
+       int dev_id = fep->dev_id;
 
        fep->phy_dev = NULL;
 
@@ -972,8 +980,9 @@ static int fec_enet_mii_probe(struct net_device *ndev)
        }
 
        if (phy_id >= PHY_MAX_ADDR) {
-               printk(KERN_INFO "%s: no PHY, assuming direct connection "
-                       "to switch\n", ndev->name);
+               printk(KERN_INFO
+                       "%s: no PHY, assuming direct connection to switch\n",
+                       ndev->name);
                strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
                phy_id = 0;
        }
@@ -998,8 +1007,9 @@ static int fec_enet_mii_probe(struct net_device *ndev)
        fep->link = 0;
        fep->full_duplex = 0;
 
-       printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
-               "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
+       printk(KERN_INFO
+               "%s: Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+               ndev->name,
                fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
                fep->phy_dev->irq);
 
@@ -1031,10 +1041,14 @@ static int fec_enet_mii_init(struct platform_device *pdev)
         * mdio interface in board design, and need to be configured by
         * fec0 mii_bus.
         */
-       if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id > 0) {
+       if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
                /* fec1 uses fec0 mii_bus */
-               fep->mii_bus = fec0_mii_bus;
-               return 0;
+               if (mii_cnt && fec0_mii_bus) {
+                       fep->mii_bus = fec0_mii_bus;
+                       mii_cnt++;
+                       return 0;
+               }
+               return -ENOENT;
        }
 
        fep->mii_timeout = 0;
@@ -1063,7 +1077,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
        fep->mii_bus->read = fec_enet_mdio_read;
        fep->mii_bus->write = fec_enet_mdio_write;
        fep->mii_bus->reset = fec_enet_mdio_reset;
-       snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id + 1);
+       snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", fep->dev_id + 1);
        fep->mii_bus->priv = fep;
        fep->mii_bus->parent = &pdev->dev;
 
@@ -1079,6 +1093,8 @@ static int fec_enet_mii_init(struct platform_device *pdev)
        if (mdiobus_register(fep->mii_bus))
                goto err_out_free_mdio_irq;
 
+       mii_cnt++;
+
        /* save fec0 mii_bus */
        if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
                fec0_mii_bus = fep->mii_bus;
@@ -1095,11 +1111,11 @@ err_out:
 
 static void fec_enet_mii_remove(struct fec_enet_private *fep)
 {
-       if (fep->phy_dev)
-               phy_disconnect(fep->phy_dev);
-       mdiobus_unregister(fep->mii_bus);
-       kfree(fep->mii_bus->irq);
-       mdiobus_free(fep->mii_bus);
+       if (--mii_cnt == 0) {
+               mdiobus_unregister(fep->mii_bus);
+               kfree(fep->mii_bus->irq);
+               mdiobus_free(fep->mii_bus);
+       }
 }
 
 static int fec_enet_get_settings(struct net_device *ndev,
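
The FEC MII changes account for two MACs sharing one MDIO bus: fec0 registers the bus, fec1 reuses it (failing with -ENOENT if it is not there yet), and the new mii_cnt makes sure only the last user unregisters and frees it. A sketch of that shared-bus refcounting (demo_* names are illustrative):

    #include <linux/phy.h>

    static struct mii_bus *demo_shared_bus;
    static int demo_bus_users;

    /* First MAC publishes its freshly registered bus; later MACs reuse it. */
    static struct mii_bus *demo_mii_get(struct mii_bus *newly_registered)
    {
            if (!demo_shared_bus)
                    demo_shared_bus = newly_registered;
            demo_bus_users++;
            return demo_shared_bus;
    }

    static void demo_mii_put(void)
    {
            if (--demo_bus_users)
                    return;         /* other MACs still use the bus */

            mdiobus_unregister(demo_shared_bus);
            mdiobus_free(demo_shared_bus);
            demo_shared_bus = NULL;
    }
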
@@ -1521,6 +1537,7 @@ fec_probe(struct platform_device *pdev)
        int i, irq, ret = 0;
        struct resource *r;
        const struct of_device_id *of_id;
+       static int dev_id;
 
        of_id = of_match_device(fec_dt_ids, &pdev->dev);
        if (of_id)
@@ -1548,6 +1565,7 @@ fec_probe(struct platform_device *pdev)
 
        fep->hwp = ioremap(r->start, resource_size(r));
        fep->pdev = pdev;
+       fep->dev_id = dev_id++;
 
        if (!fep->hwp) {
                ret = -ENOMEM;
@@ -1571,8 +1589,12 @@ fec_probe(struct platform_device *pdev)
 
        for (i = 0; i < FEC_IRQ_NUM; i++) {
                irq = platform_get_irq(pdev, i);
-               if (i && irq < 0)
-                       break;
+               if (irq < 0) {
+                       if (i)
+                               break;
+                       ret = irq;
+                       goto failed_irq;
+               }
                ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
                if (ret) {
                        while (--i >= 0) {
@@ -1583,7 +1605,7 @@ fec_probe(struct platform_device *pdev)
                }
        }
 
-       fep->clk = clk_get(&pdev->dev, "fec_clk");
+       fep->clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(fep->clk)) {
                ret = PTR_ERR(fep->clk);
                goto failed_clk;
@@ -1635,13 +1657,18 @@ fec_drv_remove(struct platform_device *pdev)
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct resource *r;
+       int i;
 
-       fec_stop(ndev);
+       unregister_netdev(ndev);
        fec_enet_mii_remove(fep);
+       for (i = 0; i < FEC_IRQ_NUM; i++) {
+               int irq = platform_get_irq(pdev, i);
+               if (irq > 0)
+                       free_irq(irq, ndev);
+       }
        clk_disable(fep->clk);
        clk_put(fep->clk);
        iounmap(fep->hwp);
-       unregister_netdev(ndev);
        free_netdev(ndev);
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
index f109602..9eb8159 100644
@@ -356,14 +356,13 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
 
                if (prop)
                        tbiaddr = *prop;
+       }
 
-               if (tbiaddr == -1) {
-                       err = -EBUSY;
-
-                       goto err_free_irqs;
-               } else {
-                       out_be32(tbipa, tbiaddr);
-               }
+       if (tbiaddr == -1) {
+               err = -EBUSY;
+               goto err_free_irqs;
+       } else {
+               out_be32(tbipa, tbiaddr);
        }
 
        err = of_mdiobus_register(new_bus, np);
index bfeccbf..3554414 100644
@@ -2114,17 +2114,19 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 }
 
-static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_adapter *adapter = port->adapter;
        struct hcp_ehea_port_cb1 *cb1;
        int index;
        u64 hret;
+       int err = 0;
 
        cb1 = (void *)get_zeroed_page(GFP_KERNEL);
        if (!cb1) {
                pr_err("no mem for cb1\n");
+               err = -ENOMEM;
                goto out;
        }
 
@@ -2132,6 +2134,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
                                      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
        if (hret != H_SUCCESS) {
                pr_err("query_ehea_port failed\n");
+               err = -EINVAL;
                goto out;
        }
 
@@ -2140,24 +2143,28 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 
        hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
                                       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
-       if (hret != H_SUCCESS)
+       if (hret != H_SUCCESS) {
                pr_err("modify_ehea_port failed\n");
+               err = -EINVAL;
+       }
 out:
        free_page((unsigned long)cb1);
-       return;
+       return err;
 }
 
-static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_adapter *adapter = port->adapter;
        struct hcp_ehea_port_cb1 *cb1;
        int index;
        u64 hret;
+       int err = 0;
 
        cb1 = (void *)get_zeroed_page(GFP_KERNEL);
        if (!cb1) {
                pr_err("no mem for cb1\n");
+               err = -ENOMEM;
                goto out;
        }
 
@@ -2165,6 +2172,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
                                      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
        if (hret != H_SUCCESS) {
                pr_err("query_ehea_port failed\n");
+               err = -EINVAL;
                goto out;
        }
 
@@ -2173,10 +2181,13 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 
        hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
                                       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
-       if (hret != H_SUCCESS)
+       if (hret != H_SUCCESS) {
                pr_err("modify_ehea_port failed\n");
+               err = -EINVAL;
+       }
 out:
        free_page((unsigned long)cb1);
+       return err;
 }
 
 int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
index 82f4ef1..985d589 100644
@@ -169,8 +169,8 @@ static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
 static bool e1000_vlan_used(struct e1000_adapter *adapter);
 static void e1000_vlan_mode(struct net_device *netdev,
                            netdev_features_t features);
-static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
-static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
 static void e1000_restore_vlan(struct e1000_adapter *adapter);
 
 #ifdef CONFIG_PM
@@ -1185,7 +1185,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
                if (global_quad_port_a != 0)
                        adapter->eeprom_wol = 0;
                else
-                       adapter->quad_port_a = 1;
+                       adapter->quad_port_a = true;
                /* Reset for multiple quad port adapters */
                if (++global_quad_port_a == 4)
                        global_quad_port_a = 0;
@@ -1679,7 +1679,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
         * need this to apply a workaround later in the send path. */
        if (hw->mac_type == e1000_82544 &&
            hw->bus_type == e1000_bus_type_pcix)
-               adapter->pcix_82544 = 1;
+               adapter->pcix_82544 = true;
 
        ew32(TCTL, tctl);
 
@@ -2002,7 +2002,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
 
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
-       tx_ring->last_tx_tso = 0;
+       tx_ring->last_tx_tso = false;
 
        writel(0, hw->hw_addr + tx_ring->tdh);
        writel(0, hw->hw_addr + tx_ring->tdt);
@@ -2851,7 +2851,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                 * DMA'd to the controller */
                if (!skb->data_len && tx_ring->last_tx_tso &&
                    !skb_is_gso(skb)) {
-                       tx_ring->last_tx_tso = 0;
+                       tx_ring->last_tx_tso = false;
                        size -= 4;
                }
 
@@ -3219,7 +3219,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 
        if (likely(tso)) {
                if (likely(hw->mac_type != e1000_82544))
-                       tx_ring->last_tx_tso = 1;
+                       tx_ring->last_tx_tso = true;
                tx_flags |= E1000_TX_FLAGS_TSO;
        } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
                tx_flags |= E1000_TX_FLAGS_CSUM;
@@ -4604,7 +4604,7 @@ static void e1000_vlan_mode(struct net_device *netdev,
                e1000_irq_enable(adapter);
 }
 
-static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -4613,7 +4613,7 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        if ((hw->mng_cookie.status &
             E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
            (vid == adapter->mng_vlan_id))
-               return;
+               return 0;
 
        if (!e1000_vlan_used(adapter))
                e1000_vlan_filter_on_off(adapter, true);
@@ -4625,9 +4625,11 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        e1000_write_vfta(hw, index, vfta);
 
        set_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
-static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -4648,6 +4650,8 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 
        if (!e1000_vlan_used(adapter))
                e1000_vlan_filter_on_off(adapter, false);
+
+       return 0;
 }
 
 static void e1000_restore_vlan(struct e1000_adapter *adapter)
index 9fe18d1..f478a22 100644 (file)
@@ -309,6 +309,7 @@ struct e1000_adapter {
        u32 txd_cmd;
 
        bool detect_tx_hung;
+       bool tx_hang_recheck;
        u8 tx_timeout_factor;
 
        u32 tx_int_delay;
index c6e9763..3911401 100644 (file)
@@ -859,7 +859,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
        u32 length, staterr;
        unsigned int i;
        int cleaned_count = 0;
-       bool cleaned = 0;
+       bool cleaned = false;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 
        i = rx_ring->next_to_clean;
@@ -888,7 +888,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 
                next_buffer = &rx_ring->buffer_info[i];
 
-               cleaned = 1;
+               cleaned = true;
                cleaned_count++;
                dma_unmap_single(&pdev->dev,
                                 buffer_info->dma,
@@ -1014,6 +1014,7 @@ static void e1000_print_hw_hang(struct work_struct *work)
        struct e1000_adapter *adapter = container_of(work,
                                                     struct e1000_adapter,
                                                     print_hang_task);
+       struct net_device *netdev = adapter->netdev;
        struct e1000_ring *tx_ring = adapter->tx_ring;
        unsigned int i = tx_ring->next_to_clean;
        unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
@@ -1025,6 +1026,21 @@ static void e1000_print_hw_hang(struct work_struct *work)
        if (test_bit(__E1000_DOWN, &adapter->state))
                return;
 
+       if (!adapter->tx_hang_recheck &&
+           (adapter->flags2 & FLAG2_DMA_BURST)) {
+               /* May be blocked on write-back; flush pending descriptor
+                * writebacks to memory and detect the hang again.
+                */
+               ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+               /* execute the writes immediately */
+               e1e_flush();
+               adapter->tx_hang_recheck = true;
+               return;
+       }
+       /* Real hang detected */
+       adapter->tx_hang_recheck = false;
+       netif_stop_queue(netdev);
+
        e1e_rphy(hw, PHY_STATUS, &phy_status);
        e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
        e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
@@ -1141,14 +1157,14 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
                 * Detect a transmit hang in hardware, this serializes the
                 * check with the clearing of time_stamp and movement of i
                 */
-               adapter->detect_tx_hung = 0;
+               adapter->detect_tx_hung = false;
                if (tx_ring->buffer_info[i].time_stamp &&
                    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
                               + (adapter->tx_timeout_factor * HZ)) &&
-                   !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+                   !(er32(STATUS) & E1000_STATUS_TXOFF))
                        schedule_work(&adapter->print_hang_task);
-                       netif_stop_queue(netdev);
-               }
+               else
+                       adapter->tx_hang_recheck = false;
        }
        adapter->total_tx_bytes += total_tx_bytes;
        adapter->total_tx_packets += total_tx_packets;
@@ -1176,7 +1192,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
        unsigned int i, j;
        u32 length, staterr;
        int cleaned_count = 0;
-       bool cleaned = 0;
+       bool cleaned = false;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 
        i = rx_ring->next_to_clean;
@@ -1202,7 +1218,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 
                next_buffer = &rx_ring->buffer_info[i];
 
-               cleaned = 1;
+               cleaned = true;
                cleaned_count++;
                dma_unmap_single(&pdev->dev, buffer_info->dma,
                                 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
@@ -2506,7 +2522,7 @@ clean_rx:
        return work_done;
 }
 
-static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -2516,7 +2532,7 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        if ((adapter->hw.mng_cookie.status &
             E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
            (vid == adapter->mng_vlan_id))
-               return;
+               return 0;
 
        /* add VID to filter table */
        if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
@@ -2527,9 +2543,11 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        }
 
        set_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
-static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -2540,7 +2558,7 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
            (vid == adapter->mng_vlan_id)) {
                /* release control to f/w */
                e1000e_release_hw_control(adapter);
-               return;
+               return 0;
        }
 
        /* remove VID from filter table */
@@ -2552,6 +2570,8 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        }
 
        clear_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
 /**
@@ -3500,7 +3520,6 @@ int e1000e_up(struct e1000_adapter *adapter)
 
        clear_bit(__E1000_DOWN, &adapter->state);
 
-       napi_enable(&adapter->napi);
        if (adapter->msix_entries)
                e1000_configure_msix(adapter);
        e1000_irq_enable(adapter);
@@ -3562,7 +3581,6 @@ void e1000e_down(struct e1000_adapter *adapter)
        e1e_flush();
        usleep_range(10000, 20000);
 
-       napi_disable(&adapter->napi);
        e1000_irq_disable(adapter);
 
        del_timer_sync(&adapter->watchdog_timer);
@@ -3838,6 +3856,7 @@ static int e1000_open(struct net_device *netdev)
 
        e1000_irq_enable(adapter);
 
+       adapter->tx_hang_recheck = false;
        netif_start_queue(netdev);
 
        adapter->idle_check = true;
@@ -3884,6 +3903,8 @@ static int e1000_close(struct net_device *netdev)
 
        pm_runtime_get_sync(&pdev->dev);
 
+       napi_disable(&adapter->napi);
+
        if (!test_bit(__E1000_DOWN, &adapter->state)) {
                e1000e_down(adapter);
                e1000_free_irq(adapter);
@@ -4236,7 +4257,7 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
 static bool e1000e_has_link(struct e1000_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
-       bool link_active = 0;
+       bool link_active = false;
        s32 ret_val = 0;
 
        /*
@@ -4251,7 +4272,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
                        ret_val = hw->mac.ops.check_for_link(hw);
                        link_active = !hw->mac.get_link_status;
                } else {
-                       link_active = 1;
+                       link_active = true;
                }
                break;
        case e1000_media_type_fiber:
@@ -4350,7 +4371,7 @@ static void e1000_watchdog_task(struct work_struct *work)
 
        if (link) {
                if (!netif_carrier_ok(netdev)) {
-                       bool txb2b = 1;
+                       bool txb2b = true;
 
                        /* Cancel scheduled suspend requests. */
                        pm_runtime_resume(netdev->dev.parent);
@@ -4383,11 +4404,11 @@ static void e1000_watchdog_task(struct work_struct *work)
                        adapter->tx_timeout_factor = 1;
                        switch (adapter->link_speed) {
                        case SPEED_10:
-                               txb2b = 0;
+                               txb2b = false;
                                adapter->tx_timeout_factor = 16;
                                break;
                        case SPEED_100:
-                               txb2b = 0;
+                               txb2b = false;
                                adapter->tx_timeout_factor = 10;
                                break;
                        }
@@ -4523,7 +4544,7 @@ link_up:
        e1000e_flush_descriptors(adapter);
 
        /* Force detection of hung controller every watchdog period */
-       adapter->detect_tx_hung = 1;
+       adapter->detect_tx_hung = true;
 
        /*
         * With 82571 controllers, LAA may be overwritten due to controller
@@ -6187,7 +6208,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
        /* Initialize link parameters. User can change them with ethtool */
        adapter->hw.mac.autoneg = 1;
-       adapter->fc_autoneg = 1;
+       adapter->fc_autoneg = true;
        adapter->hw.fc.requested_mode = e1000_fc_default;
        adapter->hw.fc.current_mode = e1000_fc_default;
        adapter->hw.phy.autoneg_advertised = 0x2f;
index b66b8aa..89d576c 100644 (file)
@@ -148,8 +148,8 @@ static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
 static void igb_tx_timeout(struct net_device *);
 static void igb_reset_task(struct work_struct *);
 static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
-static void igb_vlan_rx_add_vid(struct net_device *, u16);
-static void igb_vlan_rx_kill_vid(struct net_device *, u16);
+static int igb_vlan_rx_add_vid(struct net_device *, u16);
+static int igb_vlan_rx_kill_vid(struct net_device *, u16);
 static void igb_restore_vlan(struct igb_adapter *);
 static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
 static void igb_ping_all_vfs(struct igb_adapter *);
@@ -6491,7 +6491,7 @@ static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
        igb_rlpml_set(adapter);
 }
 
-static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -6504,9 +6504,11 @@ static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        igb_vfta_set(hw, vid, true);
 
        set_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
-static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -6521,6 +6523,8 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
                igb_vfta_set(hw, vid, false);
 
        clear_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
 static void igb_restore_vlan(struct igb_adapter *adapter)
@@ -7061,15 +7065,28 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
                        wr32(E1000_DMCTXTH, 0);
 
                        /*
-                        * DMA Coalescing high water mark needs to be higher
-                        * than the RX threshold. set hwm to PBA -  2 * max
-                        * frame size
+                        * DMA Coalescing high water mark needs to be greater
+                        * than the Rx threshold. Set hwm to PBA - max frame
+                        * size in 16B units, but never lower than PBA - 6KB.
                         */
-                       hwm = pba - (2 * adapter->max_frame_size);
+                       hwm = 64 * pba - adapter->max_frame_size / 16;
+                       if (hwm < 64 * (pba - 6))
+                               hwm = 64 * (pba - 6);
+                       reg = rd32(E1000_FCRTC);
+                       reg &= ~E1000_FCRTC_RTH_COAL_MASK;
+                       reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
+                               & E1000_FCRTC_RTH_COAL_MASK);
+                       wr32(E1000_FCRTC, reg);
+
+                       /*
+                        * Set the DMA Coalescing Rx threshold to PBA - 2 * max
+                        * frame size, but never lower than PBA - 10KB.
+                        */
+                       dmac_thr = pba - adapter->max_frame_size / 512;
+                       if (dmac_thr < pba - 10)
+                               dmac_thr = pba - 10;
                        reg = rd32(E1000_DMACR);
                        reg &= ~E1000_DMACR_DMACTHR_MASK;
-                       dmac_thr = pba - 4;
-
                        reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
                                & E1000_DMACR_DMACTHR_MASK);
 
@@ -7085,7 +7102,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
                         * coalescing(smart fifb)-UTRESH=0
                         */
                        wr32(E1000_DMCRTRH, 0);
-                       wr32(E1000_FCRTC, hwm);
 
                        reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
 
index c358973..fd3da30 100644 (file)
@@ -1176,18 +1176,20 @@ static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
        e1000_rlpml_set_vf(hw, max_frame_size);
 }
 
-static void igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 
-       if (hw->mac.ops.set_vfta(hw, vid, true))
+       if (hw->mac.ops.set_vfta(hw, vid, true)) {
                dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
-       else
-               set_bit(vid, adapter->active_vlans);
+               return -EINVAL;
+       }
+       set_bit(vid, adapter->active_vlans);
+       return 0;
 }
 
-static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -1197,11 +1199,13 @@ static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        if (!test_bit(__IGBVF_DOWN, &adapter->state))
                igbvf_irq_enable(adapter);
 
-       if (hw->mac.ops.set_vfta(hw, vid, false))
+       if (hw->mac.ops.set_vfta(hw, vid, false)) {
                dev_err(&adapter->pdev->dev,
                        "Failed to remove vlan id %d\n", vid);
-       else
-               clear_bit(vid, adapter->active_vlans);
+               return -EINVAL;
+       }
+       clear_bit(vid, adapter->active_vlans);
+       return 0;
 }
 
 static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
index 247cf92..9bd5faf 100644 (file)
@@ -101,8 +101,8 @@ static void ixgb_tx_timeout_task(struct work_struct *work);
 
 static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
 static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
-static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
-static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+static int ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+static int ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
 static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -228,7 +228,7 @@ ixgb_up(struct ixgb_adapter *adapter)
        if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
                err = pci_enable_msi(adapter->pdev);
                if (!err) {
-                       adapter->have_msi = 1;
+                       adapter->have_msi = true;
                        irq_flags = 0;
                }
                /* proceed to try to request regular interrupt */
@@ -2217,7 +2217,7 @@ ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
        IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
 }
 
-static void
+static int
 ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -2230,9 +2230,11 @@ ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        vfta |= (1 << (vid & 0x1F));
        ixgb_write_vfta(&adapter->hw, index, vfta);
        set_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
-static void
+static int
 ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -2245,6 +2247,8 @@ ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        vfta &= ~(1 << (vid & 0x1F));
        ixgb_write_vfta(&adapter->hw, index, vfta);
        clear_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
 static void
index f1365fe..bdf535a 100644 (file)
@@ -2599,7 +2599,7 @@ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
 {
        ixgbe_link_speed speed = 0;
-       bool link_up = 0;
+       bool link_up = false;
        u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
        u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
 
index 33b93ff..da31735 100644 (file)
@@ -158,10 +158,6 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-       /* Abort a bad configuration */
-       if (ffs(up_map) > adapter->dcb_cfg.num_tcs.pg_tcs)
-               return;
-
        if (prio != DCB_ATTR_VALUE_UNDEFINED)
                adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio;
        if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
@@ -185,7 +181,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
 
        if (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
             adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)
-               adapter->dcb_set_bitmap |= BIT_PFC;
+               adapter->dcb_set_bitmap |= BIT_PFC | BIT_APP_UPCHG;
 }
 
 static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
@@ -206,10 +202,6 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-       /* Abort bad configurations */
-       if (ffs(up_map) > adapter->dcb_cfg.num_tcs.pg_tcs)
-               return;
-
        if (prio != DCB_ATTR_VALUE_UNDEFINED)
                adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio;
        if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
@@ -309,6 +301,27 @@ static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
        *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc;
 }
 
+#ifdef IXGBE_FCOE
+static void ixgbe_dcbnl_devreset(struct net_device *dev)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+       while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
+               usleep_range(1000, 2000);
+
+       if (netif_running(dev))
+               dev->netdev_ops->ndo_stop(dev);
+
+       ixgbe_clear_interrupt_scheme(adapter);
+       ixgbe_init_interrupt_scheme(adapter);
+
+       if (netif_running(dev))
+               dev->netdev_ops->ndo_open(dev);
+
+       clear_bit(__IXGBE_RESETTING, &adapter->state);
+}
+#endif
+
 static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -338,27 +351,6 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
        if (ret)
                return DCB_NO_HW_CHG;
 
-#ifdef IXGBE_FCOE
-       if (up && !(up & (1 << adapter->fcoe.up)))
-               adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
-
-       /*
-        * Only take down the adapter if an app change occurred. FCoE
-        * may shuffle tx rings in this case and this can not be done
-        * without a reset currently.
-        */
-       if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
-               while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
-                       usleep_range(1000, 2000);
-
-               adapter->fcoe.up = ffs(up) - 1;
-
-               if (netif_running(netdev))
-                       netdev->netdev_ops->ndo_stop(netdev);
-               ixgbe_clear_interrupt_scheme(adapter);
-       }
-#endif
-
        if (adapter->dcb_cfg.pfc_mode_enable) {
                switch (adapter->hw.mac.type) {
                case ixgbe_mac_82599EB:
@@ -385,15 +377,6 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
                }
        }
 
-#ifdef IXGBE_FCOE
-       if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
-               ixgbe_init_interrupt_scheme(adapter);
-               if (netif_running(netdev))
-                       netdev->netdev_ops->ndo_open(netdev);
-               ret = DCB_HW_CHG_RST;
-       }
-#endif
-
        if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
                u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
                u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
@@ -442,8 +425,19 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
        if (adapter->dcb_cfg.pfc_mode_enable)
                adapter->hw.fc.current_mode = ixgbe_fc_pfc;
 
-       if (adapter->dcb_set_bitmap & BIT_APP_UPCHG)
-               clear_bit(__IXGBE_RESETTING, &adapter->state);
+#ifdef IXGBE_FCOE
+       /* Reprogram FCoE hardware offloads when the traffic class
+        * FCoE is using changes. This happens if the APP info
+        * changes or the up2tc mapping is updated.
+        */
+       if ((up && !(up & (1 << adapter->fcoe.up))) ||
+           (adapter->dcb_set_bitmap & BIT_APP_UPCHG)) {
+               adapter->fcoe.up = ffs(up) - 1;
+               ixgbe_dcbnl_devreset(netdev);
+               ret = DCB_HW_CHG_RST;
+       }
+#endif
+
        adapter->dcb_set_bitmap = 0x00;
        return ret;
 }
@@ -661,22 +655,6 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
        return ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en, prio_tc);
 }
 
-#ifdef IXGBE_FCOE
-static void ixgbe_dcbnl_devreset(struct net_device *dev)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(dev);
-
-       if (netif_running(dev))
-               dev->netdev_ops->ndo_stop(dev);
-
-       ixgbe_clear_interrupt_scheme(adapter);
-       ixgbe_init_interrupt_scheme(adapter);
-
-       if (netif_running(dev))
-               dev->netdev_ops->ndo_open(dev);
-}
-#endif
-
 static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
                                   struct dcb_app *app)
 {
@@ -761,7 +739,9 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
                ixgbe_dcbnl_ieee_setets(dev, &ets);
                ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
        } else if (mode & DCB_CAP_DCBX_VER_CEE) {
-               adapter->dcb_set_bitmap |= (BIT_PFC & BIT_PG_TX & BIT_PG_RX);
+               u8 mask = BIT_PFC | BIT_PG_TX | BIT_PG_RX | BIT_APP_UPCHG;
+
+               adapter->dcb_set_bitmap |= mask;
                ixgbe_dcbnl_set_all(dev);
        } else {
                /* Drop into single TC mode strict priority as this
index 1b28ed9..fcf8d4e 100644 (file)
@@ -3044,7 +3044,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
        hw->mac.ops.enable_rx_dma(hw, rxctrl);
 }
 
-static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -3053,9 +3053,11 @@ static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        /* add VID to filter table */
        hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
        set_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
-static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -3064,6 +3066,8 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        /* remove VID from filter table */
        hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
        clear_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
 /**
@@ -4019,7 +4023,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 
                /* Mark all the VFs as inactive */
                for (i = 0 ; i < adapter->num_vfs; i++)
-                       adapter->vfinfo[i].clear_to_send = 0;
+                       adapter->vfinfo[i].clear_to_send = false;
 
                /* ping all the active vfs to let them know we are going down */
                ixgbe_ping_all_vfs(adapter);
index 9a56fd7..8b113e3 100644 (file)
@@ -1214,7 +1214,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
        u32 max_retry = 10;
        u32 retry = 0;
        u16 swfw_mask = 0;
-       bool nack = 1;
+       bool nack = true;
        *data = 0;
 
        if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
@@ -1421,7 +1421,7 @@ static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
 static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
 {
        s32 i;
-       bool bit = 0;
+       bool bit = false;
 
        for (i = 7; i >= 0; i--) {
                ixgbe_clock_in_i2c_bit(hw, &bit);
@@ -1443,7 +1443,7 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
        s32 status = 0;
        s32 i;
        u32 i2cctl;
-       bool bit = 0;
+       bool bit = false;
 
        for (i = 7; i >= 0; i--) {
                bit = (data >> i) & 0x1;
@@ -1473,7 +1473,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
        u32 i = 0;
        u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
        u32 timeout = 10;
-       bool ack = 1;
+       bool ack = true;
 
        ixgbe_raise_i2c_clk(hw, &i2cctl);
 
@@ -1646,9 +1646,9 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl)
        bool data;
 
        if (*i2cctl & IXGBE_I2C_DATA_IN)
-               data = 1;
+               data = true;
        else
-               data = 0;
+               data = false;
 
        return data;
 }
index df04f1a..e8badab 100644 (file)
@@ -33,7 +33,6 @@ void ixgbe_msg_task(struct ixgbe_adapter *adapter);
 int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
 void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
 void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
-void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
 int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
 int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
                           u8 qos);
index 5d1a643..891162d 100644 (file)
@@ -1403,7 +1403,7 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
        }
 }
 
-static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -1412,9 +1412,11 @@ static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        if (hw->mac.ops.set_vfta)
                hw->mac.ops.set_vfta(hw, vid, 0, true);
        set_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
-static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -1423,6 +1425,8 @@ static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        if (hw->mac.ops.set_vfta)
                hw->mac.ops.set_vfta(hw, vid, 0, false);
        clear_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
index d1aa45a..4a40ab9 100644 (file)
@@ -1,7 +1,7 @@
 obj-$(CONFIG_MLX4_CORE)                += mlx4_core.o
 
 mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-               mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o
+               mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o resource_tracker.o
 
 obj-$(CONFIG_MLX4_EN)               += mlx4_en.o
 
index 45aea9c..915e947 100644 (file)
@@ -48,7 +48,8 @@ static struct work_struct catas_work;
 static int internal_err_reset = 1;
 module_param(internal_err_reset, int, 0644);
 MODULE_PARM_DESC(internal_err_reset,
-                "Reset device on internal errors if non-zero (default 1)");
+                "Reset device on internal errors if non-zero"
+                " (default 1, in SRIOV mode default is 0)");
 
 static void dump_err_buf(struct mlx4_dev *dev)
 {
@@ -116,6 +117,10 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
        struct mlx4_priv *priv = mlx4_priv(dev);
        phys_addr_t addr;
 
+       /* If we are in SRIOV, the default of the module param must be 0 */
+       if (mlx4_is_mfunc(dev))
+               internal_err_reset = 0;
+
        INIT_LIST_HEAD(&priv->catas_err.list);
        init_timer(&priv->catas_err.timer);
        priv->catas_err.map = NULL;
index 78f5a1a..978f593 100644 (file)
 #include <linux/errno.h>
 
 #include <linux/mlx4/cmd.h>
+#include <linux/semaphore.h>
 
 #include <asm/io.h>
 
 #include "mlx4.h"
+#include "fw.h"
 
 #define CMD_POLL_TOKEN 0xffff
+#define INBOX_MASK     0xffffffffffffff00ULL
+
+#define CMD_CHAN_VER 1
+#define CMD_CHAN_IF_REV 1
 
 enum {
        /* command completed successfully: */
@@ -110,8 +116,12 @@ struct mlx4_cmd_context {
        int                     next;
        u64                     out_param;
        u16                     token;
+       u8                      fw_status;
 };
 
+static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
+                                   struct mlx4_vhcr_cmd *in_vhcr);
+
 static int mlx4_status_to_errno(u8 status)
 {
        static const int trans_table[] = {
@@ -142,6 +152,139 @@ static int mlx4_status_to_errno(u8 status)
        return trans_table[status];
 }
 
+static u8 mlx4_errno_to_status(int errno)
+{
+       switch (errno) {
+       case -EPERM:
+               return CMD_STAT_BAD_OP;
+       case -EINVAL:
+               return CMD_STAT_BAD_PARAM;
+       case -ENXIO:
+               return CMD_STAT_BAD_SYS_STATE;
+       case -EBUSY:
+               return CMD_STAT_RESOURCE_BUSY;
+       case -ENOMEM:
+               return CMD_STAT_EXCEED_LIM;
+       case -ENFILE:
+               return CMD_STAT_ICM_ERROR;
+       default:
+               return CMD_STAT_INTERNAL_ERR;
+       }
+}
+
+static int comm_pending(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       u32 status = readl(&priv->mfunc.comm->slave_read);
+
+       return (swab32(status) >> 31) != priv->cmd.comm_toggle;
+}
+
+static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       u32 val;
+
+       priv->cmd.comm_toggle ^= 1;
+       val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
+       __raw_writel((__force u32) cpu_to_be32(val),
+                    &priv->mfunc.comm->slave_write);
+       mmiowb();
+}
+
+static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
+                      unsigned long timeout)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       unsigned long end;
+       int err = 0;
+       int ret_from_pending = 0;
+
+       /* First, verify that the master reports correct status */
+       if (comm_pending(dev)) {
+               mlx4_warn(dev, "Communication channel is not idle. "
+                         "my toggle is %d (cmd:0x%x)\n",
+                         priv->cmd.comm_toggle, cmd);
+               return -EAGAIN;
+       }
+
+       /* Write command */
+       down(&priv->cmd.poll_sem);
+       mlx4_comm_cmd_post(dev, cmd, param);
+
+       end = msecs_to_jiffies(timeout) + jiffies;
+       while (comm_pending(dev) && time_before(jiffies, end))
+               cond_resched();
+       ret_from_pending = comm_pending(dev);
+       if (ret_from_pending) {
+               /* Check if the slave is trying to boot in the middle of
+                * the FLR process. The only non-zero result in the RESET
+                * command is MLX4_DELAY_RESET_SLAVE. */
+               if ((MLX4_COMM_CMD_RESET == cmd)) {
+                       mlx4_warn(dev, "Got slave FLRed from Communication"
+                                 " channel (ret:0x%x)\n", ret_from_pending);
+                       err = MLX4_DELAY_RESET_SLAVE;
+               } else {
+                       mlx4_warn(dev, "Communication channel timed out\n");
+                       err = -ETIMEDOUT;
+               }
+       }
+
+       up(&priv->cmd.poll_sem);
+       return err;
+}
+
+static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
+                             u16 param, unsigned long timeout)
+{
+       struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
+       struct mlx4_cmd_context *context;
+       int err = 0;
+
+       down(&cmd->event_sem);
+
+       spin_lock(&cmd->context_lock);
+       BUG_ON(cmd->free_head < 0);
+       context = &cmd->context[cmd->free_head];
+       context->token += cmd->token_mask + 1;
+       cmd->free_head = context->next;
+       spin_unlock(&cmd->context_lock);
+
+       init_completion(&context->done);
+
+       mlx4_comm_cmd_post(dev, op, param);
+
+       if (!wait_for_completion_timeout(&context->done,
+                                        msecs_to_jiffies(timeout))) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       err = context->result;
+       if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
+               mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
+                        op, context->fw_status);
+               goto out;
+       }
+
+out:
+       spin_lock(&cmd->context_lock);
+       context->next = cmd->free_head;
+       cmd->free_head = context - cmd->context;
+       spin_unlock(&cmd->context_lock);
+
+       up(&cmd->event_sem);
+       return err;
+}
+
+int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
+                 unsigned long timeout)
+{
+       if (mlx4_priv(dev)->cmd.use_events)
+               return mlx4_comm_cmd_wait(dev, cmd, param, timeout);
+       return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
+}
+
 static int cmd_pending(struct mlx4_dev *dev)
 {
        u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
@@ -167,8 +310,10 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
                end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
 
        while (cmd_pending(dev)) {
-               if (time_after_eq(jiffies, end))
+               if (time_after_eq(jiffies, end)) {
+                       mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
                        goto out;
+               }
                cond_resched();
        }
 
@@ -192,7 +337,7 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
                                               (cmd->toggle << HCR_T_BIT)       |
                                               (event ? (1 << HCR_E_BIT) : 0)   |
                                               (op_modifier << HCR_OPMOD_SHIFT) |
-                                              op),                       hcr + 6);
+                                              op), hcr + 6);
 
        /*
         * Make sure that our HCR writes don't get mixed in with
@@ -209,6 +354,62 @@ out:
        return ret;
 }
 
+static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
+                         int out_is_imm, u32 in_modifier, u8 op_modifier,
+                         u16 op, unsigned long timeout)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
+       int ret;
+
+       down(&priv->cmd.slave_sem);
+       vhcr->in_param = cpu_to_be64(in_param);
+       vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
+       vhcr->in_modifier = cpu_to_be32(in_modifier);
+       vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
+       vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
+       vhcr->status = 0;
+       vhcr->flags = !!(priv->cmd.use_events) << 6;
+       if (mlx4_is_master(dev)) {
+               ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
+               if (!ret) {
+                       if (out_is_imm) {
+                               if (out_param)
+                                       *out_param =
+                                               be64_to_cpu(vhcr->out_param);
+                               else {
+                                       mlx4_err(dev, "response expected while "
+                                                "output mailbox is NULL for "
+                                                "command 0x%x\n", op);
+                                       vhcr->status = CMD_STAT_BAD_PARAM;
+                               }
+                       }
+                       ret = mlx4_status_to_errno(vhcr->status);
+               }
+       } else {
+               ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0,
+                                   MLX4_COMM_TIME + timeout);
+               if (!ret) {
+                       if (out_is_imm) {
+                               if (out_param)
+                                       *out_param =
+                                               be64_to_cpu(vhcr->out_param);
+                               else {
+                                       mlx4_err(dev, "response expected while "
+                                                "output mailbox is NULL for "
+                                                "command 0x%x\n", op);
+                                       vhcr->status = CMD_STAT_BAD_PARAM;
+                               }
+                       }
+                       ret = mlx4_status_to_errno(vhcr->status);
+               } else
+                       mlx4_err(dev, "failed execution of VHCR_POST command "
+                                "opcode 0x%x\n", op);
+       }
+       up(&priv->cmd.slave_sem);
+       return ret;
+}
+
 static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                         int out_is_imm, u32 in_modifier, u8 op_modifier,
                         u16 op, unsigned long timeout)
@@ -217,6 +418,7 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
        void __iomem *hcr = priv->cmd.hcr;
        int err = 0;
        unsigned long end;
+       u32 stat;
 
        down(&priv->cmd.poll_sem);
 
@@ -240,9 +442,12 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                                          __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
                        (u64) be32_to_cpu((__force __be32)
                                          __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
-
-       err = mlx4_status_to_errno(be32_to_cpu((__force __be32)
-                                              __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24);
+       stat = be32_to_cpu((__force __be32)
+                          __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
+       err = mlx4_status_to_errno(stat);
+       if (err)
+               mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
+                        op, stat);
 
 out:
        up(&priv->cmd.poll_sem);
@@ -259,6 +464,7 @@ void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
        if (token != context->token)
                return;
 
+       context->fw_status = status;
        context->result    = mlx4_status_to_errno(status);
        context->out_param = out_param;
 
@@ -287,14 +493,18 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
        mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
                      in_modifier, op_modifier, op, context->token, 1);
 
-       if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) {
+       if (!wait_for_completion_timeout(&context->done,
+                                        msecs_to_jiffies(timeout))) {
                err = -EBUSY;
                goto out;
        }
 
        err = context->result;
-       if (err)
+       if (err) {
+               mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
+                        op, context->fw_status);
                goto out;
+       }
 
        if (out_is_imm)
                *out_param = context->out_param;
@@ -311,17 +521,1046 @@ out:
 
 int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
               int out_is_imm, u32 in_modifier, u8 op_modifier,
-              u16 op, unsigned long timeout)
+              u16 op, unsigned long timeout, int native)
 {
-       if (mlx4_priv(dev)->cmd.use_events)
-               return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
-                                    in_modifier, op_modifier, op, timeout);
-       else
-               return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
-                                    in_modifier, op_modifier, op, timeout);
+       if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
+               if (mlx4_priv(dev)->cmd.use_events)
+                       return mlx4_cmd_wait(dev, in_param, out_param,
+                                            out_is_imm, in_modifier,
+                                            op_modifier, op, timeout);
+               else
+                       return mlx4_cmd_poll(dev, in_param, out_param,
+                                            out_is_imm, in_modifier,
+                                            op_modifier, op, timeout);
+       }
+       return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
+                             in_modifier, op_modifier, op, timeout);
 }
 EXPORT_SYMBOL_GPL(__mlx4_cmd);
 
+
+static int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
+{
+       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
+                       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+}
+
+static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
+                          int slave, u64 slave_addr,
+                          int size, int is_read)
+{
+       u64 in_param;
+       u64 out_param;
+
+       if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
+           (slave & ~0x7f) | (size & 0xff)) {
+               mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
+                             "master_addr:0x%llx slave_id:%d size:%d\n",
+                             slave_addr, master_addr, slave, size);
+               return -EINVAL;
+       }
+
+       if (is_read) {
+               in_param = (u64) slave | slave_addr;
+               out_param = (u64) dev->caps.function | master_addr;
+       } else {
+               in_param = (u64) dev->caps.function | master_addr;
+               out_param = (u64) slave | slave_addr;
+       }
+
+       return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
+                           MLX4_CMD_ACCESS_MEM,
+                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+}
+
+int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
+                    struct mlx4_vhcr *vhcr,
+                    struct mlx4_cmd_mailbox *inbox,
+                    struct mlx4_cmd_mailbox *outbox,
+                    struct mlx4_cmd_info *cmd)
+{
+       u64 in_param;
+       u64 out_param;
+       int err;
+
+       in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
+       out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
+       if (cmd->encode_slave_id) {
+               in_param &= 0xffffffffffffff00ll;
+               in_param |= slave;
+       }
+
+       err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
+                        vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
+                        MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+
+       if (cmd->out_is_imm)
+               vhcr->out_param = out_param;
+
+       return err;
+}
+
+static struct mlx4_cmd_info cmd_info[] = {
+       {
+               .opcode = MLX4_CMD_QUERY_FW,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = NULL
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_HCA,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = NULL
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_DEV_CAP,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = NULL
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_FUNC_CAP,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_QUERY_FUNC_CAP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_ADAPTER,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = NULL
+       },
+       {
+               .opcode = MLX4_CMD_INIT_PORT,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_INIT_PORT_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_CLOSE_PORT,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm  = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_CLOSE_PORT_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_PORT,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_QUERY_PORT_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SET_PORT,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_SET_PORT_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_MAP_EQ,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_MAP_EQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SW2HW_EQ,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = true,
+               .verify = NULL,
+               .wrapper = mlx4_SW2HW_EQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_HW_HEALTH_CHECK,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = NULL
+       },
+       {
+               .opcode = MLX4_CMD_NOP,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = NULL
+       },
+       {
+               .opcode = MLX4_CMD_ALLOC_RES,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = true,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_ALLOC_RES_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_FREE_RES,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_FREE_RES_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SW2HW_MPT,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = true,
+               .verify = NULL,
+               .wrapper = mlx4_SW2HW_MPT_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_MPT,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_QUERY_MPT_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_HW2SW_MPT,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_HW2SW_MPT_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_READ_MTT,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = NULL
+       },
+       {
+               .opcode = MLX4_CMD_WRITE_MTT,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_WRITE_MTT_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SYNC_TPT,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = NULL
+       },
+       {
+               .opcode = MLX4_CMD_HW2SW_EQ,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = true,
+               .verify = NULL,
+               .wrapper = mlx4_HW2SW_EQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_EQ,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = true,
+               .verify = NULL,
+               .wrapper = mlx4_QUERY_EQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SW2HW_CQ,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = true,
+               .verify = NULL,
+               .wrapper = mlx4_SW2HW_CQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_HW2SW_CQ,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_HW2SW_CQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_CQ,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_QUERY_CQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_MODIFY_CQ,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = true,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_MODIFY_CQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SW2HW_SRQ,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = true,
+               .verify = NULL,
+               .wrapper = mlx4_SW2HW_SRQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_HW2SW_SRQ,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_HW2SW_SRQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_SRQ,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_QUERY_SRQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_ARM_SRQ,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_ARM_SRQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_RST2INIT_QP,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = true,
+               .verify = NULL,
+               .wrapper = mlx4_RST2INIT_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_INIT2INIT_QP,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_INIT2RTR_QP,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_INIT2RTR_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_RTR2RTS_QP,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_RTS2RTS_QP,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SQERR2RTS_QP,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_2ERR_QP,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_RTS2SQD_QP,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SQD2SQD_QP,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SQD2RTS_QP,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_2RST_QP,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_2RST_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_QP,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SUSPEND_QP,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_UNSUSPEND_QP,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_IF_STAT,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_QUERY_IF_STAT_wrapper
+       },
+       /* Native multicast commands are not available for guests */
+       {
+               .opcode = MLX4_CMD_QP_ATTACH,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_QP_ATTACH_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_PROMISC,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_PROMISC_wrapper
+       },
+       /* Ethernet specific commands */
+       {
+               .opcode = MLX4_CMD_SET_VLAN_FLTR,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_SET_VLAN_FLTR_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SET_MCAST_FLTR,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_SET_MCAST_FLTR_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_DUMP_ETH_STATS,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_DUMP_ETH_STATS_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_INFORM_FLR_DONE,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = NULL
+       },
+};
+
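+/*
+ * Master-side processing of a command posted by a slave through its virtual
+ * HCR: DMA the vHCR in, look the opcode up in cmd_info[], execute the
+ * wrapper (or pass the command through to firmware), then DMA the result
+ * and completion status back to the slave.
+ */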
+static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
+                                   struct mlx4_vhcr_cmd *in_vhcr)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_cmd_info *cmd = NULL;
+       struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
+       struct mlx4_vhcr *vhcr;
+       struct mlx4_cmd_mailbox *inbox = NULL;
+       struct mlx4_cmd_mailbox *outbox = NULL;
+       u64 in_param;
+       u64 out_param;
+       int ret = 0;
+       int i;
+       int err = 0;
+
+       /* Create sw representation of Virtual HCR */
+       vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
+       if (!vhcr)
+               return -ENOMEM;
+
+       /* DMA in the vHCR */
+       if (!in_vhcr) {
+               ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
+                                     priv->mfunc.master.slave_state[slave].vhcr_dma,
+                                     ALIGN(sizeof(struct mlx4_vhcr_cmd),
+                                           MLX4_ACCESS_MEM_ALIGN), 1);
+               if (ret) {
+                       mlx4_err(dev, "%s: Failed reading vhcr, "
+                                "ret: 0x%x\n", __func__, ret);
+                       kfree(vhcr);
+                       return ret;
+               }
+       }
+
+       /* Fill SW VHCR fields */
+       vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
+       vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
+       vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
+       vhcr->token = be16_to_cpu(vhcr_cmd->token);
+       vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
+       vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
+       vhcr->e_bit = vhcr_cmd->flags & (1 << 6);
+
+       /* Lookup command */
+       for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
+               if (vhcr->op == cmd_info[i].opcode) {
+                       cmd = &cmd_info[i];
+                       break;
+               }
+       }
+       if (!cmd) {
+               mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
+                        vhcr->op, slave);
+               vhcr_cmd->status = CMD_STAT_BAD_PARAM;
+               goto out_status;
+       }
+
+       /* Read inbox */
+       if (cmd->has_inbox) {
+               vhcr->in_param &= INBOX_MASK;
+               inbox = mlx4_alloc_cmd_mailbox(dev);
+               if (IS_ERR(inbox)) {
+                       vhcr_cmd->status = CMD_STAT_BAD_SIZE;
+                       inbox = NULL;
+                       goto out_status;
+               }
+
+               if (mlx4_ACCESS_MEM(dev, inbox->dma, slave,
+                                   vhcr->in_param,
+                                   MLX4_MAILBOX_SIZE, 1)) {
+                       mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
+                                __func__, cmd->opcode);
+                       vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
+                       goto out_status;
+               }
+       }
+
+       /* Apply permission and bounds checks if applicable */
+       if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
+               mlx4_warn(dev, "Command:0x%x from slave: %d failed protection "
+                         "checks for resource_id:%d\n", vhcr->op, slave,
+                         vhcr->in_modifier);
+               vhcr_cmd->status = CMD_STAT_BAD_OP;
+               goto out_status;
+       }
+
+       /* Allocate outbox */
+       if (cmd->has_outbox) {
+               outbox = mlx4_alloc_cmd_mailbox(dev);
+               if (IS_ERR(outbox)) {
+                       vhcr_cmd->status = CMD_STAT_BAD_SIZE;
+                       outbox = NULL;
+                       goto out_status;
+               }
+       }
+
+       /* Execute the command! */
+       if (cmd->wrapper) {
+               err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
+                                  cmd);
+               if (cmd->out_is_imm)
+                       vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
+       } else {
+               in_param = cmd->has_inbox ? (u64) inbox->dma :
+                       vhcr->in_param;
+               out_param = cmd->has_outbox ? (u64) outbox->dma :
+                       vhcr->out_param;
+               err = __mlx4_cmd(dev, in_param, &out_param,
+                                cmd->out_is_imm, vhcr->in_modifier,
+                                vhcr->op_modifier, vhcr->op,
+                                MLX4_CMD_TIME_CLASS_A,
+                                MLX4_CMD_NATIVE);
+
+               if (cmd->out_is_imm) {
+                       vhcr->out_param = out_param;
+                       vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
+               }
+       }
+
+       if (err) {
+               mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with"
+                         " error:%d, status %d\n",
+                         vhcr->op, slave, vhcr->errno, err);
+               vhcr_cmd->status = mlx4_errno_to_status(err);
+               goto out_status;
+       }
+
+
+       /* Write outbox if command completed successfully */
+       if (cmd->has_outbox && !vhcr_cmd->status) {
+               ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
+                                     vhcr->out_param,
+                                     MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
+               if (ret) {
+                       /* If we failed to write back the outbox after the
+                        * command was successfully executed, we must fail this
+                        * slave, as it is now in an undefined state */
+                       mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
+                       goto out;
+               }
+       }
+
+out_status:
+       /* DMA back vhcr result */
+       if (!in_vhcr) {
+               ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
+                                     priv->mfunc.master.slave_state[slave].vhcr_dma,
+                                     ALIGN(sizeof(struct mlx4_vhcr),
+                                           MLX4_ACCESS_MEM_ALIGN),
+                                     MLX4_CMD_WRAPPED);
+               if (ret)
+                       mlx4_err(dev, "%s:Failed writing vhcr result\n",
+                                __func__);
+               else if (vhcr->e_bit &&
+                        mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
+                               mlx4_warn(dev, "Failed to generate command completion "
+                                         "eqe for slave %d\n", slave);
+       }
+
+out:
+       kfree(vhcr);
+       mlx4_free_cmd_mailbox(dev, inbox);
+       mlx4_free_cmd_mailbox(dev, outbox);
+       return ret;
+}
+
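+/*
+ * Handle a single comm-channel command from a slave.  The slave hands over
+ * its vHCR DMA address 16 bits at a time via VHCR0..VHCR_EN and then posts
+ * VHCR_POST to have the master process the vHCR; any protocol violation
+ * resets the slave's command state.
+ */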
+static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
+                              u16 param, u8 toggle)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+       u32 reply;
+       u32 slave_status = 0;
+       u8 is_going_down = 0;
+
+       slave_state[slave].comm_toggle ^= 1;
+       reply = (u32) slave_state[slave].comm_toggle << 31;
+       if (toggle != slave_state[slave].comm_toggle) {
+               mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER "
+                         "STATE COMPROMISED ***\n", toggle, slave);
+               goto reset_slave;
+       }
+       if (cmd == MLX4_COMM_CMD_RESET) {
+               mlx4_warn(dev, "Received reset from slave:%d\n", slave);
+               slave_state[slave].active = false;
+               /* Check if we are in the middle of the FLR process;
+                * if so, return "retry" status to the slave */
+               if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
+                       slave_status = MLX4_DELAY_RESET_SLAVE;
+                       goto inform_slave_state;
+               }
+
+               /* write the version in the event field */
+               reply |= mlx4_comm_get_version();
+
+               goto reset_slave;
+       }
+       /* Command from a slave in the middle of FLR */
+       if (cmd != MLX4_COMM_CMD_RESET &&
+           MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
+               mlx4_warn(dev, "slave:%d is trying to run cmd(0x%x) "
+                         "in the middle of FLR\n", slave, cmd);
+               return;
+       }
+
+       switch (cmd) {
+       case MLX4_COMM_CMD_VHCR0:
+               if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
+                       goto reset_slave;
+               slave_state[slave].vhcr_dma = ((u64) param) << 48;
+               priv->mfunc.master.slave_state[slave].cookie = 0;
+               mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
+               break;
+       case MLX4_COMM_CMD_VHCR1:
+               if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
+                       goto reset_slave;
+               slave_state[slave].vhcr_dma |= ((u64) param) << 32;
+               break;
+       case MLX4_COMM_CMD_VHCR2:
+               if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
+                       goto reset_slave;
+               slave_state[slave].vhcr_dma |= ((u64) param) << 16;
+               break;
+       case MLX4_COMM_CMD_VHCR_EN:
+               if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
+                       goto reset_slave;
+               slave_state[slave].vhcr_dma |= param;
+               slave_state[slave].active = true;
+               break;
+       case MLX4_COMM_CMD_VHCR_POST:
+               if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
+                   (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST))
+                       goto reset_slave;
+               down(&priv->cmd.slave_sem);
+               if (mlx4_master_process_vhcr(dev, slave, NULL)) {
+                       mlx4_err(dev, "Failed processing vhcr for slave:%d,"
+                                " resetting slave.\n", slave);
+                       up(&priv->cmd.slave_sem);
+                       goto reset_slave;
+               }
+               up(&priv->cmd.slave_sem);
+               break;
+       default:
+               mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
+               goto reset_slave;
+       }
+       spin_lock(&priv->mfunc.master.slave_state_lock);
+       if (!slave_state[slave].is_slave_going_down)
+               slave_state[slave].last_cmd = cmd;
+       else
+               is_going_down = 1;
+       spin_unlock(&priv->mfunc.master.slave_state_lock);
+       if (is_going_down) {
+               mlx4_warn(dev, "Slave is going down, aborting command(%d)"
+                         " executing from slave:%d\n",
+                         cmd, slave);
+               return;
+       }
+       __raw_writel((__force u32) cpu_to_be32(reply),
+                    &priv->mfunc.comm[slave].slave_read);
+       mmiowb();
+
+       return;
+
+reset_slave:
+       /* cleanup any slave resources */
+       mlx4_delete_all_resources_for_slave(dev, slave);
+       spin_lock(&priv->mfunc.master.slave_state_lock);
+       if (!slave_state[slave].is_slave_going_down)
+               slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
+       spin_unlock(&priv->mfunc.master.slave_state_lock);
+       /* With the slave in the middle of FLR, no need to clean resources again. */
+inform_slave_state:
+       memset(&slave_state[slave].event_eq, 0,
+              sizeof(struct mlx4_slave_event_eq_info));
+       __raw_writel((__force u32) cpu_to_be32(reply),
+                    &priv->mfunc.comm[slave].slave_read);
+       wmb();
+}
+
+/* master command processing */
+void mlx4_master_comm_channel(struct work_struct *work)
+{
+       struct mlx4_mfunc_master_ctx *master =
+               container_of(work,
+                            struct mlx4_mfunc_master_ctx,
+                            comm_work);
+       struct mlx4_mfunc *mfunc =
+               container_of(master, struct mlx4_mfunc, master);
+       struct mlx4_priv *priv =
+               container_of(mfunc, struct mlx4_priv, mfunc);
+       struct mlx4_dev *dev = &priv->dev;
+       __be32 *bit_vec;
+       u32 comm_cmd;
+       u32 vec;
+       int i, j, slave;
+       int toggle;
+       int served = 0;
+       int reported = 0;
+       u32 slt;
+
+       bit_vec = master->comm_arm_bit_vector;
+       for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
+               vec = be32_to_cpu(bit_vec[i]);
+               for (j = 0; j < 32; j++) {
+                       if (!(vec & (1 << j)))
+                               continue;
+                       ++reported;
+                       slave = (i * 32) + j;
+                       comm_cmd = swab32(readl(
+                                         &mfunc->comm[slave].slave_write));
+                       slt = swab32(readl(&mfunc->comm[slave].slave_read))
+                                    >> 31;
+                       toggle = comm_cmd >> 31;
+                       if (toggle != slt) {
+                               if (master->slave_state[slave].comm_toggle
+                                   != slt) {
+                                       printk(KERN_INFO "slave %d out of sync."
+                                              " read toggle %d, state toggle %d. "
+                                              "Resynching.\n", slave, slt,
+                                              master->slave_state[slave].comm_toggle);
+                                       master->slave_state[slave].comm_toggle =
+                                               slt;
+                               }
+                               mlx4_master_do_cmd(dev, slave,
+                                                  comm_cmd >> 16 & 0xff,
+                                                  comm_cmd & 0xffff, toggle);
+                               ++served;
+                       }
+               }
+       }
+
+       if (reported && reported != served)
+               mlx4_warn(dev, "Got command event with bitmask from %d slaves"
+                         " but %d were served\n",
+                         reported, served);
+
+       if (mlx4_ARM_COMM_CHANNEL(dev))
+               mlx4_warn(dev, "Failed to arm comm channel events\n");
+}
+
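+/*
+ * Called by a slave during init: wait for the comm-channel read and write
+ * toggles to agree and adopt that value as the starting toggle, resetting
+ * the channel if a previous VM left it in an unsynced state.
+ */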
+static int sync_toggles(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int wr_toggle;
+       int rd_toggle;
+       unsigned long end;
+
+       wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31;
+       end = jiffies + msecs_to_jiffies(5000);
+
+       while (time_before(jiffies, end)) {
+               rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31;
+               if (rd_toggle == wr_toggle) {
+                       priv->cmd.comm_toggle = rd_toggle;
+                       return 0;
+               }
+
+               cond_resched();
+       }
+
+       /*
+        * We could reach here if, for example, the previous VM using this
+        * function misbehaved and left the channel in an unsynced state. We
+        * should fix this here and give this VM a chance to use a properly
+        * synced channel.
+        */
+       mlx4_warn(dev, "recovering from a previously misbehaving VM\n");
+       __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
+       __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
+       priv->cmd.comm_toggle = 0;
+
+       return 0;
+}
+
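+/*
+ * Set up the multi-function command path: allocate the vHCR page, map the
+ * communication channel and, on the master, initialize per-slave state, the
+ * resource tracker and the comm/FLR work queues; on a slave, just sync the
+ * comm-channel toggles.
+ */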
+int mlx4_multi_func_init(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_slave_state *s_state;
+       int i, err, port;
+
+       priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
+                                           &priv->mfunc.vhcr_dma,
+                                           GFP_KERNEL);
+       if (!priv->mfunc.vhcr) {
+               mlx4_err(dev, "Couldn't allocate vhcr.\n");
+               return -ENOMEM;
+       }
+
+       if (mlx4_is_master(dev))
+               priv->mfunc.comm =
+               ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) +
+                       priv->fw.comm_base, MLX4_COMM_PAGESIZE);
+       else
+               priv->mfunc.comm =
+               ioremap(pci_resource_start(dev->pdev, 2) +
+                       MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
+       if (!priv->mfunc.comm) {
+               mlx4_err(dev, "Couldn't map communication vector.\n");
+               goto err_vhcr;
+       }
+
+       if (mlx4_is_master(dev)) {
+               priv->mfunc.master.slave_state =
+                       kzalloc(dev->num_slaves *
+                               sizeof(struct mlx4_slave_state), GFP_KERNEL);
+               if (!priv->mfunc.master.slave_state)
+                       goto err_comm;
+
+               for (i = 0; i < dev->num_slaves; ++i) {
+                       s_state = &priv->mfunc.master.slave_state[i];
+                       s_state->last_cmd = MLX4_COMM_CMD_RESET;
+                       __raw_writel((__force u32) 0,
+                                    &priv->mfunc.comm[i].slave_write);
+                       __raw_writel((__force u32) 0,
+                                    &priv->mfunc.comm[i].slave_read);
+                       mmiowb();
+                       for (port = 1; port <= MLX4_MAX_PORTS; port++) {
+                               s_state->vlan_filter[port] =
+                                       kzalloc(sizeof(struct mlx4_vlan_fltr),
+                                               GFP_KERNEL);
+                               if (!s_state->vlan_filter[port]) {
+                                       if (--port)
+                                               kfree(s_state->vlan_filter[port]);
+                                       goto err_slaves;
+                               }
+                               INIT_LIST_HEAD(&s_state->mcast_filters[port]);
+                       }
+                       spin_lock_init(&s_state->lock);
+               }
+
+               memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
+               priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
+               INIT_WORK(&priv->mfunc.master.comm_work,
+                         mlx4_master_comm_channel);
+               INIT_WORK(&priv->mfunc.master.slave_event_work,
+                         mlx4_gen_slave_eqe);
+               INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
+                         mlx4_master_handle_slave_flr);
+               spin_lock_init(&priv->mfunc.master.slave_state_lock);
+               priv->mfunc.master.comm_wq =
+                       create_singlethread_workqueue("mlx4_comm");
+               if (!priv->mfunc.master.comm_wq)
+                       goto err_slaves;
+
+               if (mlx4_init_resource_tracker(dev))
+                       goto err_thread;
+
+               sema_init(&priv->cmd.slave_sem, 1);
+               err = mlx4_ARM_COMM_CHANNEL(dev);
+               if (err) {
+                       mlx4_err(dev, "Failed to arm comm channel eq: %x\n",
+                                err);
+                       goto err_resource;
+               }
+
+       } else {
+               err = sync_toggles(dev);
+               if (err) {
+                       mlx4_err(dev, "Couldn't sync toggles\n");
+                       goto err_comm;
+               }
+
+               sema_init(&priv->cmd.slave_sem, 1);
+       }
+       return 0;
+
+err_resource:
+       mlx4_free_resource_tracker(dev);
+err_thread:
+       flush_workqueue(priv->mfunc.master.comm_wq);
+       destroy_workqueue(priv->mfunc.master.comm_wq);
+err_slaves:
+       while (--i) {
+               for (port = 1; port <= MLX4_MAX_PORTS; port++)
+                       kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
+       }
+       kfree(priv->mfunc.master.slave_state);
+err_comm:
+       iounmap(priv->mfunc.comm);
+err_vhcr:
+       dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+                                            priv->mfunc.vhcr,
+                                            priv->mfunc.vhcr_dma);
+       priv->mfunc.vhcr = NULL;
+       return -ENOMEM;
+}
+
 int mlx4_cmd_init(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -331,22 +1570,51 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
        priv->cmd.use_events = 0;
        priv->cmd.toggle     = 1;
 
-       priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE,
-                               MLX4_HCR_SIZE);
-       if (!priv->cmd.hcr) {
-               mlx4_err(dev, "Couldn't map command register.");
-               return -ENOMEM;
+       priv->cmd.hcr = NULL;
+       priv->mfunc.vhcr = NULL;
+
+       if (!mlx4_is_slave(dev)) {
+               priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
+                                       MLX4_HCR_BASE, MLX4_HCR_SIZE);
+               if (!priv->cmd.hcr) {
+                       mlx4_err(dev, "Couldn't map command register.\n");
+                       return -ENOMEM;
+               }
        }
 
        priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
                                         MLX4_MAILBOX_SIZE,
                                         MLX4_MAILBOX_SIZE, 0);
-       if (!priv->cmd.pool) {
-               iounmap(priv->cmd.hcr);
-               return -ENOMEM;
-       }
+       if (!priv->cmd.pool)
+               goto err_hcr;
 
        return 0;
+
+err_hcr:
+       if (!mlx4_is_slave(dev))
+               iounmap(priv->cmd.hcr);
+       return -ENOMEM;
+}
+
+void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int i, port;
+
+       if (mlx4_is_master(dev)) {
+               flush_workqueue(priv->mfunc.master.comm_wq);
+               destroy_workqueue(priv->mfunc.master.comm_wq);
+               for (i = 0; i < dev->num_slaves; i++) {
+                       for (port = 1; port <= MLX4_MAX_PORTS; port++)
+                               kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
+               }
+               kfree(priv->mfunc.master.slave_state);
+               iounmap(priv->mfunc.comm);
+               dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+                                                    priv->mfunc.vhcr,
+                                                    priv->mfunc.vhcr_dma);
+               priv->mfunc.vhcr = NULL;
+       }
 }
 
 void mlx4_cmd_cleanup(struct mlx4_dev *dev)
@@ -354,7 +1622,9 @@ void mlx4_cmd_cleanup(struct mlx4_dev *dev)
        struct mlx4_priv *priv = mlx4_priv(dev);
 
        pci_pool_destroy(priv->cmd.pool);
-       iounmap(priv->cmd.hcr);
+
+       if (!mlx4_is_slave(dev))
+               iounmap(priv->cmd.hcr);
 }
 
 /*
@@ -365,6 +1635,7 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;
+       int err = 0;
 
        priv->cmd.context = kmalloc(priv->cmd.max_cmds *
                                   sizeof (struct mlx4_cmd_context),
@@ -389,11 +1660,10 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
                ; /* nothing */
        --priv->cmd.token_mask;
 
-       priv->cmd.use_events = 1;
-
        down(&priv->cmd.poll_sem);
+       priv->cmd.use_events = 1;
 
-       return 0;
+       return err;
 }
 
 /*
@@ -433,7 +1703,8 @@ struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
 }
 EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
 
-void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox)
+void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
+                          struct mlx4_cmd_mailbox *mailbox)
 {
        if (!mailbox)
                return;
@@ -442,3 +1713,8 @@ void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbo
        kfree(mailbox);
 }
 EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
+
+u32 mlx4_comm_get_version(void)
+{
+        return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
+}
index 499a516..475f9d6 100644 (file)
@@ -34,9 +34,9 @@
  * SOFTWARE.
  */
 
+#include <linux/init.h>
 #include <linux/hardirq.h>
 #include <linux/export.h>
-#include <linux/gfp.h>
 
 #include <linux/mlx4/cmd.h>
 #include <linux/mlx4/cq.h>
 #include "mlx4.h"
 #include "icm.h"
 
-struct mlx4_cq_context {
-       __be32                  flags;
-       u16                     reserved1[3];
-       __be16                  page_offset;
-       __be32                  logsize_usrpage;
-       __be16                  cq_period;
-       __be16                  cq_max_count;
-       u8                      reserved2[3];
-       u8                      comp_eqn;
-       u8                      log_page_size;
-       u8                      reserved3[2];
-       u8                      mtt_base_addr_h;
-       __be32                  mtt_base_addr_l;
-       __be32                  last_notified_index;
-       __be32                  solicit_producer_index;
-       __be32                  consumer_index;
-       __be32                  producer_index;
-       u32                     reserved4[2];
-       __be64                  db_rec_addr;
-};
-
 #define MLX4_CQ_STATUS_OK              ( 0 << 28)
 #define MLX4_CQ_STATUS_OVERFLOW                ( 9 << 28)
 #define MLX4_CQ_STATUS_WRITE_FAIL      (10 << 28)
@@ -81,7 +60,7 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
        cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
                               cqn & (dev->caps.num_cqs - 1));
        if (!cq) {
-               mlx4_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
+               mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
                return;
        }
 
@@ -117,23 +96,24 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
 static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int cq_num)
 {
-       return mlx4_cmd(dev, mailbox->dma, cq_num, 0, MLX4_CMD_SW2HW_CQ,
-                       MLX4_CMD_TIME_CLASS_A);
+       return mlx4_cmd(dev, mailbox->dma | dev->caps.function, cq_num, 0,
+                       MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
+                       MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int cq_num, u32 opmod)
 {
        return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
-                       MLX4_CMD_TIME_CLASS_A);
+                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int cq_num)
 {
-       return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
-                           mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
-                           MLX4_CMD_TIME_CLASS_A);
+       return mlx4_cmd_box(dev, dev->caps.function, mailbox ? mailbox->dma : 0,
+                           cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
+                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
 
 int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
@@ -188,6 +168,78 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
 }
 EXPORT_SYMBOL_GPL(mlx4_cq_resize);
 
+int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_cq_table *cq_table = &priv->cq_table;
+       int err;
+
+       *cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
+       if (*cqn == -1)
+               return -ENOMEM;
+
+       err = mlx4_table_get(dev, &cq_table->table, *cqn);
+       if (err)
+               goto err_out;
+
+       err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);
+       if (err)
+               goto err_put;
+       return 0;
+
+err_put:
+       mlx4_table_put(dev, &cq_table->table, *cqn);
+
+err_out:
+       mlx4_bitmap_free(&cq_table->bitmap, *cqn);
+       return err;
+}
+
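+/*
+ * On a multi-function device, the CQ number and its ICM entries are obtained
+ * through the wrapped ALLOC_RES command; otherwise the allocation is done
+ * directly via __mlx4_cq_alloc_icm().
+ */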
+static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
+{
+       u64 out_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ,
+                                  RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+                                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+               if (err)
+                       return err;
+               else {
+                       *cqn = get_param_l(&out_param);
+                       return 0;
+               }
+       }
+       return __mlx4_cq_alloc_icm(dev, cqn);
+}
+
+void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_cq_table *cq_table = &priv->cq_table;
+
+       mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
+       mlx4_table_put(dev, &cq_table->table, cqn);
+       mlx4_bitmap_free(&cq_table->bitmap, cqn);
+}
+
+static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
+{
+       u64 in_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, cqn);
+               err = mlx4_cmd(dev, in_param, RES_CQ, RES_OP_RESERVE_AND_MAP,
+                              MLX4_CMD_FREE_RES,
+                              MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+               if (err)
+                       mlx4_warn(dev, "Failed freeing cq:%d\n", cqn);
+       } else
+               __mlx4_cq_free_icm(dev, cqn);
+}
+
 int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
                  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
                  unsigned vector, int collapsed)
@@ -204,23 +256,15 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
 
        cq->vector = vector;
 
-       cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
-       if (cq->cqn == -1)
-               return -ENOMEM;
-
-       err = mlx4_table_get(dev, &cq_table->table, cq->cqn);
-       if (err)
-               goto err_out;
-
-       err = mlx4_table_get(dev, &cq_table->cmpt_table, cq->cqn);
+       err = mlx4_cq_alloc_icm(dev, &cq->cqn);
        if (err)
-               goto err_put;
+               return err;
 
        spin_lock_irq(&cq_table->lock);
        err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
        spin_unlock_irq(&cq_table->lock);
        if (err)
-               goto err_cmpt_put;
+               goto err_icm;
 
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
@@ -259,14 +303,8 @@ err_radix:
        radix_tree_delete(&cq_table->tree, cq->cqn);
        spin_unlock_irq(&cq_table->lock);
 
-err_cmpt_put:
-       mlx4_table_put(dev, &cq_table->cmpt_table, cq->cqn);
-
-err_put:
-       mlx4_table_put(dev, &cq_table->table, cq->cqn);
-
-err_out:
-       mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
+err_icm:
+       mlx4_cq_free_icm(dev, cq->cqn);
 
        return err;
 }
@@ -292,8 +330,7 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
                complete(&cq->free);
        wait_for_completion(&cq->free);
 
-       mlx4_table_put(dev, &cq_table->table, cq->cqn);
-       mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
+       mlx4_cq_free_icm(dev, cq->cqn);
 }
 EXPORT_SYMBOL_GPL(mlx4_cq_free);
 
@@ -304,6 +341,8 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
 
        spin_lock_init(&cq_table->lock);
        INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
+       if (mlx4_is_slave(dev))
+               return 0;
 
        err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
                               dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
@@ -315,6 +354,8 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
 
 void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
 {
+       if (mlx4_is_slave(dev))
+               return;
        /* Nothing to do to clean up radix_tree */
        mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
 }
index 4c5bbb3..72fa807 100644 (file)
@@ -45,7 +45,7 @@
 #include "mlx4_en.h"
 #include "en_port.h"
 
-static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
@@ -67,9 +67,10 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
                en_err(priv, "failed adding vlan %d\n", vid);
        mutex_unlock(&mdev->state_lock);
 
+       return 0;
 }
 
-static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
@@ -93,6 +94,8 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
                        en_err(priv, "Failed configuring VLAN filter\n");
        }
        mutex_unlock(&mdev->state_lock);
+
+       return 0;
 }
 
 u64 mlx4_en_mac_to_u64(u8 *addr)
@@ -133,7 +136,7 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
        if (priv->port_up) {
                /* Remove old MAC and insert the new one */
                err = mlx4_replace_mac(mdev->dev, priv->port,
-                                      priv->base_qpn, priv->mac, 0);
+                                      priv->base_qpn, priv->mac);
                if (err)
                        en_err(priv, "Failed changing HW MAC address\n");
        } else
@@ -148,6 +151,7 @@ static void mlx4_en_clear_list(struct net_device *dev)
        struct mlx4_en_priv *priv = netdev_priv(dev);
 
        kfree(priv->mc_addrs);
+       priv->mc_addrs = NULL;
        priv->mc_addrs_cnt = 0;
 }
 
@@ -167,6 +171,7 @@ static void mlx4_en_cache_mclist(struct net_device *dev)
        i = 0;
        netdev_for_each_mc_addr(ha, dev)
                memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
+       mlx4_en_clear_list(dev);
        priv->mc_addrs = mc_addrs;
        priv->mc_addrs_cnt = mc_addrs_cnt;
 }
@@ -204,6 +209,16 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
                goto out;
        }
 
+       if (!netif_carrier_ok(dev)) {
+               if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
+                       if (priv->port_state.link_state) {
+                               priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
+                               netif_carrier_on(dev);
+                               en_dbg(LINK, priv, "Link Up\n");
+                       }
+               }
+       }
+
        /*
         * Promsicuous mode: disable all filters
         */
@@ -599,12 +614,12 @@ int mlx4_en_start_port(struct net_device *dev)
                ++rx_index;
        }
 
-       /* Set port mac number */
-       en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
-       err = mlx4_register_mac(mdev->dev, priv->port,
-                               priv->mac, &priv->base_qpn, 0);
+       /* Set qp number */
+       en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
+       err = mlx4_get_eth_qp(mdev->dev, priv->port,
+                               priv->mac, &priv->base_qpn);
        if (err) {
-               en_err(priv, "Failed setting port mac\n");
+               en_err(priv, "Failed getting eth qp\n");
                goto cq_err;
        }
        mdev->mac_removed[priv->port] = 0;
@@ -699,7 +714,7 @@ tx_err:
 
        mlx4_en_release_rss_steer(priv);
 mac_err:
-       mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
+       mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
 cq_err:
        while (rx_index--)
                mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
@@ -745,10 +760,6 @@ void mlx4_en_stop_port(struct net_device *dev)
        /* Flush multicast filter */
        mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
 
-       /* Unregister Mac address for the port */
-       mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
-       mdev->mac_removed[priv->port] = 1;
-
        /* Free TX Rings */
        for (i = 0; i < priv->tx_ring_num; i++) {
                mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
@@ -762,6 +773,10 @@ void mlx4_en_stop_port(struct net_device *dev)
        /* Free RSS qps */
        mlx4_en_release_rss_steer(priv);
 
+       /* Unregister Mac address for the port */
+       mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
+       mdev->mac_removed[priv->port] = 1;
+
        /* Free RX Rings */
        for (i = 0; i < priv->rx_ring_num; i++) {
                mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
index 03c84cd..3317914 100644 (file)
 #include "mlx4_en.h"
 
 
-int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
-                       u64 mac, u64 clear, u8 mode)
-{
-       return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
-                       MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B);
-}
-
 int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
 {
        struct mlx4_cmd_mailbox *mailbox;
@@ -72,76 +65,7 @@ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
                filter->entry[i] = cpu_to_be32(entry);
        }
        err = mlx4_cmd(dev, mailbox->dma, priv->port, 0, MLX4_CMD_SET_VLAN_FLTR,
-                      MLX4_CMD_TIME_CLASS_B);
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
-}
-
-
-int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
-                         u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_set_port_general_context *context;
-       int err;
-       u32 in_mod;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-       context = mailbox->buf;
-       memset(context, 0, sizeof *context);
-
-       context->flags = SET_PORT_GEN_ALL_VALID;
-       context->mtu = cpu_to_be16(mtu);
-       context->pptx = (pptx * (!pfctx)) << 7;
-       context->pfctx = pfctx;
-       context->pprx = (pprx * (!pfcrx)) << 7;
-       context->pfcrx = pfcrx;
-
-       in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
-       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B);
-
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
-}
-
-int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
-                          u8 promisc)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_set_port_rqp_calc_context *context;
-       int err;
-       u32 in_mod;
-       u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
-                                               MCAST_DIRECT : MCAST_DEFAULT;
-
-       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER  &&
-                       dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
-               return 0;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-       context = mailbox->buf;
-       memset(context, 0, sizeof *context);
-
-       context->base_qpn = cpu_to_be32(base_qpn);
-       context->n_mac = dev->caps.log_num_macs;
-       context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
-                                      base_qpn);
-       context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
-                                    base_qpn);
-       context->intra_no_vlan = 0;
-       context->no_vlan = MLX4_NO_VLAN_IDX;
-       context->intra_vlan_miss = 0;
-       context->vlan_miss = MLX4_VLAN_MISS_IDX;
-
-       in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
-       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B);
-
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
 }
@@ -159,7 +83,8 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
                return PTR_ERR(mailbox);
        memset(mailbox->buf, 0, sizeof(*qport_context));
        err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
-                          MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B);
+                          MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+                          MLX4_CMD_WRAPPED);
        if (err)
                goto out;
        qport_context = mailbox->buf;
@@ -204,7 +129,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
                return PTR_ERR(mailbox);
        memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
        err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
-                          MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B);
+                          MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
+                          MLX4_CMD_WRAPPED);
        if (err)
                goto out;
 
index 19eb244..6934fd7 100644 (file)
 #define SET_PORT_PROMISC_SHIFT 31
 #define SET_PORT_MC_PROMISC_SHIFT      30
 
-enum {
-       MLX4_CMD_SET_VLAN_FLTR  = 0x47,
-       MLX4_CMD_SET_MCAST_FLTR = 0x48,
-       MLX4_CMD_DUMP_ETH_STATS = 0x49,
-};
-
-enum {
-       MCAST_DIRECT_ONLY       = 0,
-       MCAST_DIRECT            = 1,
-       MCAST_DEFAULT           = 2
-};
-
-struct mlx4_set_port_general_context {
-       u8 reserved[3];
-       u8 flags;
-       u16 reserved2;
-       __be16 mtu;
-       u8 pptx;
-       u8 pfctx;
-       u16 reserved3;
-       u8 pprx;
-       u8 pfcrx;
-       u16 reserved4;
-};
-
-struct mlx4_set_port_rqp_calc_context {
-       __be32 base_qpn;
-       u8 rererved;
-       u8 n_mac;
-       u8 n_vlan;
-       u8 n_prio;
-       u8 reserved2[3];
-       u8 mac_miss;
-       u8 intra_no_vlan;
-       u8 no_vlan;
-       u8 intra_vlan_miss;
-       u8 vlan_miss;
-       u8 reserved3[3];
-       u8 no_vlan_prio;
-       __be32 promisc;
-       __be32 mcast;
-};
-
 #define VLAN_FLTR_SIZE 128
 struct mlx4_set_vlan_fltr_mbox {
        __be32 entry[VLAN_FLTR_SIZE];
index ce1bc57..e8d6ad2 100644 (file)
@@ -541,6 +541,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
        unsigned int length;
        int polled = 0;
        int ip_summed;
+       struct ethhdr *ethh;
+       u64 s_mac;
 
        if (!priv->port_up)
                return 0;
@@ -577,6 +579,19 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                        goto next;
                }
 
+               /* Get a pointer to the first fragment, since we don't have the
+                * skb yet, and cast it to an ethhdr struct */
+               ethh = (struct ethhdr *)(page_address(skb_frags[0].page) +
+                                        skb_frags[0].offset);
+               s_mac = mlx4_en_mac_to_u64(ethh->h_source);
+
+               /* If the source MAC is equal to our own MAC and we are not
+                * performing the selftest or loopback is disabled, drop the
+                * packet */
+               if (s_mac == priv->mac &&
+                       (!(dev->features & NETIF_F_LOOPBACK) ||
+                        !priv->validate_loopback))
+                       goto next;
+
                /*
                 * Packet is OK - process it.
                 */
@@ -883,11 +898,11 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
        rss_context = ptr;
        rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 |
                                            (rss_map->base_qpn));
+       rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
        if (priv->mdev->profile.udp_rss) {
                rss_mask |=  MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
                rss_context->base_qpn_udp = rss_context->default_qpn;
        }
-       rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
        rss_context->flags = rss_mask;
        rss_context->hash_fn = MLX4_RSS_HASH_TOP;
        for (i = 0; i < 10; i++)
index 9fdbcec..bf2e5d3 100644 (file)
@@ -43,7 +43,7 @@
 static int mlx4_en_test_registers(struct mlx4_en_priv *priv)
 {
        return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK,
-                       MLX4_CMD_TIME_CLASS_A);
+                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
index 7e76862..9ef9038 100644 (file)
@@ -688,17 +688,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                ring->tx_csum++;
        }
 
-       if (unlikely(priv->validate_loopback)) {
-               /* Copy dst mac address to wqe */
-               skb_reset_mac_header(skb);
-               ethh = eth_hdr(skb);
-               if (ethh && ethh->h_dest) {
-                       mac = mlx4_en_mac_to_u64(ethh->h_dest);
-                       mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
-                       mac_l = (u32) (mac & 0xffffffff);
-                       tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
-                       tx_desc->ctrl.imm = cpu_to_be32(mac_l);
-               }
+       /* Copy dst mac address to wqe */
+       skb_reset_mac_header(skb);
+       ethh = eth_hdr(skb);
+       if (ethh && ethh->h_dest) {
+               mac = mlx4_en_mac_to_u64(ethh->h_dest);
+               mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
+               mac_l = (u32) (mac & 0xffffffff);
+               tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
+               tx_desc->ctrl.imm = cpu_to_be32(mac_l);
        }
 
        /* Handle LSO (TSO) packets */
index 24ee967..1e9b55e 100644 (file)
@@ -31,6 +31,7 @@
  * SOFTWARE.
  */
 
+#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/export.h>
@@ -52,30 +53,6 @@ enum {
        MLX4_EQ_ENTRY_SIZE      = 0x20
 };
 
-/*
- * Must be packed because start is 64 bits but only aligned to 32 bits.
- */
-struct mlx4_eq_context {
-       __be32                  flags;
-       u16                     reserved1[3];
-       __be16                  page_offset;
-       u8                      log_eq_size;
-       u8                      reserved2[4];
-       u8                      eq_period;
-       u8                      reserved3;
-       u8                      eq_max_count;
-       u8                      reserved4[3];
-       u8                      intr;
-       u8                      log_page_size;
-       u8                      reserved5[2];
-       u8                      mtt_base_addr_h;
-       __be32                  mtt_base_addr_l;
-       u32                     reserved6[2];
-       __be32                  consumer_index;
-       __be32                  producer_index;
-       u32                     reserved7[4];
-};
-
 #define MLX4_EQ_STATUS_OK         ( 0 << 28)
 #define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
 #define MLX4_EQ_OWNER_SW          ( 0 << 24)
@@ -100,46 +77,9 @@ struct mlx4_eq_context {
                               (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)          | \
-                              (1ull << MLX4_EVENT_TYPE_CMD))
-
-struct mlx4_eqe {
-       u8                      reserved1;
-       u8                      type;
-       u8                      reserved2;
-       u8                      subtype;
-       union {
-               u32             raw[6];
-               struct {
-                       __be32  cqn;
-               } __packed comp;
-               struct {
-                       u16     reserved1;
-                       __be16  token;
-                       u32     reserved2;
-                       u8      reserved3[3];
-                       u8      status;
-                       __be64  out_param;
-               } __packed cmd;
-               struct {
-                       __be32  qpn;
-               } __packed qp;
-               struct {
-                       __be32  srqn;
-               } __packed srq;
-               struct {
-                       __be32  cqn;
-                       u32     reserved1;
-                       u8      reserved2[3];
-                       u8      syndrome;
-               } __packed cq_err;
-               struct {
-                       u32     reserved1[2];
-                       __be32  port;
-               } __packed port_change;
-       }                       event;
-       u8                      reserved3[3];
-       u8                      owner;
-} __packed;
+                              (1ull << MLX4_EVENT_TYPE_CMD)                | \
+                              (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL)       | \
+                              (1ull << MLX4_EVENT_TYPE_FLR_EVENT))
 
 static void eq_set_ci(struct mlx4_eq *eq, int req_not)
 {
@@ -162,13 +102,144 @@ static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
        return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
 }
 
+static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
+{
+       struct mlx4_eqe *eqe =
+               &slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];
+       return (!!(eqe->owner & 0x80) ^
+               !!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
+               eqe : NULL;
+}
+
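+/*
+ * Work handler that drains the master's software slave event queue and
+ * forwards each EQE to the addressed slave (or to every active slave when
+ * the slave_id is ALL_SLAVES) via mlx4_GEN_EQE.
+ */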
+void mlx4_gen_slave_eqe(struct work_struct *work)
+{
+       struct mlx4_mfunc_master_ctx *master =
+               container_of(work, struct mlx4_mfunc_master_ctx,
+                            slave_event_work);
+       struct mlx4_mfunc *mfunc =
+               container_of(master, struct mlx4_mfunc, master);
+       struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
+       struct mlx4_dev *dev = &priv->dev;
+       struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
+       struct mlx4_eqe *eqe;
+       u8 slave;
+       int i;
+
+       for (eqe = next_slave_event_eqe(slave_eq); eqe;
+             eqe = next_slave_event_eqe(slave_eq)) {
+               slave = eqe->slave_id;
+
+               /* All active slaves need to receive the event */
+               if (slave == ALL_SLAVES) {
+                       for (i = 0; i < dev->num_slaves; i++) {
+                               if (i != dev->caps.function &&
+                                   master->slave_state[i].active)
+                                       if (mlx4_GEN_EQE(dev, i, eqe))
+                                               mlx4_warn(dev, "Failed to "
+                                                         " generate event "
+                                                         "generate event "
+                       }
+               } else {
+                       if (mlx4_GEN_EQE(dev, slave, eqe))
+                               mlx4_warn(dev, "Failed to generate event "
+                                              "for slave %d\n", slave);
+               }
+               ++slave_eq->cons;
+       }
+}
+
+
+static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
+       struct mlx4_eqe *s_eqe =
+               &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
+
+       if ((!!(s_eqe->owner & 0x80)) ^
+           (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
+               mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. "
+                         "No free EQE on slave events queue\n", slave);
+               return;
+       }
+
+       memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
+       s_eqe->slave_id = slave;
+       /* Ensure all information is written before setting the ownership bit */
+       wmb();
+       s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
+       ++slave_eq->prod;
+
+       queue_work(priv->mfunc.master.comm_wq,
+                  &priv->mfunc.master.slave_event_work);
+}
+
+static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
+                            struct mlx4_eqe *eqe)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_slave_state *s_slave =
+               &priv->mfunc.master.slave_state[slave];
+
+       if (!s_slave->active) {
+               /*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/
+               return;
+       }
+
+       slave_event(dev, slave, eqe);
+}
+
+void mlx4_master_handle_slave_flr(struct work_struct *work)
+{
+       struct mlx4_mfunc_master_ctx *master =
+               container_of(work, struct mlx4_mfunc_master_ctx,
+                            slave_flr_event_work);
+       struct mlx4_mfunc *mfunc =
+               container_of(master, struct mlx4_mfunc, master);
+       struct mlx4_priv *priv =
+               container_of(mfunc, struct mlx4_priv, mfunc);
+       struct mlx4_dev *dev = &priv->dev;
+       struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+       int i;
+       int err;
+
+       mlx4_dbg(dev, "mlx4_handle_slave_flr\n");
+
+       for (i = 0 ; i < dev->num_slaves; i++) {
+
+               if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
+                       mlx4_dbg(dev, "mlx4_handle_slave_flr: "
+                                "clean slave: %d\n", i);
+
+                       mlx4_delete_all_resources_for_slave(dev, i);
+                       /*return the slave to running mode*/
+                       spin_lock(&priv->mfunc.master.slave_state_lock);
+                       slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
+                       slave_state[i].is_slave_going_down = 0;
+                       spin_unlock(&priv->mfunc.master.slave_state_lock);
+                       /*notify the FW:*/
+                       err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
+                                      MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+                       if (err)
+                               mlx4_warn(dev, "Failed to notify FW on "
+                                         "FLR done (slave:%d)\n", i);
+               }
+       }
+}
+
 static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 {
+       struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_eqe *eqe;
        int cqn;
        int eqes_found = 0;
        int set_ci = 0;
        int port;
+       int slave = 0;
+       int ret;
+       u32 flr_slave;
+       u8 update_slave_state;
+       int i;
 
        while ((eqe = next_eqe_sw(eq))) {
                /*
@@ -191,14 +262,68 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
                case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
-                       mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
-                                     eqe->type);
+                       mlx4_dbg(dev, "event %d arrived\n", eqe->type);
+                       if (mlx4_is_master(dev)) {
+                               /* forward only to slave owning the QP */
+                               ret = mlx4_get_slave_from_resource_id(dev,
+                                               RES_QP,
+                                               be32_to_cpu(eqe->event.qp.qpn)
+                                               & 0xffffff, &slave);
+                               if (ret && ret != -ENOENT) {
+                                       mlx4_dbg(dev, "QP event %02x(%02x) on "
+                                                "EQ %d at index %u: could "
+                                                "not get slave id (%d)\n",
+                                                eqe->type, eqe->subtype,
+                                                eq->eqn, eq->cons_index, ret);
+                                       break;
+                               }
+
+                               if (!ret && slave != dev->caps.function) {
+                                       mlx4_slave_event(dev, slave, eqe);
+                                       break;
+                               }
+
+                       }
+                       mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
+                                     0xffffff, eqe->type);
                        break;
 
                case MLX4_EVENT_TYPE_SRQ_LIMIT:
+                       mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
+                                 __func__);
                case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
-                       mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
-                                     eqe->type);
+                       if (mlx4_is_master(dev)) {
+                               /* forward only to slave owning the SRQ */
+                               ret = mlx4_get_slave_from_resource_id(dev,
+                                               RES_SRQ,
+                                               be32_to_cpu(eqe->event.srq.srqn)
+                                               & 0xffffff,
+                                               &slave);
+                               if (ret && ret != -ENOENT) {
+                                       mlx4_warn(dev, "SRQ event %02x(%02x) "
+                                                 "on EQ %d at index %u: could"
+                                                 " not get slave id (%d)\n",
+                                                 eqe->type, eqe->subtype,
+                                                 eq->eqn, eq->cons_index, ret);
+                                       break;
+                               }
+                               mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x,"
+                                         " event: %02x(%02x)\n", __func__,
+                                         slave,
+                                         be32_to_cpu(eqe->event.srq.srqn),
+                                         eqe->type, eqe->subtype);
+
+                               if (!ret && slave != dev->caps.function) {
+                                       mlx4_warn(dev, "%s: sending event "
+                                                 "%02x(%02x) to slave:%d\n",
+                                                  __func__, eqe->type,
+                                                 eqe->subtype, slave);
+                                       mlx4_slave_event(dev, slave, eqe);
+                                       break;
+                               }
+                       }
+                       mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
+                                      0xffffff, eqe->type);
                        break;
 
                case MLX4_EVENT_TYPE_CMD:
@@ -211,13 +336,35 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                case MLX4_EVENT_TYPE_PORT_CHANGE:
                        port = be32_to_cpu(eqe->event.port_change.port) >> 28;
                        if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
-                               mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
+                               mlx4_dispatch_event(dev,
+                                                   MLX4_DEV_EVENT_PORT_DOWN,
                                                    port);
                                mlx4_priv(dev)->sense.do_sense_port[port] = 1;
+                               if (mlx4_is_master(dev))
+                                       /* change the state of all slaves'
+                                        * ports to down */
+                                       for (i = 0; i < dev->num_slaves; i++) {
+                                               mlx4_dbg(dev, "%s: Sending "
+                                                        "MLX4_PORT_CHANGE_SUBTYPE_DOWN"
+                                                        " to slave: %d, port:%d\n",
+                                                        __func__, i, port);
+                                               if (i == dev->caps.function)
+                                                       continue;
+                                               mlx4_slave_event(dev, i, eqe);
+                                       }
                        } else {
-                               mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
+                               mlx4_dispatch_event(dev,
+                                                   MLX4_DEV_EVENT_PORT_UP,
                                                    port);
                                mlx4_priv(dev)->sense.do_sense_port[port] = 0;
+
+                               if (mlx4_is_master(dev)) {
+                                       for (i = 0; i < dev->num_slaves; i++) {
+                                               if (i == dev->caps.function)
+                                                       continue;
+                                               mlx4_slave_event(dev, i, eqe);
+                                       }
+                               }
                        }
                        break;
 
@@ -226,7 +373,28 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                                  eqe->event.cq_err.syndrome == 1 ?
                                  "overrun" : "access violation",
                                  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
-                       mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
+                       if (mlx4_is_master(dev)) {
+                               ret = mlx4_get_slave_from_resource_id(dev,
+                                       RES_CQ,
+                                       be32_to_cpu(eqe->event.cq_err.cqn)
+                                       & 0xffffff, &slave);
+                               if (ret && ret != -ENOENT) {
+                                       mlx4_dbg(dev, "CQ event %02x(%02x) on "
+                                                "EQ %d at index %u: could "
+                                                 "not get slave id (%d)\n",
+                                                 eqe->type, eqe->subtype,
+                                                 eq->eqn, eq->cons_index, ret);
+                                       break;
+                               }
+
+                               if (!ret && slave != dev->caps.function) {
+                                       mlx4_slave_event(dev, slave, eqe);
+                                       break;
+                               }
+                       }
+                       mlx4_cq_event(dev,
+                                     be32_to_cpu(eqe->event.cq_err.cqn)
+                                     & 0xffffff,
                                      eqe->type);
                        break;
 
@@ -234,13 +402,60 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                        mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
                        break;
 
+               case MLX4_EVENT_TYPE_COMM_CHANNEL:
+                       if (!mlx4_is_master(dev)) {
+                               mlx4_warn(dev, "Received comm channel event "
+                                              "for non master device\n");
+                               break;
+                       }
+                       memcpy(&priv->mfunc.master.comm_arm_bit_vector,
+                              eqe->event.comm_channel_arm.bit_vec,
+                              sizeof eqe->event.comm_channel_arm.bit_vec);
+                       queue_work(priv->mfunc.master.comm_wq,
+                                  &priv->mfunc.master.comm_work);
+                       break;
+
+               case MLX4_EVENT_TYPE_FLR_EVENT:
+                       flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
+                       if (!mlx4_is_master(dev)) {
+                               mlx4_warn(dev, "Non-master function received "
+                                              "FLR event\n");
+                               break;
+                       }
+
+                       mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);
+
+                       if (flr_slave >= dev->num_slaves) {
+                               mlx4_warn(dev,
+                                         "Got FLR for unknown function: %d\n",
+                                         flr_slave);
+                               update_slave_state = 0;
+                       } else
+                               update_slave_state = 1;
+
+                       spin_lock(&priv->mfunc.master.slave_state_lock);
+                       if (update_slave_state) {
+                               priv->mfunc.master.slave_state[flr_slave].active = false;
+                               priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
+                               priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
+                       }
+                       spin_unlock(&priv->mfunc.master.slave_state_lock);
+                       queue_work(priv->mfunc.master.comm_wq,
+                                  &priv->mfunc.master.slave_flr_event_work);
+                       break;
                case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
                case MLX4_EVENT_TYPE_ECC_DETECT:
                default:
-                       mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
-                                 eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
+                       mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at "
+                                 "index %u. owner=%x, nent=0x%x, slave=%x, "
+                                 "ownership=%s\n",
+                                 eqe->type, eqe->subtype, eq->eqn,
+                                 eq->cons_index, eqe->owner, eq->nent,
+                                 eqe->slave_id,
+                                 !!(eqe->owner & 0x80) ^
+                                 !!(eq->cons_index & eq->nent) ? "HW" : "SW");
                        break;
-               }
+               };
 
                ++eq->cons_index;
                eqes_found = 1;
@@ -290,25 +505,58 @@ static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
        return IRQ_HANDLED;
 }
 
+int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
+                       struct mlx4_vhcr *vhcr,
+                       struct mlx4_cmd_mailbox *inbox,
+                       struct mlx4_cmd_mailbox *outbox,
+                       struct mlx4_cmd_info *cmd)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_slave_event_eq_info *event_eq =
+               &priv->mfunc.master.slave_state[slave].event_eq;
+       u32 in_modifier = vhcr->in_modifier;
+       u32 eqn = in_modifier & 0x1FF;
+       u64 in_param =  vhcr->in_param;
+       int err = 0;
+
+       if (slave == dev->caps.function)
+               err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
+                              0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
+                              MLX4_CMD_NATIVE);
+       if (!err) {
+               if (in_modifier >> 31) {
+                       /* unmap */
+                       event_eq->event_type &= ~in_param;
+               } else {
+                       event_eq->eqn = eqn;
+                       event_eq->event_type = in_param;
+               }
+       }
+       return err;
+}
+
 static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
                        int eq_num)
 {
        return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
-                       0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);
+                       0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
+                       MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int eq_num)
 {
-       return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
-                       MLX4_CMD_TIME_CLASS_A);
+       return mlx4_cmd(dev, mailbox->dma | dev->caps.function, eq_num, 0,
+                       MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
+                       MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int eq_num)
 {
-       return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
-                           MLX4_CMD_TIME_CLASS_A);
+       return mlx4_cmd_box(dev, dev->caps.function, mailbox->dma, eq_num,
+                           0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A,
+                           MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_num_eq_uar(struct mlx4_dev *dev)
@@ -585,14 +833,16 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
        for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
                priv->eq_table.uar_map[i] = NULL;
 
-       err = mlx4_map_clr_int(dev);
-       if (err)
-               goto err_out_bitmap;
+       if (!mlx4_is_slave(dev)) {
+               err = mlx4_map_clr_int(dev);
+               if (err)
+                       goto err_out_bitmap;
 
-       priv->eq_table.clr_mask =
-               swab32(1 << (priv->eq_table.inta_pin & 31));
-       priv->eq_table.clr_int  = priv->clr_base +
-               (priv->eq_table.inta_pin < 32 ? 4 : 0);
+               priv->eq_table.clr_mask =
+                       swab32(1 << (priv->eq_table.inta_pin & 31));
+               priv->eq_table.clr_int  = priv->clr_base +
+                       (priv->eq_table.inta_pin < 32 ? 4 : 0);
+       }
 
        priv->eq_table.irq_names =
                kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
@@ -700,7 +950,8 @@ err_out_unmap:
                mlx4_free_eq(dev, &priv->eq_table.eq[i]);
                --i;
        }
-       mlx4_unmap_clr_int(dev);
+       if (!mlx4_is_slave(dev))
+               mlx4_unmap_clr_int(dev);
        mlx4_free_irqs(dev);
 
 err_out_bitmap:
@@ -725,7 +976,8 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
        for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
                mlx4_free_eq(dev, &priv->eq_table.eq[i]);
 
-       mlx4_unmap_clr_int(dev);
+       if (!mlx4_is_slave(dev))
+               mlx4_unmap_clr_int(dev);
 
        for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
                if (priv->eq_table.uar_map[i])
@@ -748,7 +1000,7 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
 
        err = mlx4_NOP(dev);
        /* When not in MSI_X, there is only one irq to check */
-       if (!(dev->flags & MLX4_FLAG_MSI_X))
+       if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
                return err;
 
        /* A loop over all completion vectors, for each vector we will check
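For readers following the new slave event queue added to eq.c above, here is a minimal, self-contained sketch of the owner-bit ring convention that next_slave_event_eqe() and slave_event() rely on. It is not part of the patch; every demo_-prefixed identifier is hypothetical, and only the XOR of the owner bit with the counter's wrap parity is taken from the driver code above.

/*
 * Standalone illustration of the owner-bit ring used by the slave event
 * queue above.  All names are hypothetical; the driver stores struct
 * mlx4_eqe entries, sizes the ring with SLAVE_EVENT_EQ_SIZE and issues
 * wmb() before publishing the owner bit.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_EQ_SIZE 128			/* power of two, like SLAVE_EVENT_EQ_SIZE */

struct demo_eqe {
	uint8_t owner;				/* bit 7 carries the ownership flag */
	uint8_t data;
};

static struct demo_eqe ring[DEMO_EQ_SIZE];
static uint32_t prod, cons;			/* free-running counters */

/* the predicate both sides use: owner bit XORed with the counter's wrap parity */
static bool owner_differs(const struct demo_eqe *e, uint32_t counter)
{
	return (!!(e->owner & 0x80)) ^ (!!(counter & DEMO_EQ_SIZE));
}

static bool demo_produce(uint8_t data)
{
	struct demo_eqe *e = &ring[prod & (DEMO_EQ_SIZE - 1)];

	if (owner_differs(e, prod))		/* the driver warns "No free EQE" here */
		return false;
	e->data = data;
	/* the driver issues wmb() at this point, then flips the owner bit */
	e->owner = (prod & DEMO_EQ_SIZE) ? 0x00 : 0x80;
	prod++;
	return true;
}

static bool demo_consume(uint8_t *data)
{
	struct demo_eqe *e = &ring[cons & (DEMO_EQ_SIZE - 1)];

	if (!owner_differs(e, cons))		/* nothing published for this lap */
		return false;
	*data = e->data;
	cons++;					/* no clearing: the next lap's wrap parity releases the slot */
	return true;
}

int main(void)
{
	uint8_t v;

	demo_produce(42);
	if (demo_consume(&v))
		printf("consumed %u\n", v);
	return 0;
}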
index 435ca6e..e0639eb 100644
@@ -32,6 +32,7 @@
  * SOFTWARE.
  */
 
+#include <linux/etherdevice.h>
 #include <linux/mlx4/cmd.h>
 #include <linux/module.h>
 #include <linux/cache.h>
@@ -48,7 +49,7 @@ enum {
 extern void __buggy_use_of_MLX4_GET(void);
 extern void __buggy_use_of_MLX4_PUT(void);
 
-static int enable_qos;
+static bool enable_qos;
 module_param(enable_qos, bool, 0444);
 MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
 
@@ -139,12 +140,185 @@ int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
        MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
 
        err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
-                       MLX4_CMD_TIME_CLASS_A);
+                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
 }
 
+int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
+                               struct mlx4_vhcr *vhcr,
+                               struct mlx4_cmd_mailbox *inbox,
+                               struct mlx4_cmd_mailbox *outbox,
+                               struct mlx4_cmd_info *cmd)
+{
+       u8      field;
+       u32     size;
+       int     err = 0;
+
+#define QUERY_FUNC_CAP_FLAGS_OFFSET            0x0
+#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET                0x1
+#define QUERY_FUNC_CAP_FUNCTION_OFFSET         0x3
+#define QUERY_FUNC_CAP_PF_BHVR_OFFSET          0x4
+#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET         0x10
+#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET         0x14
+#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET                0x18
+#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET                0x20
+#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET                0x24
+#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET                0x28
+#define QUERY_FUNC_CAP_MAX_EQ_OFFSET           0x2c
+#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET      0x30
+
+#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET                0x3
+#define QUERY_FUNC_CAP_ETH_PROPS_OFFSET                0xc
+
+       if (vhcr->op_modifier == 1) {
+               field = vhcr->in_modifier;
+               MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
+
+               field = 0; /* ensure fvl bit is not set */
+               MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
+       } else if (vhcr->op_modifier == 0) {
+               field = 1 << 7; /* enable only ethernet interface */
+               MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
+
+               field = slave;
+               MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FUNCTION_OFFSET);
+
+               field = dev->caps.num_ports;
+               MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
+
+               size = 0; /* no PF behaviour is set for now */
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
+
+               size = dev->caps.num_qps;
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+
+               size = dev->caps.num_srqs;
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+
+               size = dev->caps.num_cqs;
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+
+               size = dev->caps.num_eqs;
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
+
+               size = dev->caps.reserved_eqs;
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+
+               size = dev->caps.num_mpts;
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+
+               size = dev->caps.num_mtts;
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+
+               size = dev->caps.num_mgms + dev->caps.num_amgms;
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+
+       } else
+               err = -EINVAL;
+
+       return err;
+}
+
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       u32                     *outbox;
+       u8                      field;
+       u32                     size;
+       int                     i;
+       int                     err = 0;
+
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FUNC_CAP,
+                          MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+       if (err)
+               goto out;
+
+       outbox = mailbox->buf;
+
+       MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
+       if (!(field & (1 << 7))) {
+               mlx4_err(dev, "The host doesn't support eth interface\n");
+               err = -EPROTONOSUPPORT;
+               goto out;
+       }
+
+       MLX4_GET(field, outbox, QUERY_FUNC_CAP_FUNCTION_OFFSET);
+       func_cap->function = field;
+
+       MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
+       func_cap->num_ports = field;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
+       func_cap->pf_context_behaviour = size;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+       func_cap->qp_quota = size & 0xFFFFFF;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+       func_cap->srq_quota = size & 0xFFFFFF;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+       func_cap->cq_quota = size & 0xFFFFFF;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
+       func_cap->max_eq = size & 0xFFFFFF;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+       func_cap->reserved_eq = size & 0xFFFFFF;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+       func_cap->mpt_quota = size & 0xFFFFFF;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+       func_cap->mtt_quota = size & 0xFFFFFF;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+       func_cap->mcg_quota = size & 0xFFFFFF;
+
+       for (i = 1; i <= func_cap->num_ports; ++i) {
+               err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 1,
+                                  MLX4_CMD_QUERY_FUNC_CAP,
+                                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+               if (err)
+                       goto out;
+
+               MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
+               if (field & (1 << 7)) {
+                       mlx4_err(dev, "VLAN is enforced on this port\n");
+                       err = -EPROTONOSUPPORT;
+                       goto out;
+               }
+
+               if (field & (1 << 6)) {
+                       mlx4_err(dev, "Force mac is enabled on this port\n");
+                       err = -EPROTONOSUPPORT;
+                       goto out;
+               }
+
+               MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
+               func_cap->physical_port[i] = field;
+       }
+
+       /* All other resources are allocated by the master, but we still report
+        * 'num' and 'reserved' capabilities as follows:
+        * - num remains the maximum resource index
+        * - 'num - reserved' is the total available objects of a resource, but
+        *   resource indices may be less than 'reserved'
+        * TODO: set per-resource quotas */
+
+out:
+       mlx4_free_cmd_mailbox(dev, mailbox);
+
+       return err;
+}
+
 int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 {
        struct mlx4_cmd_mailbox *mailbox;
@@ -229,7 +403,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        outbox = mailbox->buf;
 
        err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
-                          MLX4_CMD_TIME_CLASS_A);
+                          MLX4_CMD_TIME_CLASS_A, !mlx4_is_slave(dev));
        if (err)
                goto out;
 
@@ -396,12 +570,15 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 
                for (i = 1; i <= dev_cap->num_ports; ++i) {
                        err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
-                                          MLX4_CMD_TIME_CLASS_B);
+                                          MLX4_CMD_TIME_CLASS_B,
+                                          !mlx4_is_slave(dev));
                        if (err)
                                goto out;
 
                        MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
                        dev_cap->supported_port_types[i] = field & 3;
+                       dev_cap->suggested_type[i] = (field >> 3) & 1;
+                       dev_cap->default_sense[i] = (field >> 4) & 1;
                        MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
                        dev_cap->ib_mtu[i]         = field & 0xf;
                        MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
@@ -470,6 +647,54 @@ out:
        return err;
 }
 
+int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
+                           struct mlx4_vhcr *vhcr,
+                           struct mlx4_cmd_mailbox *inbox,
+                           struct mlx4_cmd_mailbox *outbox,
+                           struct mlx4_cmd_info *cmd)
+{
+       u64 def_mac;
+       u8 port_type;
+       int err;
+
+       err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
+                          MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+                          MLX4_CMD_NATIVE);
+
+       if (!err && dev->caps.function != slave) {
+               /* set slave default_mac address */
+               MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
+               def_mac += slave << 8;
+               MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
+
+               /* get port type - currently only eth is enabled */
+               MLX4_GET(port_type, outbox->buf,
+                        QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+
+               /* disable ib */
+               port_type &= 0xFE;
+
+               /* check eth is enabled for this port */
+               if (!(port_type & 2))
+                       mlx4_dbg(dev, "QUERY PORT: eth not supported by host");
+
+               MLX4_PUT(outbox->buf, port_type,
+                        QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+       }
+
+       return err;
+}
+
+static int mlx4_QUERY_PORT(struct mlx4_dev *dev, void *ptr, u8 port)
+{
+       struct mlx4_cmd_mailbox *outbox = ptr;
+
+       return mlx4_cmd_box(dev, 0, outbox->dma, port, 0,
+                           MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+                           MLX4_CMD_WRAPPED);
+}
+EXPORT_SYMBOL_GPL(mlx4_QUERY_PORT);
+
 int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
 {
        struct mlx4_cmd_mailbox *mailbox;
@@ -519,7 +744,8 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
 
                        if (++nent == MLX4_MAILBOX_SIZE / 16) {
                                err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
-                                               MLX4_CMD_TIME_CLASS_B);
+                                               MLX4_CMD_TIME_CLASS_B,
+                                               MLX4_CMD_NATIVE);
                                if (err)
                                        goto out;
                                nent = 0;
@@ -528,7 +754,8 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
        }
 
        if (nent)
-               err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, MLX4_CMD_TIME_CLASS_B);
+               err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
+                              MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
        if (err)
                goto out;
 
@@ -557,13 +784,15 @@ int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
 
 int mlx4_UNMAP_FA(struct mlx4_dev *dev)
 {
-       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, MLX4_CMD_TIME_CLASS_B);
+       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
+                       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 }
 
 
 int mlx4_RUN_FW(struct mlx4_dev *dev)
 {
-       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, MLX4_CMD_TIME_CLASS_A);
+       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
+                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 }
 
 int mlx4_QUERY_FW(struct mlx4_dev *dev)
@@ -579,6 +808,7 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
 
 #define QUERY_FW_OUT_SIZE             0x100
 #define QUERY_FW_VER_OFFSET            0x00
+#define QUERY_FW_PPF_ID                       0x09
 #define QUERY_FW_CMD_IF_REV_OFFSET     0x0a
 #define QUERY_FW_MAX_CMD_OFFSET        0x0f
 #define QUERY_FW_ERR_START_OFFSET      0x30
@@ -589,13 +819,16 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
 #define QUERY_FW_CLR_INT_BASE_OFFSET   0x20
 #define QUERY_FW_CLR_INT_BAR_OFFSET    0x28
 
+#define QUERY_FW_COMM_BASE_OFFSET      0x40
+#define QUERY_FW_COMM_BAR_OFFSET       0x48
+
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        outbox = mailbox->buf;
 
        err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
-                           MLX4_CMD_TIME_CLASS_A);
+                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
        if (err)
                goto out;
 
@@ -608,6 +841,9 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
                ((fw_ver & 0xffff0000ull) >> 16) |
                ((fw_ver & 0x0000ffffull) << 16);
 
+       MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
+       dev->caps.function = lg;
+
        MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
        if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
            cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
@@ -649,6 +885,11 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
        MLX4_GET(fw->clr_int_bar,  outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
        fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;
 
+       MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
+       MLX4_GET(fw->comm_bar,  outbox, QUERY_FW_COMM_BAR_OFFSET);
+       fw->comm_bar = (fw->comm_bar >> 6) * 2;
+       mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
+                fw->comm_bar, fw->comm_base);
        mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
 
        /*
@@ -711,7 +952,7 @@ int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
        outbox = mailbox->buf;
 
        err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
-                          MLX4_CMD_TIME_CLASS_A);
+                          MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
        if (err)
                goto out;
 
@@ -743,6 +984,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 #define         INIT_HCA_LOG_SRQ_OFFSET         (INIT_HCA_QPC_OFFSET + 0x2f)
 #define         INIT_HCA_CQC_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x30)
 #define         INIT_HCA_LOG_CQ_OFFSET          (INIT_HCA_QPC_OFFSET + 0x37)
+#define         INIT_HCA_EQE_CQE_OFFSETS        (INIT_HCA_QPC_OFFSET + 0x38)
 #define         INIT_HCA_ALTC_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x40)
 #define         INIT_HCA_AUXC_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x50)
 #define         INIT_HCA_EQC_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x60)
@@ -831,10 +1073,11 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 
        /* UAR attributes */
 
-       MLX4_PUT(inbox, (u8) (PAGE_SHIFT - 12), INIT_HCA_UAR_PAGE_SZ_OFFSET);
+       MLX4_PUT(inbox, param->uar_page_sz,     INIT_HCA_UAR_PAGE_SZ_OFFSET);
        MLX4_PUT(inbox, param->log_uar_sz,      INIT_HCA_LOG_UAR_SZ_OFFSET);
 
-       err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000);
+       err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
+                      MLX4_CMD_NATIVE);
 
        if (err)
                mlx4_err(dev, "INIT_HCA returns %d\n", err);
@@ -843,6 +1086,101 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
        return err;
 }
 
+int mlx4_QUERY_HCA(struct mlx4_dev *dev,
+                  struct mlx4_init_hca_param *param)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       __be32 *outbox;
+       int err;
+
+#define QUERY_HCA_GLOBAL_CAPS_OFFSET   0x04
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       outbox = mailbox->buf;
+
+       err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
+                          MLX4_CMD_QUERY_HCA,
+                          MLX4_CMD_TIME_CLASS_B,
+                          !mlx4_is_slave(dev));
+       if (err)
+               goto out;
+
+       MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
+
+       /* QPC/EEC/CQC/EQC/RDMARC attributes */
+
+       MLX4_GET(param->qpc_base,      outbox, INIT_HCA_QPC_BASE_OFFSET);
+       MLX4_GET(param->log_num_qps,   outbox, INIT_HCA_LOG_QP_OFFSET);
+       MLX4_GET(param->srqc_base,     outbox, INIT_HCA_SRQC_BASE_OFFSET);
+       MLX4_GET(param->log_num_srqs,  outbox, INIT_HCA_LOG_SRQ_OFFSET);
+       MLX4_GET(param->cqc_base,      outbox, INIT_HCA_CQC_BASE_OFFSET);
+       MLX4_GET(param->log_num_cqs,   outbox, INIT_HCA_LOG_CQ_OFFSET);
+       MLX4_GET(param->altc_base,     outbox, INIT_HCA_ALTC_BASE_OFFSET);
+       MLX4_GET(param->auxc_base,     outbox, INIT_HCA_AUXC_BASE_OFFSET);
+       MLX4_GET(param->eqc_base,      outbox, INIT_HCA_EQC_BASE_OFFSET);
+       MLX4_GET(param->log_num_eqs,   outbox, INIT_HCA_LOG_EQ_OFFSET);
+       MLX4_GET(param->rdmarc_base,   outbox, INIT_HCA_RDMARC_BASE_OFFSET);
+       MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
+
+       /* multicast attributes */
+
+       MLX4_GET(param->mc_base,         outbox, INIT_HCA_MC_BASE_OFFSET);
+       MLX4_GET(param->log_mc_entry_sz, outbox,
+                INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+       MLX4_GET(param->log_mc_hash_sz,  outbox,
+                INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+       MLX4_GET(param->log_mc_table_sz, outbox,
+                INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+
+       /* TPT attributes */
+
+       MLX4_GET(param->dmpt_base,  outbox, INIT_HCA_DMPT_BASE_OFFSET);
+       MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+       MLX4_GET(param->mtt_base,   outbox, INIT_HCA_MTT_BASE_OFFSET);
+       MLX4_GET(param->cmpt_base,  outbox, INIT_HCA_CMPT_BASE_OFFSET);
+
+       /* UAR attributes */
+
+       MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
+       MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+
+out:
+       mlx4_free_cmd_mailbox(dev, mailbox);
+
+       return err;
+}
+
+int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int port = vhcr->in_modifier;
+       int err;
+
+       if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
+               return 0;
+
+       if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB)
+               return -ENODEV;
+
+       /* Enable port only if it was previously disabled */
+       if (!priv->mfunc.master.init_port_ref[port]) {
+               err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
+                              MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+               if (err)
+                       return err;
+               priv->mfunc.master.slave_state[slave].init_port_mask |=
+                       (1 << port);
+       }
+       ++priv->mfunc.master.init_port_ref[port];
+       return 0;
+}
+
 int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
 {
        struct mlx4_cmd_mailbox *mailbox;
@@ -886,33 +1224,62 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
                MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);
 
                err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
-                              MLX4_CMD_TIME_CLASS_A);
+                              MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 
                mlx4_free_cmd_mailbox(dev, mailbox);
        } else
                err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
-                              MLX4_CMD_TIME_CLASS_A);
+                              MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 
        return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
 
+int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
+                           struct mlx4_vhcr *vhcr,
+                           struct mlx4_cmd_mailbox *inbox,
+                           struct mlx4_cmd_mailbox *outbox,
+                           struct mlx4_cmd_info *cmd)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int port = vhcr->in_modifier;
+       int err;
+
+       if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
+           (1 << port)))
+               return 0;
+
+       if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB)
+               return -ENODEV;
+       if (priv->mfunc.master.init_port_ref[port] == 1) {
+               err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
+                              MLX4_CMD_NATIVE);
+               if (err)
+                       return err;
+       }
+       priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
+       --priv->mfunc.master.init_port_ref[port];
+       return 0;
+}
+
 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
 {
-       return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000);
+       return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
+                       MLX4_CMD_WRAPPED);
 }
 EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
 
 int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
 {
-       return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000);
+       return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000,
+                       MLX4_CMD_NATIVE);
 }
 
 int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
 {
        int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
                               MLX4_CMD_SET_ICM_SIZE,
-                              MLX4_CMD_TIME_CLASS_A);
+                              MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
        if (ret)
                return ret;
 
@@ -929,7 +1296,7 @@ int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
 int mlx4_NOP(struct mlx4_dev *dev)
 {
        /* Input modifier of 0x1f means "finish as soon as possible." */
-       return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100);
+       return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
 }
 
 #define MLX4_WOL_SETUP_MODE (5 << 28)
@@ -938,7 +1305,8 @@ int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
        u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
 
        return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
-                           MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A);
+                           MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
+                           MLX4_CMD_NATIVE);
 }
 EXPORT_SYMBOL_GPL(mlx4_wol_read);
 
@@ -947,6 +1315,6 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
        u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
 
        return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
-                                       MLX4_CMD_TIME_CLASS_A);
+                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 }
 EXPORT_SYMBOL_GPL(mlx4_wol_write);
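The QUERY_FUNC_CAP wrapper above reports per-function quotas through fixed byte offsets in a command mailbox. The standalone sketch below illustrates that fixed-offset, big-endian field convention; demo_put32/demo_get32 and the example quota value are assumptions for this illustration only and are not the driver's MLX4_PUT/MLX4_GET macros.

/*
 * Hypothetical illustration of fixed-offset, big-endian field access in a
 * command mailbox, in the spirit of the QUERY_FUNC_CAP wrapper above.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <arpa/inet.h>			/* htonl/ntohl for big-endian conversion */

#define DEMO_QP_QUOTA_OFFSET	0x10	/* mirrors QUERY_FUNC_CAP_QP_QUOTA_OFFSET */

static void demo_put32(void *buf, uint32_t val, int offset)
{
	uint32_t be = htonl(val);

	memcpy((uint8_t *)buf + offset, &be, sizeof(be));
}

static uint32_t demo_get32(const void *buf, int offset)
{
	uint32_t be;

	memcpy(&be, (const uint8_t *)buf + offset, sizeof(be));
	return ntohl(be);
}

int main(void)
{
	uint8_t mailbox[0x100] = { 0 };

	/* "wrapper" side: write an example quota into the outbox */
	demo_put32(mailbox, 1u << 18, DEMO_QP_QUOTA_OFFSET);

	/* "slave" side: read it back, masking the 24-bit quota field as
	 * mlx4_QUERY_FUNC_CAP does */
	printf("qp_quota = %u\n", demo_get32(mailbox, DEMO_QP_QUOTA_OFFSET) & 0xFFFFFF);
	return 0;
}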
index bf5ec22..119e0cc 100644
@@ -111,11 +111,30 @@ struct mlx4_dev_cap {
        u64 max_icm_sz;
        int max_gso_sz;
        u8  supported_port_types[MLX4_MAX_PORTS + 1];
+       u8  suggested_type[MLX4_MAX_PORTS + 1];
+       u8  default_sense[MLX4_MAX_PORTS + 1];
        u8  log_max_macs[MLX4_MAX_PORTS + 1];
        u8  log_max_vlans[MLX4_MAX_PORTS + 1];
        u32 max_counters;
 };
 
+struct mlx4_func_cap {
+       u8      function;
+       u8      num_ports;
+       u8      flags;
+       u32     pf_context_behaviour;
+       int     qp_quota;
+       int     cq_quota;
+       int     srq_quota;
+       int     mpt_quota;
+       int     mtt_quota;
+       int     max_eq;
+       int     reserved_eq;
+       int     mcg_quota;
+       u8      physical_port[MLX4_MAX_PORTS + 1];
+       u8      port_flags[MLX4_MAX_PORTS + 1];
+};
+
 struct mlx4_adapter {
        char board_id[MLX4_BOARD_ID_LEN];
        u8   inta_pin;
@@ -133,6 +152,7 @@ struct mlx4_init_hca_param {
        u64 dmpt_base;
        u64 cmpt_base;
        u64 mtt_base;
+       u64 global_caps;
        u16 log_mc_entry_sz;
        u16 log_mc_hash_sz;
        u8  log_num_qps;
@@ -143,6 +163,7 @@ struct mlx4_init_hca_param {
        u8  log_mc_table_sz;
        u8  log_mpt_sz;
        u8  log_uar_sz;
+       u8  uar_page_sz; /* log pg sz in 4k chunks */
 };
 
 struct mlx4_init_ib_param {
@@ -167,12 +188,19 @@ struct mlx4_set_ib_param {
 };
 
 int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap);
+int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
+                               struct mlx4_vhcr *vhcr,
+                               struct mlx4_cmd_mailbox *inbox,
+                               struct mlx4_cmd_mailbox *outbox,
+                               struct mlx4_cmd_info *cmd);
 int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm);
 int mlx4_UNMAP_FA(struct mlx4_dev *dev);
 int mlx4_RUN_FW(struct mlx4_dev *dev);
 int mlx4_QUERY_FW(struct mlx4_dev *dev);
 int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter);
 int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
+int mlx4_QUERY_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
 int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic);
 int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt);
 int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages);
index 02393fd..a9ade1c 100644
@@ -213,7 +213,7 @@ static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
 static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
 {
        return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
-                       MLX4_CMD_TIME_CLASS_B);
+                       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 }
 
 int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
@@ -223,7 +223,8 @@ int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
 
 int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
 {
-       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, MLX4_CMD_TIME_CLASS_B);
+       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX,
+                       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 }
 
 int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
index ca6feb5..b4e9f6f 100644
@@ -142,7 +142,8 @@ int mlx4_register_device(struct mlx4_dev *dev)
                mlx4_add_device(intf, priv);
 
        mutex_unlock(&intf_mutex);
-       mlx4_start_catas_poll(dev);
+       if (!mlx4_is_slave(dev))
+               mlx4_start_catas_poll(dev);
 
        return 0;
 }
@@ -152,7 +153,8 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_interface *intf;
 
-       mlx4_stop_catas_poll(dev);
+       if (!mlx4_is_slave(dev))
+               mlx4_stop_catas_poll(dev);
        mutex_lock(&intf_mutex);
 
        list_for_each_entry(intf, &intf_list, list)
index 94bbc85..1209934 100644
@@ -40,6 +40,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/io-mapping.h>
+#include <linux/delay.h>
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/doorbell.h>
@@ -75,21 +76,42 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
 
 #endif /* CONFIG_PCI_MSI */
 
+static int num_vfs;
+module_param(num_vfs, int, 0444);
+MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0");
+
+static int probe_vf;
+module_param(probe_vf, int, 0644);
+MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)");
+
+int mlx4_log_num_mgm_entry_size = 10;
+module_param_named(log_num_mgm_entry_size,
+                       mlx4_log_num_mgm_entry_size, int, 0444);
+MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
+                                        " of qp per mcg, for example:"
+                                        " 10 gives 248. range: 9 <="
+                                        " log_num_mgm_entry_size <= 12");
+
+#define MLX4_VF                                        (1 << 0)
+
+#define HCA_GLOBAL_CAP_MASK            0
+#define PF_CONTEXT_BEHAVIOUR_MASK      0
+
 static char mlx4_version[] __devinitdata =
        DRV_NAME ": Mellanox ConnectX core driver v"
        DRV_VERSION " (" DRV_RELDATE ")\n";
 
 static struct mlx4_profile default_profile = {
-       .num_qp         = 1 << 17,
+       .num_qp         = 1 << 18,
        .num_srq        = 1 << 16,
        .rdmarc_per_qp  = 1 << 4,
        .num_cq         = 1 << 16,
        .num_mcg        = 1 << 13,
-       .num_mpt        = 1 << 17,
+       .num_mpt        = 1 << 19,
        .num_mtt        = 1 << 20,
 };
 
-static int log_num_mac = 2;
+static int log_num_mac = 7;
 module_param_named(log_num_mac, log_num_mac, int, 0444);
 MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
 
@@ -99,15 +121,33 @@ MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
 /* Log2 max number of VLANs per ETH port (0-7) */
 #define MLX4_LOG_NUM_VLANS 7
 
-static int use_prio;
+static bool use_prio;
 module_param_named(use_prio, use_prio, bool, 0444);
 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
                  "(0/1, default 0)");
 
-static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
+int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
 module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
 MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
 
+static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
+static int arr_argc = 2;
+module_param_array(port_type_array, int, &arr_argc, 0444);
+MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default, "
+                               "1 for IB, 2 for Ethernet");
+
+struct mlx4_port_config {
+       struct list_head list;
+       enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
+       struct pci_dev *pdev;
+};
+
+static inline int mlx4_master_get_num_eqs(struct mlx4_dev *dev)
+{
+       return dev->caps.reserved_eqs +
+               MLX4_MFUNC_EQ_NUM * (dev->num_slaves + 1);
+}
+
 int mlx4_check_port_params(struct mlx4_dev *dev,
                           enum mlx4_port_type *port_type)
 {
@@ -140,10 +180,8 @@ static void mlx4_set_port_mask(struct mlx4_dev *dev)
 {
        int i;
 
-       dev->caps.port_mask = 0;
        for (i = 1; i <= dev->caps.num_ports; ++i)
-               if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
-                       dev->caps.port_mask |= 1 << (i - 1);
+               dev->caps.port_mask[i] = dev->caps.port_type[i];
 }
 
 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
@@ -188,12 +226,15 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                dev->caps.eth_mtu_cap[i]    = dev_cap->eth_mtu[i];
                dev->caps.def_mac[i]        = dev_cap->def_mac[i];
                dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
+               dev->caps.suggested_type[i] = dev_cap->suggested_type[i];
+               dev->caps.default_sense[i] = dev_cap->default_sense[i];
                dev->caps.trans_type[i]     = dev_cap->trans_type[i];
                dev->caps.vendor_oui[i]     = dev_cap->vendor_oui[i];
                dev->caps.wavelength[i]     = dev_cap->wavelength[i];
                dev->caps.trans_code[i]     = dev_cap->trans_code[i];
        }
 
+       dev->caps.uar_page_size      = PAGE_SIZE;
        dev->caps.num_uars           = dev_cap->uar_size / PAGE_SIZE;
        dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
        dev->caps.bf_reg_size        = dev_cap->bf_reg_size;
@@ -207,7 +248,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        dev->caps.reserved_srqs      = dev_cap->reserved_srqs;
        dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
        dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
-       dev->caps.num_qp_per_mgm     = MLX4_QP_PER_MGM;
+       dev->caps.num_qp_per_mgm     = mlx4_get_qp_per_mgm(dev);
        /*
         * Subtract 1 from the limit because we need to allocate a
         * spare CQE so the HCA HW can tell the difference between an
@@ -216,17 +257,18 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        dev->caps.max_cqes           = dev_cap->max_cq_sz - 1;
        dev->caps.reserved_cqs       = dev_cap->reserved_cqs;
        dev->caps.reserved_eqs       = dev_cap->reserved_eqs;
-       dev->caps.mtts_per_seg       = 1 << log_mtts_per_seg;
-       dev->caps.reserved_mtts      = DIV_ROUND_UP(dev_cap->reserved_mtts,
-                                                   dev->caps.mtts_per_seg);
+       dev->caps.reserved_mtts      = dev_cap->reserved_mtts;
        dev->caps.reserved_mrws      = dev_cap->reserved_mrws;
-       dev->caps.reserved_uars      = dev_cap->reserved_uars;
+
+       /* The first 128 UARs are used for EQ doorbells */
+       dev->caps.reserved_uars      = max_t(int, 128, dev_cap->reserved_uars);
        dev->caps.reserved_pds       = dev_cap->reserved_pds;
        dev->caps.reserved_xrcds     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
                                        dev_cap->reserved_xrcds : 0;
        dev->caps.max_xrcds          = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
                                        dev_cap->max_xrcds : 0;
-       dev->caps.mtt_entry_sz       = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
+       dev->caps.mtt_entry_sz       = dev_cap->mtt_entry_sz;
+
        dev->caps.max_msg_sz         = dev_cap->max_msg_sz;
        dev->caps.page_size_cap      = ~(u32) (dev_cap->min_page_sz - 1);
        dev->caps.flags              = dev_cap->flags;
@@ -235,18 +277,70 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
        dev->caps.max_gso_sz         = dev_cap->max_gso_sz;
 
+       /* Sense port is always allowed on ConnectX-1 and ConnectX-2 devices
+        * (i.e. any device other than ConnectX-3, PCI device id 0x1003) */
+       if (dev->pdev->device != 0x1003)
+               dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
+
        dev->caps.log_num_macs  = log_num_mac;
        dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
        dev->caps.log_num_prios = use_prio ? 3 : 0;
 
        for (i = 1; i <= dev->caps.num_ports; ++i) {
-               if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH)
-                       dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
-               else
-                       dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
-               dev->caps.possible_type[i] = dev->caps.port_type[i];
+               dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
+               if (dev->caps.supported_type[i]) {
+                       /* if only ETH is supported - assign ETH */
+                       if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
+                               dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+                       /* if only IB is supported,
+                        * assign IB only if SRIOV is off*/
+                       else if (dev->caps.supported_type[i] ==
+                                MLX4_PORT_TYPE_IB) {
+                               if (dev->flags & MLX4_FLAG_SRIOV)
+                                       dev->caps.port_type[i] =
+                                               MLX4_PORT_TYPE_NONE;
+                               else
+                                       dev->caps.port_type[i] =
+                                               MLX4_PORT_TYPE_IB;
+                       /* if IB and ETH are supported,
+                        * first of all check if SRIOV is on */
+                       } else if (dev->flags & MLX4_FLAG_SRIOV)
+                               dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+                       else {
+                               /* In non-SRIOV mode, we set the port type
+                                * according to user selection of port type;
+                                * if the user selected none, take the FW hint */
+                               if (port_type_array[i-1] == MLX4_PORT_TYPE_NONE)
+                                       dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
+                                               MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
+                               else
+                                       dev->caps.port_type[i] = port_type_array[i-1];
+                       }
+               }
+               /*
+                * Link sensing is allowed on the port if 3 conditions are true:
+                * 1. Both protocols are supported on the port.
+                * 2. Different port types can coexist on the device (DPDP).
+                * 3. FW declared that it supports link sensing.
+                */
                mlx4_priv(dev)->sense.sense_allowed[i] =
-                       dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO;
+                       ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
+                        (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
+                        (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));
+
+               /*
+                * If the "default_sense" bit is set, we move the port to "AUTO" mode
+                * and perform the SENSE_PORT FW command to try to set the correct
+                * port type from the beginning.
+                */
+               if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
+                       enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
+                       dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
+                       mlx4_SENSE_PORT(dev, i, &sensed_port);
+                       if (sensed_port != MLX4_PORT_TYPE_NONE)
+                               dev->caps.port_type[i] = sensed_port;
+               } else {
+                       dev->caps.possible_type[i] = dev->caps.port_type[i];
+               }
 
                if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
                        dev->caps.log_num_macs = dev_cap->log_max_macs[i];
@@ -262,8 +356,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                }
        }
 
-       mlx4_set_port_mask(dev);
-
        dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);
 
        dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
@@ -282,6 +374,149 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 
        return 0;
 }
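
The port-type assignment added above reduces to a small decision rule: an ETH-only port becomes ETH, an IB-only port becomes IB unless SRIOV is active, and a dual-protocol port becomes ETH under SRIOV or otherwise follows the user's module-parameter choice, falling back to the FW hint. A minimal standalone sketch of that rule, using illustrative names rather than the driver's types:

    enum port_type { TYPE_NONE, TYPE_IB, TYPE_ETH, TYPE_AUTO };

    /* Resolve a port's type from what it supports, whether SRIOV is on,
     * the user's choice, and the FW hint (illustrative abstraction). */
    static enum port_type resolve_port_type(enum port_type supported, int sriov,
                                            enum port_type user_choice,
                                            enum port_type fw_hint)
    {
            if (supported == TYPE_ETH)
                    return TYPE_ETH;                    /* ETH-only port */
            if (supported == TYPE_IB)
                    return sriov ? TYPE_NONE : TYPE_IB; /* IB is skipped under SRIOV */
            if (supported != TYPE_AUTO)
                    return TYPE_NONE;                   /* nothing supported */
            if (sriov)
                    return TYPE_ETH;                    /* dual-protocol port under SRIOV */
            return user_choice != TYPE_NONE ? user_choice : fw_hint;
    }
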
+/* The function checks whether there are live VFs and returns how many there are. */
+static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_slave_state *s_state;
+       int i;
+       int ret = 0;
+
+       for (i = 1 /* the PPF is 0 */; i < dev->num_slaves; ++i) {
+               s_state = &priv->mfunc.master.slave_state[i];
+               if (s_state->active && s_state->last_cmd !=
+                   MLX4_COMM_CMD_RESET) {
+                       mlx4_warn(dev, "%s: slave: %d is still active\n",
+                                 __func__, i);
+                       ret++;
+               }
+       }
+       return ret;
+}
+
+static int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_slave_state *s_slave;
+
+       if (!mlx4_is_master(dev))
+               return 0;
+
+       s_slave = &priv->mfunc.master.slave_state[slave];
+       return !!s_slave->active;
+}
+EXPORT_SYMBOL(mlx4_is_slave_active);
+
+static int mlx4_slave_cap(struct mlx4_dev *dev)
+{
+       int                        err;
+       u32                        page_size;
+       struct mlx4_dev_cap        dev_cap;
+       struct mlx4_func_cap       func_cap;
+       struct mlx4_init_hca_param hca_param;
+       int                        i;
+
+       memset(&hca_param, 0, sizeof(hca_param));
+       err = mlx4_QUERY_HCA(dev, &hca_param);
+       if (err) {
+               mlx4_err(dev, "QUERY_HCA command failed, aborting.\n");
+               return err;
+       }
+
+       /* Fail if the HCA has an unknown capability */
+       if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) !=
+           HCA_GLOBAL_CAP_MASK) {
+               mlx4_err(dev, "Unknown hca global capabilities\n");
+               return -ENOSYS;
+       }
+
+       mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
+
+       memset(&dev_cap, 0, sizeof(dev_cap));
+       err = mlx4_dev_cap(dev, &dev_cap);
+       if (err) {
+               mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+               return err;
+       }
+
+       page_size = ~dev->caps.page_size_cap + 1;
+       mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
+       if (page_size > PAGE_SIZE) {
+               mlx4_err(dev, "HCA minimum page size of %d bigger than "
+                        "kernel PAGE_SIZE of %ld, aborting.\n",
+                        page_size, PAGE_SIZE);
+               return -ENODEV;
+       }
+
+       /* slave gets uar page size from QUERY_HCA fw command */
+       dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);
+
+       /* TODO: relax this assumption */
+       if (dev->caps.uar_page_size != PAGE_SIZE) {
+               mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
+                        dev->caps.uar_page_size, PAGE_SIZE);
+               return -ENODEV;
+       }
+
+       memset(&func_cap, 0, sizeof(func_cap));
+       err = mlx4_QUERY_FUNC_CAP(dev, &func_cap);
+       if (err) {
+               mlx4_err(dev, "QUERY_FUNC_CAP command failed, aborting.\n");
+               return err;
+       }
+
+       if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
+           PF_CONTEXT_BEHAVIOUR_MASK) {
+               mlx4_err(dev, "Unknown pf context behaviour\n");
+               return -ENOSYS;
+       }
+
+       dev->caps.function              = func_cap.function;
+       dev->caps.num_ports             = func_cap.num_ports;
+       dev->caps.num_qps               = func_cap.qp_quota;
+       dev->caps.num_srqs              = func_cap.srq_quota;
+       dev->caps.num_cqs               = func_cap.cq_quota;
+       dev->caps.num_eqs               = func_cap.max_eq;
+       dev->caps.reserved_eqs          = func_cap.reserved_eq;
+       dev->caps.num_mpts              = func_cap.mpt_quota;
+       dev->caps.num_mtts              = func_cap.mtt_quota;
+       dev->caps.num_pds               = MLX4_NUM_PDS;
+       dev->caps.num_mgms              = 0;
+       dev->caps.num_amgms             = 0;
+
+       for (i = 1; i <= dev->caps.num_ports; ++i)
+               dev->caps.port_mask[i] = dev->caps.port_type[i];
+
+       if (dev->caps.num_ports > MLX4_MAX_PORTS) {
+               mlx4_err(dev, "HCA has %d ports, but we only support %d, "
+                        "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
+               return -ENODEV;
+       }
+
+       if (dev->caps.uar_page_size * (dev->caps.num_uars -
+                                      dev->caps.reserved_uars) >
+                                      pci_resource_len(dev->pdev, 2)) {
+               mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than "
+                        "PCI resource 2 size of 0x%llx, aborting.\n",
+                        dev->caps.uar_page_size * dev->caps.num_uars,
+                        (unsigned long long) pci_resource_len(dev->pdev, 2));
+               return -ENODEV;
+       }
+
+#if 0
+       mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux);
+       mlx4_warn(dev, "num_uars:%d reserved_uars:%d uar region:0x%x bar2:0x%llx\n",
+                 dev->caps.num_uars, dev->caps.reserved_uars,
+                 dev->caps.uar_page_size * dev->caps.num_uars,
+                 pci_resource_len(dev->pdev, 2));
+       mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs,
+                 dev->caps.reserved_eqs);
+       mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n",
+                 dev->caps.num_pds, dev->caps.reserved_pds,
+                 dev->caps.slave_pd_shift, dev->caps.pd_base);
+#endif
+       return 0;
+}
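
Both capability checks in mlx4_slave_cap() use the same idiom: OR-ing the reported capabilities with the mask of known bits can only add known bits, so any difference from the mask means an unknown bit was set. A tiny standalone illustration of the idiom (the mask value is made up, not the driver's):

    #include <stdbool.h>
    #include <stdint.h>

    #define KNOWN_CAPS_MASK 0x0000000fU     /* illustrative, not HCA_GLOBAL_CAP_MASK */

    /* True if 'caps' contains only bits listed in the known mask. */
    static bool caps_are_known(uint32_t caps)
    {
            return (caps | KNOWN_CAPS_MASK) == KNOWN_CAPS_MASK;
    }
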
 
 /*
  * Change the port configuration of the device.
@@ -377,7 +612,8 @@ static ssize_t set_port_type(struct device *dev,
                        types[i] = mdev->caps.port_type[i+1];
        }
 
-       if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
+       if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
+           !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
                for (i = 1; i <= mdev->caps.num_ports; i++) {
                        if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
                                mdev->caps.possible_type[i] = mdev->caps.port_type[i];
@@ -451,6 +687,7 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err;
+       int num_eqs;
 
        err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
                                  cmpt_base +
@@ -480,12 +717,14 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
        if (err)
                goto err_srq;
 
+       num_eqs = (mlx4_is_master(dev)) ?
+               roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
+               dev->caps.num_eqs;
        err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
                                  cmpt_base +
                                  ((u64) (MLX4_CMPT_TYPE_EQ *
                                          cmpt_entry_sz) << MLX4_CMPT_SHIFT),
-                                 cmpt_entry_sz,
-                                 dev->caps.num_eqs, dev->caps.num_eqs, 0, 0);
+                                 cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
        if (err)
                goto err_cq;
 
@@ -509,6 +748,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        u64 aux_pages;
+       int num_eqs;
        int err;
 
        err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
@@ -540,10 +780,13 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                goto err_unmap_aux;
        }
 
+
+       num_eqs = (mlx4_is_master(dev)) ?
+               roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
+               dev->caps.num_eqs;
        err = mlx4_init_icm_table(dev, &priv->eq_table.table,
                                  init_hca->eqc_base, dev_cap->eqc_entry_sz,
-                                 dev->caps.num_eqs, dev->caps.num_eqs,
-                                 0, 0);
+                                 num_eqs, num_eqs, 0, 0);
        if (err) {
                mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
                goto err_unmap_cmpt;
@@ -563,7 +806,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
        err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
                                  init_hca->mtt_base,
                                  dev->caps.mtt_entry_sz,
-                                 dev->caps.num_mtt_segs,
+                                 dev->caps.num_mtts,
                                  dev->caps.reserved_mtts, 1, 0);
        if (err) {
                mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
@@ -650,7 +893,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
         * and it's a lot easier than trying to track ref counts.
         */
        err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
-                                 init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
+                                 init_hca->mc_base,
+                                 mlx4_get_mgm_entry_size(dev),
                                  dev->caps.num_mgms + dev->caps.num_amgms,
                                  dev->caps.num_mgms + dev->caps.num_amgms,
                                  0, 0);
@@ -726,6 +970,16 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
        mlx4_free_icm(dev, priv->fw.aux_icm, 0);
 }
 
+static void mlx4_slave_exit(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       down(&priv->cmd.slave_sem);
+       if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
+               mlx4_warn(dev, "Failed to close slave function.\n");
+       up(&priv->cmd.slave_sem);
+}
+
 static int map_bf_area(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -733,8 +987,10 @@ static int map_bf_area(struct mlx4_dev *dev)
        resource_size_t bf_len;
        int err = 0;
 
-       bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT);
-       bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT);
+       bf_start = pci_resource_start(dev->pdev, 2) +
+                       (dev->caps.num_uars << PAGE_SHIFT);
+       bf_len = pci_resource_len(dev->pdev, 2) -
+                       (dev->caps.num_uars << PAGE_SHIFT);
        priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
        if (!priv->bf_mapping)
                err = -ENOMEM;
@@ -751,10 +1007,81 @@ static void unmap_bf_area(struct mlx4_dev *dev)
 static void mlx4_close_hca(struct mlx4_dev *dev)
 {
        unmap_bf_area(dev);
-       mlx4_CLOSE_HCA(dev, 0);
-       mlx4_free_icms(dev);
-       mlx4_UNMAP_FA(dev);
-       mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
+       if (mlx4_is_slave(dev))
+               mlx4_slave_exit(dev);
+       else {
+               mlx4_CLOSE_HCA(dev, 0);
+               mlx4_free_icms(dev);
+               mlx4_UNMAP_FA(dev);
+               mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
+       }
+}
+
+static int mlx4_init_slave(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       u64 dma = (u64) priv->mfunc.vhcr_dma;
+       int num_of_reset_retries = NUM_OF_RESET_RETRIES;
+       int ret_from_reset = 0;
+       u32 slave_read;
+       u32 cmd_channel_ver;
+
+       down(&priv->cmd.slave_sem);
+       priv->cmd.max_cmds = 1;
+       mlx4_warn(dev, "Sending reset\n");
+       ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
+                                      MLX4_COMM_TIME);
+       /* If we are in the middle of FLR, the slave will try
+        * NUM_OF_RESET_RETRIES times before leaving. */
+       if (ret_from_reset) {
+               if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
+                       msleep(SLEEP_TIME_IN_RESET);
+                       while (ret_from_reset && num_of_reset_retries) {
+                               mlx4_warn(dev, "slave is currently in the "
+                                         "middle of FLR. Retrying... "
+                                         "(try num:%d)\n",
+                                         (NUM_OF_RESET_RETRIES -
+                                          num_of_reset_retries  + 1));
+                               ret_from_reset =
+                                       mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET,
+                                                     0, MLX4_COMM_TIME);
+                               num_of_reset_retries = num_of_reset_retries - 1;
+                       }
+               } else
+                       goto err;
+       }
+
+       /* check the driver version - the slave I/F revision
+        * must match the master's */
+       slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
+       cmd_channel_ver = mlx4_comm_get_version();
+
+       if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
+               MLX4_COMM_GET_IF_REV(slave_read)) {
+               mlx4_err(dev, "slave driver version is not supported"
+                        " by the master\n");
+               goto err;
+       }
+
+       mlx4_warn(dev, "Sending vhcr0\n");
+       if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
+                                                   MLX4_COMM_TIME))
+               goto err;
+       if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
+                                                   MLX4_COMM_TIME))
+               goto err;
+       if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
+                                                   MLX4_COMM_TIME))
+               goto err;
+       if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
+               goto err;
+       up(&priv->cmd.slave_sem);
+       return 0;
+
+err:
+       mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
+       up(&priv->cmd.slave_sem);
+       return -EIO;
 }
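
The four comm-channel commands at the end of mlx4_init_slave() hand the 64-bit VHCR DMA address to the master 16 bits at a time, most significant chunk first, with the low 16 bits carried by the enable command. A standalone sketch of that split, using a stand-in for the real comm-channel write:

    #include <stdint.h>

    enum { CMD_VHCR0 = 1, CMD_VHCR1, CMD_VHCR2, CMD_VHCR_EN };

    /* Stand-in for the comm-channel write; always succeeds here. */
    static int comm_write(int cmd, uint16_t param)
    {
            (void)cmd;
            (void)param;
            return 0;
    }

    /* Push a 64-bit DMA address in four 16-bit pieces, high bits first. */
    static int send_vhcr_dma(uint64_t dma)
    {
            if (comm_write(CMD_VHCR0, (uint16_t)(dma >> 48)))
                    return -1;
            if (comm_write(CMD_VHCR1, (uint16_t)(dma >> 32)))
                    return -1;
            if (comm_write(CMD_VHCR2, (uint16_t)(dma >> 16)))
                    return -1;
            return comm_write(CMD_VHCR_EN, (uint16_t)dma);
    }
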
 
 static int mlx4_init_hca(struct mlx4_dev *dev)
@@ -768,56 +1095,76 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
        u64 icm_size;
        int err;
 
-       err = mlx4_QUERY_FW(dev);
-       if (err) {
-               if (err == -EACCES)
-                       mlx4_info(dev, "non-primary physical function, skipping.\n");
-               else
-                       mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
-               return err;
-       }
+       if (!mlx4_is_slave(dev)) {
+               err = mlx4_QUERY_FW(dev);
+               if (err) {
+                       if (err == -EACCES)
+                               mlx4_info(dev, "non-primary physical function, skipping.\n");
+                       else
+                               mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
+                       goto unmap_bf;
+               }
 
-       err = mlx4_load_fw(dev);
-       if (err) {
-               mlx4_err(dev, "Failed to start FW, aborting.\n");
-               return err;
-       }
+               err = mlx4_load_fw(dev);
+               if (err) {
+                       mlx4_err(dev, "Failed to start FW, aborting.\n");
+                       goto unmap_bf;
+               }
 
-       mlx4_cfg.log_pg_sz_m = 1;
-       mlx4_cfg.log_pg_sz = 0;
-       err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
-       if (err)
-               mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
+               mlx4_cfg.log_pg_sz_m = 1;
+               mlx4_cfg.log_pg_sz = 0;
+               err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
+               if (err)
+                       mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
 
-       err = mlx4_dev_cap(dev, &dev_cap);
-       if (err) {
-               mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
-               goto err_stop_fw;
-       }
+               err = mlx4_dev_cap(dev, &dev_cap);
+               if (err) {
+                       mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+                       goto err_stop_fw;
+               }
 
-       profile = default_profile;
+               profile = default_profile;
 
-       icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
-       if ((long long) icm_size < 0) {
-               err = icm_size;
-               goto err_stop_fw;
-       }
+               icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
+                                            &init_hca);
+               if ((long long) icm_size < 0) {
+                       err = icm_size;
+                       goto err_stop_fw;
+               }
 
-       if (map_bf_area(dev))
-               mlx4_dbg(dev, "Failed to map blue flame area\n");
+               init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
+               init_hca.uar_page_sz = PAGE_SHIFT - 12;
 
-       init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
+               err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
+               if (err)
+                       goto err_stop_fw;
 
-       err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
-       if (err)
-               goto err_stop_fw;
+               err = mlx4_INIT_HCA(dev, &init_hca);
+               if (err) {
+                       mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
+                       goto err_free_icm;
+               }
+       } else {
+               err = mlx4_init_slave(dev);
+               if (err) {
+                       mlx4_err(dev, "Failed to initialize slave\n");
+                       goto unmap_bf;
+               }
 
-       err = mlx4_INIT_HCA(dev, &init_hca);
-       if (err) {
-               mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
-               goto err_free_icm;
+               err = mlx4_slave_cap(dev);
+               if (err) {
+                       mlx4_err(dev, "Failed to obtain slave caps\n");
+                       goto err_close;
+               }
        }
 
+       if (map_bf_area(dev))
+               mlx4_dbg(dev, "Failed to map blue flame area\n");
+
+       /* Only the master sets the ports; all the others get the setting from it. */
+       if (!mlx4_is_slave(dev))
+               mlx4_set_port_mask(dev);
+
        err = mlx4_QUERY_ADAPTER(dev, &adapter);
        if (err) {
                mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
@@ -830,16 +1177,19 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
        return 0;
 
 err_close:
-       mlx4_CLOSE_HCA(dev, 0);
+       mlx4_close_hca(dev);
 
 err_free_icm:
-       mlx4_free_icms(dev);
+       if (!mlx4_is_slave(dev))
+               mlx4_free_icms(dev);
 
 err_stop_fw:
+       if (!mlx4_is_slave(dev)) {
+               mlx4_UNMAP_FA(dev);
+               mlx4_free_icm(dev, priv->fw.fw_icm, 0);
+       }
+unmap_bf:
        unmap_bf_area(dev);
-       mlx4_UNMAP_FA(dev);
-       mlx4_free_icm(dev, priv->fw.fw_icm, 0);
-
        return err;
 }
 
@@ -986,55 +1336,56 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
                goto err_srq_table_free;
        }
 
-       err = mlx4_init_mcg_table(dev);
-       if (err) {
-               mlx4_err(dev, "Failed to initialize "
-                        "multicast group table, aborting.\n");
-               goto err_qp_table_free;
+       if (!mlx4_is_slave(dev)) {
+               err = mlx4_init_mcg_table(dev);
+               if (err) {
+                       mlx4_err(dev, "Failed to initialize "
+                                "multicast group table, aborting.\n");
+                       goto err_qp_table_free;
+               }
        }
 
        err = mlx4_init_counters_table(dev);
        if (err && err != -ENOENT) {
                mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
-               goto err_counters_table_free;
+               goto err_mcg_table_free;
        }
 
-       for (port = 1; port <= dev->caps.num_ports; port++) {
-               enum mlx4_port_type port_type = 0;
-               mlx4_SENSE_PORT(dev, port, &port_type);
-               if (port_type)
-                       dev->caps.port_type[port] = port_type;
-               ib_port_default_caps = 0;
-               err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
-               if (err)
-                       mlx4_warn(dev, "failed to get port %d default "
-                                 "ib capabilities (%d). Continuing with "
-                                 "caps = 0\n", port, err);
-               dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
-
-               err = mlx4_check_ext_port_caps(dev, port);
-               if (err)
-                       mlx4_warn(dev, "failed to get port %d extended "
-                                 "port capabilities support info (%d)."
-                                 " Assuming not supported\n", port, err);
+       if (!mlx4_is_slave(dev)) {
+               for (port = 1; port <= dev->caps.num_ports; port++) {
+                       ib_port_default_caps = 0;
+                       err = mlx4_get_port_ib_caps(dev, port,
+                                                   &ib_port_default_caps);
+                       if (err)
+                               mlx4_warn(dev, "failed to get port %d default "
+                                         "ib capabilities (%d). Continuing "
+                                         "with caps = 0\n", port, err);
+                       dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
+
+                       err = mlx4_check_ext_port_caps(dev, port);
+                       if (err)
+                               mlx4_warn(dev, "failed to get port %d extended "
+                                         "port capabilities support info (%d)."
+                                         " Assuming not supported\n",
+                                         port, err);
 
-               err = mlx4_SET_PORT(dev, port);
-               if (err) {
-                       mlx4_err(dev, "Failed to set port %d, aborting\n",
-                               port);
-                       goto err_mcg_table_free;
+                       err = mlx4_SET_PORT(dev, port);
+                       if (err) {
+                               mlx4_err(dev, "Failed to set port %d, aborting\n",
+                                       port);
+                               goto err_counters_table_free;
+                       }
                }
        }
-       mlx4_set_port_mask(dev);
 
        return 0;
 
-err_mcg_table_free:
-       mlx4_cleanup_mcg_table(dev);
-
 err_counters_table_free:
        mlx4_cleanup_counters_table(dev);
 
+err_mcg_table_free:
+       mlx4_cleanup_mcg_table(dev);
+
 err_qp_table_free:
        mlx4_cleanup_qp_table(dev);
 
@@ -1081,8 +1432,16 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
        int i;
 
        if (msi_x) {
-               nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
-                            nreq);
+               /* In multifunction mode each function gets 2 MSI-X vectors:
+                * one for data path completions and the other for async events
+                * or command completions */
+               if (mlx4_is_mfunc(dev)) {
+                       nreq = 2;
+               } else {
+                       nreq = min_t(int, dev->caps.num_eqs -
+                                    dev->caps.reserved_eqs, nreq);
+               }
+
                entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
                if (!entries)
                        goto no_msi;
@@ -1138,16 +1497,24 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
 
        info->dev = dev;
        info->port = port;
-       mlx4_init_mac_table(dev, &info->mac_table);
-       mlx4_init_vlan_table(dev, &info->vlan_table);
-       info->base_qpn = dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
+       if (!mlx4_is_slave(dev)) {
+               INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL);
+               mlx4_init_mac_table(dev, &info->mac_table);
+               mlx4_init_vlan_table(dev, &info->vlan_table);
+               info->base_qpn =
+                       dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
                        (port - 1) * (1 << log_num_mac);
+       }
 
        sprintf(info->dev_name, "mlx4_port%d", port);
        info->port_attr.attr.name = info->dev_name;
-       info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
+       if (mlx4_is_mfunc(dev))
+               info->port_attr.attr.mode = S_IRUGO;
+       else {
+               info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
+               info->port_attr.store     = set_port_type;
+       }
        info->port_attr.show      = show_port_type;
-       info->port_attr.store     = set_port_type;
        sysfs_attr_init(&info->port_attr.attr);
 
        err = device_create_file(&dev->pdev->dev, &info->port_attr);
@@ -1220,6 +1587,46 @@ static void mlx4_clear_steering(struct mlx4_dev *dev)
        kfree(priv->steer);
 }
 
+static int extended_func_num(struct pci_dev *pdev)
+{
+       return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
+}
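
extended_func_num() flattens a PCI device/function pair into one index: each slot can expose up to eight functions, so the index is slot * 8 + func. A user-space check of the same arithmetic, using the standard devfn bit layout (slot in bits 7:3, function in bits 2:0):

    #include <stdio.h>

    #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
    #define PCI_FUNC(devfn) ((devfn) & 0x07)

    int main(void)
    {
            unsigned int devfn = (2 << 3) | 5;      /* slot 2, function 5 */

            /* 2 * 8 + 5 = 21 */
            printf("extended func num: %u\n",
                   PCI_SLOT(devfn) * 8 + PCI_FUNC(devfn));
            return 0;
    }
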
+
+#define MLX4_OWNER_BASE        0x8069c
+#define MLX4_OWNER_SIZE        4
+
+static int mlx4_get_ownership(struct mlx4_dev *dev)
+{
+       void __iomem *owner;
+       u32 ret;
+
+       owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
+                       MLX4_OWNER_SIZE);
+       if (!owner) {
+               mlx4_err(dev, "Failed to obtain ownership bit\n");
+               return -ENOMEM;
+       }
+
+       ret = readl(owner);
+       iounmap(owner);
+       return (int) !!ret;
+}
+
+static void mlx4_free_ownership(struct mlx4_dev *dev)
+{
+       void __iomem *owner;
+
+       owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
+                       MLX4_OWNER_SIZE);
+       if (!owner) {
+               mlx4_err(dev, "Failed to obtain ownership bit\n");
+               return;
+       }
+       writel(0, owner);
+       msleep(1000);
+       iounmap(owner);
+}
+
 static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        struct mlx4_priv *priv;
@@ -1235,13 +1642,20 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
                        "aborting.\n");
                return err;
        }
-
+       if (num_vfs > MLX4_MAX_NUM_VF) {
+               printk(KERN_ERR "There are more VFs (%d) than allowed (%d)\n",
+                      num_vfs, MLX4_MAX_NUM_VF);
+               return -EINVAL;
+       }
        /*
-        * Check for BARs.  We expect 0: 1MB
+        * Check for BARs.
         */
-       if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
-           pci_resource_len(pdev, 0) != 1 << 20) {
-               dev_err(&pdev->dev, "Missing DCS, aborting.\n");
+       if (((id == NULL) || !(id->driver_data & MLX4_VF)) &&
+           !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+               dev_err(&pdev->dev, "Missing DCS, aborting. "
+                       "(id == 0x%p, id->driver_data: 0x%lx,"
+                       " pci_resource_flags(pdev, 0): 0x%lx)\n", id,
+                       id ? id->driver_data : 0, pci_resource_flags(pdev, 0));
                err = -ENODEV;
                goto err_disable_pdev;
        }
@@ -1305,42 +1719,132 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
        mutex_init(&priv->bf_mutex);
 
        dev->rev_id = pdev->revision;
+       /* Detect if this device is a virtual function */
+       if (id && id->driver_data & MLX4_VF) {
+               /* When acting as pf, we normally skip vfs unless explicitly
+                * requested to probe them. */
+               if (num_vfs && extended_func_num(pdev) > probe_vf) {
+                       mlx4_warn(dev, "Skipping virtual function:%d\n",
+                                               extended_func_num(pdev));
+                       err = -ENODEV;
+                       goto err_free_dev;
+               }
+               mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
+               dev->flags |= MLX4_FLAG_SLAVE;
+       } else {
+               /* We reset the device and enable SRIOV only for physical
+                * devices.  Try to claim ownership on the device;
+                * if already taken, skip -- do not allow multiple PFs */
+               err = mlx4_get_ownership(dev);
+               if (err) {
+                       if (err < 0)
+                               goto err_free_dev;
+                       else {
+                               mlx4_warn(dev, "Multiple PFs not yet supported."
+                                         " Skipping PF.\n");
+                               err = -EINVAL;
+                               goto err_free_dev;
+                       }
+               }
 
-       /*
-        * Now reset the HCA before we touch the PCI capabilities or
-        * attempt a firmware command, since a boot ROM may have left
-        * the HCA in an undefined state.
-        */
-       err = mlx4_reset(dev);
-       if (err) {
-               mlx4_err(dev, "Failed to reset HCA, aborting.\n");
-               goto err_free_dev;
+               if (num_vfs) {
+                       mlx4_warn(dev, "Enabling sriov with:%d vfs\n", num_vfs);
+                       err = pci_enable_sriov(pdev, num_vfs);
+                       if (err) {
+                               mlx4_err(dev, "Failed to enable sriov, "
+                                        "continuing without sriov enabled"
+                                        " (err = %d).\n", err);
+                               num_vfs = 0;
+                               err = 0;
+                       } else {
+                               mlx4_warn(dev, "Running in master mode\n");
+                               dev->flags |= MLX4_FLAG_SRIOV |
+                                             MLX4_FLAG_MASTER;
+                               dev->num_vfs = num_vfs;
+                       }
+               }
+
+               /*
+                * Now reset the HCA before we touch the PCI capabilities or
+                * attempt a firmware command, since a boot ROM may have left
+                * the HCA in an undefined state.
+                */
+               err = mlx4_reset(dev);
+               if (err) {
+                       mlx4_err(dev, "Failed to reset HCA, aborting.\n");
+                       goto err_rel_own;
+               }
        }
 
+slave_start:
        if (mlx4_cmd_init(dev)) {
                mlx4_err(dev, "Failed to init command interface, aborting.\n");
-               goto err_free_dev;
+               goto err_sriov;
+       }
+
+       /* In slave functions, the communication channel must be initialized
+        * before posting commands. Also, init num_slaves before calling
+        * mlx4_init_hca */
+       if (mlx4_is_mfunc(dev)) {
+               if (mlx4_is_master(dev))
+                       dev->num_slaves = MLX4_MAX_NUM_SLAVES;
+               else {
+                       dev->num_slaves = 0;
+                       if (mlx4_multi_func_init(dev)) {
+                               mlx4_err(dev, "Failed to init slave mfunc"
+                                        " interface, aborting.\n");
+                               goto err_cmd;
+                       }
+               }
        }
 
        err = mlx4_init_hca(dev);
-       if (err)
-               goto err_cmd;
+       if (err) {
+               if (err == -EACCES) {
+                       /* Not the primary physical function;
+                        * running in slave mode */
+                       mlx4_cmd_cleanup(dev);
+                       dev->flags |= MLX4_FLAG_SLAVE;
+                       dev->flags &= ~MLX4_FLAG_MASTER;
+                       goto slave_start;
+               } else
+                       goto err_mfunc;
+       }
+
+       /* In master functions, the communication channel must be initialized
+        * after obtaining its address from fw */
+       if (mlx4_is_master(dev)) {
+               if (mlx4_multi_func_init(dev)) {
+                       mlx4_err(dev, "Failed to init master mfunc"
+                                " interface, aborting.\n");
+                       goto err_close;
+               }
+       }
 
        err = mlx4_alloc_eq_table(dev);
        if (err)
-               goto err_close;
+               goto err_master_mfunc;
 
        priv->msix_ctl.pool_bm = 0;
        spin_lock_init(&priv->msix_ctl.pool_lock);
 
        mlx4_enable_msi_x(dev);
-
-       err = mlx4_init_steering(dev);
-       if (err)
+       if ((mlx4_is_mfunc(dev)) &&
+           !(dev->flags & MLX4_FLAG_MSI_X)) {
+               mlx4_err(dev, "INTx is not supported in multi-function mode,"
+                        " aborting.\n");
                goto err_free_eq;
+       }
+
+       if (!mlx4_is_slave(dev)) {
+               err = mlx4_init_steering(dev);
+               if (err)
+                       goto err_free_eq;
+       }
 
        err = mlx4_setup_hca(dev);
-       if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
+       if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
+           !mlx4_is_mfunc(dev)) {
                dev->flags &= ~MLX4_FLAG_MSI_X;
                pci_disable_msix(pdev);
                err = mlx4_setup_hca(dev);
@@ -1383,20 +1887,37 @@ err_port:
        mlx4_cleanup_uar_table(dev);
 
 err_steer:
-       mlx4_clear_steering(dev);
+       if (!mlx4_is_slave(dev))
+               mlx4_clear_steering(dev);
 
 err_free_eq:
        mlx4_free_eq_table(dev);
 
+err_master_mfunc:
+       if (mlx4_is_master(dev))
+               mlx4_multi_func_cleanup(dev);
+
 err_close:
        if (dev->flags & MLX4_FLAG_MSI_X)
                pci_disable_msix(pdev);
 
        mlx4_close_hca(dev);
 
+err_mfunc:
+       if (mlx4_is_slave(dev))
+               mlx4_multi_func_cleanup(dev);
+
 err_cmd:
        mlx4_cmd_cleanup(dev);
 
+err_sriov:
+       if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV))
+               pci_disable_sriov(pdev);
+
+err_rel_own:
+       if (!mlx4_is_slave(dev))
+               mlx4_free_ownership(dev);
+
 err_free_dev:
        kfree(priv);
 
@@ -1424,6 +1945,12 @@ static void mlx4_remove_one(struct pci_dev *pdev)
        int p;
 
        if (dev) {
+               /* In SRIOV mode it is not allowed to unload the PF's
+                * driver while there are live VFs */
+               if (mlx4_is_master(dev)) {
+                       if (mlx4_how_many_lives_vf(dev))
+                               printk(KERN_ERR "Removing PF when there are assigned VFs!\n");
+               }
                mlx4_stop_sense(dev);
                mlx4_unregister_device(dev);
 
@@ -1443,17 +1970,31 @@ static void mlx4_remove_one(struct pci_dev *pdev)
                mlx4_cleanup_xrcd_table(dev);
                mlx4_cleanup_pd_table(dev);
 
+               if (mlx4_is_master(dev))
+                       mlx4_free_resource_tracker(dev);
+
                iounmap(priv->kar);
                mlx4_uar_free(dev, &priv->driver_uar);
                mlx4_cleanup_uar_table(dev);
-               mlx4_clear_steering(dev);
+               if (!mlx4_is_slave(dev))
+                       mlx4_clear_steering(dev);
                mlx4_free_eq_table(dev);
+               if (mlx4_is_master(dev))
+                       mlx4_multi_func_cleanup(dev);
                mlx4_close_hca(dev);
+               if (mlx4_is_slave(dev))
+                       mlx4_multi_func_cleanup(dev);
                mlx4_cmd_cleanup(dev);
 
                if (dev->flags & MLX4_FLAG_MSI_X)
                        pci_disable_msix(pdev);
+               if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV)) {
+                       mlx4_warn(dev, "Disabling sriov\n");
+                       pci_disable_sriov(pdev);
+               }
 
+               if (!mlx4_is_slave(dev))
+                       mlx4_free_ownership(dev);
                kfree(priv);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
@@ -1468,33 +2009,48 @@ int mlx4_restart_one(struct pci_dev *pdev)
 }
 
 static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
-       { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
-       { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
-       { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
-       { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
-       { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
-       { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
-       { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
-       { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
-       { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
-       { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
-       { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
-       { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
-       { PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */
-       { PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */
-       { PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */
-       { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
-       { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
-       { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
-       { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
-       { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
-       { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
-       { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
-       { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
-       { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
-       { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
-       { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
-       { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
+       /* MT25408 "Hermon" SDR */
+       { PCI_VDEVICE(MELLANOX, 0x6340), 0 },
+       /* MT25408 "Hermon" DDR */
+       { PCI_VDEVICE(MELLANOX, 0x634a), 0 },
+       /* MT25408 "Hermon" QDR */
+       { PCI_VDEVICE(MELLANOX, 0x6354), 0 },
+       /* MT25408 "Hermon" DDR PCIe gen2 */
+       { PCI_VDEVICE(MELLANOX, 0x6732), 0 },
+       /* MT25408 "Hermon" QDR PCIe gen2 */
+       { PCI_VDEVICE(MELLANOX, 0x673c), 0 },
+       /* MT25408 "Hermon" EN 10GigE */
+       { PCI_VDEVICE(MELLANOX, 0x6368), 0 },
+       /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
+       { PCI_VDEVICE(MELLANOX, 0x6750), 0 },
+       /* MT25458 ConnectX EN 10GBASE-T 10GigE */
+       { PCI_VDEVICE(MELLANOX, 0x6372), 0 },
+       /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
+       { PCI_VDEVICE(MELLANOX, 0x675a), 0 },
+       /* MT26468 ConnectX EN 10GigE PCIe gen2*/
+       { PCI_VDEVICE(MELLANOX, 0x6764), 0 },
+       /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
+       { PCI_VDEVICE(MELLANOX, 0x6746), 0 },
+       /* MT26478 ConnectX2 40GigE PCIe gen2 */
+       { PCI_VDEVICE(MELLANOX, 0x676e), 0 },
+       /* MT25400 Family [ConnectX-2 Virtual Function] */
+       { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_VF },
+       /* MT27500 Family [ConnectX-3] */
+       { PCI_VDEVICE(MELLANOX, 0x1003), 0 },
+       /* MT27500 Family [ConnectX-3 Virtual Function] */
+       { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_VF },
+       { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
+       { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
+       { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
+       { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
+       { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
+       { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
        { 0, }
 };
 
@@ -1523,6 +2079,12 @@ static int __init mlx4_verify_params(void)
                return -1;
        }
 
+       /* Check that the port-type module parameters form a legal combination */
+       if (port_type_array[0] == false && port_type_array[1] == true) {
+               printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
+               port_type_array[0] = true;
+       }
+
        return 0;
 }
 
index 978688c..0785d9b 100644 (file)
 
 static const u8 zero_gid[16];  /* automatically initialized to 0 */
 
+struct mlx4_mgm {
+       __be32                  next_gid_index;
+       __be32                  members_count;
+       u32                     reserved[2];
+       u8                      gid[16];
+       __be32                  qp[MLX4_MAX_QP_PER_MGM];
+};
+
+int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
+{
+       return min((1 << mlx4_log_num_mgm_entry_size), MLX4_MAX_MGM_ENTRY_SIZE);
+}
+
+int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
+{
+       return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
+}
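
The 4 * (entry_size / 16 - 2) expression follows from the mlx4_mgm layout above: next_gid_index, members_count and the reserved words fill one 16-byte line, the GID fills a second, and every remaining 16-byte line carries four 32-bit QP entries. A quick standalone check of the arithmetic:

    #include <stdio.h>

    /* QPs that fit in one MGM entry of the given size in bytes:
     * drop the two 16-byte header lines, then count four 4-byte
     * QP slots per remaining 16-byte line. */
    static int qp_per_mgm(int entry_size)
    {
            return 4 * (entry_size / 16 - 2);
    }

    int main(void)
    {
            printf("%d\n", qp_per_mgm(1024));       /* prints 248 */
            return 0;
    }
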
+
 static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
                           struct mlx4_cmd_mailbox *mailbox)
 {
        return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
-                           MLX4_CMD_TIME_CLASS_A);
+                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 }
 
 static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
                            struct mlx4_cmd_mailbox *mailbox)
 {
        return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
-                       MLX4_CMD_TIME_CLASS_A);
+                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 }
 
-static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 vep_num, u8 port, u8 steer,
+static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer,
                              struct mlx4_cmd_mailbox *mailbox)
 {
        u32 in_mod;
 
-       in_mod = (u32) vep_num << 24 | (u32) port << 16 | steer << 1;
+       in_mod = (u32) port << 16 | steer << 1;
        return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
-                       MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A);
+                       MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A,
+                       MLX4_CMD_NATIVE);
 }
 
 static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -75,7 +94,8 @@ static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
        int err;
 
        err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
-                          MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A);
+                          MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A,
+                          MLX4_CMD_NATIVE);
 
        if (!err)
                *hash = imm;
@@ -102,7 +122,7 @@ static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num,
  * Add new entry to steering data structure.
  * All promisc QPs should be added as well
  */
-static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int new_steering_entry(struct mlx4_dev *dev, u8 port,
                              enum mlx4_steer_type steer,
                              unsigned int index, u32 qpn)
 {
@@ -115,10 +135,8 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
        struct mlx4_promisc_qp *dqp = NULL;
        u32 prot;
        int err;
-       u8 pf_num;
 
-       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-       s_steer = &mlx4_priv(dev)->steer[pf_num];
+       s_steer = &mlx4_priv(dev)->steer[0];
        new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
        if (!new_entry)
                return -ENOMEM;
@@ -130,7 +148,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
        /* If the given qpn is also a promisc qp,
         * it should be inserted to duplicates list
         */
-       pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+       pqp = get_promisc_qp(dev, 0, steer, qpn);
        if (pqp) {
                dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
                if (!dqp) {
@@ -165,7 +183,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
                /* don't add already existing qpn */
                if (pqp->qpn == qpn)
                        continue;
-               if (members_count == MLX4_QP_PER_MGM) {
+               if (members_count == dev->caps.num_qp_per_mgm) {
                        /* out of space */
                        err = -ENOMEM;
                        goto out_mailbox;
@@ -193,7 +211,7 @@ out_alloc:
 }
 
 /* update the data structures with existing steering entry */
-static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
                                   enum mlx4_steer_type steer,
                                   unsigned int index, u32 qpn)
 {
@@ -201,12 +219,10 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
        struct mlx4_steer_index *tmp_entry, *entry = NULL;
        struct mlx4_promisc_qp *pqp;
        struct mlx4_promisc_qp *dqp;
-       u8 pf_num;
 
-       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-       s_steer = &mlx4_priv(dev)->steer[pf_num];
+       s_steer = &mlx4_priv(dev)->steer[0];
 
-       pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+       pqp = get_promisc_qp(dev, 0, steer, qpn);
        if (!pqp)
                return 0; /* nothing to do */
 
@@ -225,7 +241,7 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
         * we need to add it as a duplicate to this entry
         * for future references */
        list_for_each_entry(dqp, &entry->duplicates, list) {
-               if (qpn == dqp->qpn)
+               if (qpn == pqp->qpn)
                        return 0; /* qp is already duplicated */
        }
 
@@ -241,20 +257,18 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
 
 /* Check whether a qpn is a duplicate on steering entry
  * If so, it should not be removed from mgm */
-static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
                                  enum mlx4_steer_type steer,
                                  unsigned int index, u32 qpn)
 {
        struct mlx4_steer *s_steer;
        struct mlx4_steer_index *tmp_entry, *entry = NULL;
        struct mlx4_promisc_qp *dqp, *tmp_dqp;
-       u8 pf_num;
 
-       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-       s_steer = &mlx4_priv(dev)->steer[pf_num];
+       s_steer = &mlx4_priv(dev)->steer[0];
 
        /* if qp is not promisc, it cannot be duplicated */
-       if (!get_promisc_qp(dev, pf_num, steer, qpn))
+       if (!get_promisc_qp(dev, 0, steer, qpn))
                return false;
 
        /* The qp is promisc qp so it is a duplicate on this index
@@ -279,7 +293,7 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
 }
 
 /* If a steering entry contains only promisc QPs, it can be removed. */
-static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
                                      enum mlx4_steer_type steer,
                                      unsigned int index, u32 tqpn)
 {
@@ -291,10 +305,8 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
        u32 members_count;
        bool ret = false;
        int i;
-       u8 pf_num;
 
-       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-       s_steer = &mlx4_priv(dev)->steer[pf_num];
+       s_steer = &mlx4_priv(dev)->steer[0];
 
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
@@ -306,7 +318,7 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
        members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
        for (i = 0;  i < members_count; i++) {
                qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
-               if (!get_promisc_qp(dev, pf_num, steer, qpn) && qpn != tqpn) {
+               if (!get_promisc_qp(dev, 0, steer, qpn) && qpn != tqpn) {
                        /* the qp is not promisc, the entry can't be removed */
                        goto out;
                }
@@ -332,7 +344,7 @@ out:
        return ret;
 }
 
-static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
                          enum mlx4_steer_type steer, u32 qpn)
 {
        struct mlx4_steer *s_steer;
@@ -347,14 +359,13 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
        bool found;
        int last_index;
        int err;
-       u8 pf_num;
        struct mlx4_priv *priv = mlx4_priv(dev);
-       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-       s_steer = &mlx4_priv(dev)->steer[pf_num];
+
+       s_steer = &mlx4_priv(dev)->steer[0];
 
        mutex_lock(&priv->mcg_table.mutex);
 
-       if (get_promisc_qp(dev, pf_num, steer, qpn)) {
+       if (get_promisc_qp(dev, 0, steer, qpn)) {
                err = 0;  /* Noting to do, already exists */
                goto out_mutex;
        }
@@ -397,7 +408,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
                }
                if (!found) {
                        /* Need to add the qpn to mgm */
-                       if (members_count == MLX4_QP_PER_MGM) {
+                       if (members_count == dev->caps.num_qp_per_mgm) {
                                /* entry is full */
                                err = -ENOMEM;
                                goto out_mailbox;
@@ -420,7 +431,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
                mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
        mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
 
-       err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
+       err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
        if (err)
                goto out_list;
 
@@ -439,7 +450,7 @@ out_mutex:
        return err;
 }
 
-static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
                             enum mlx4_steer_type steer, u32 qpn)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -454,13 +465,11 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
        bool back_to_list = false;
        int loc, i;
        int err;
-       u8 pf_num;
 
-       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-       s_steer = &mlx4_priv(dev)->steer[pf_num];
+       s_steer = &mlx4_priv(dev)->steer[0];
        mutex_lock(&priv->mcg_table.mutex);
 
-       pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+       pqp = get_promisc_qp(dev, 0, steer, qpn);
        if (unlikely(!pqp)) {
                mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
                /* nothing to do */
@@ -479,12 +488,13 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
                goto out_list;
        }
        mgm = mailbox->buf;
+       memset(mgm, 0, sizeof *mgm);
        members_count = 0;
        list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
                mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
        mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
 
-       err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
+       err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
        if (err)
                goto out_mailbox;
 
@@ -649,12 +659,13 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                }
                index += dev->caps.num_mgms;
 
+               new_entry = 1;
                memset(mgm, 0, sizeof *mgm);
                memcpy(mgm->gid, gid, 16);
        }
 
        members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
-       if (members_count == MLX4_QP_PER_MGM) {
+       if (members_count == dev->caps.num_qp_per_mgm) {
                mlx4_err(dev, "MGM at index %x is full.\n", index);
                err = -ENOMEM;
                goto out;
@@ -696,9 +707,9 @@ out:
        if (prot == MLX4_PROT_ETH) {
                /* manage the steering entry for promisc mode */
                if (new_entry)
-                       new_steering_entry(dev, 0, port, steer, index, qp->qpn);
+                       new_steering_entry(dev, port, steer, index, qp->qpn);
                else
-                       existing_steering_entry(dev, 0, port, steer,
+                       existing_steering_entry(dev, port, steer,
                                                index, qp->qpn);
        }
        if (err && link && index != -1) {
@@ -749,7 +760,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 
         /* if this qp is also a promisc qp, it shouldn't be removed */
        if (prot == MLX4_PROT_ETH &&
-           check_duplicate_entry(dev, 0, port, steer, index, qp->qpn))
+           check_duplicate_entry(dev, port, steer, index, qp->qpn))
                goto out;
 
        members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
@@ -769,7 +780,8 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
        mgm->qp[i - 1]     = 0;
 
        if (prot == MLX4_PROT_ETH)
-               removed_entry = can_remove_steering_entry(dev, 0, port, steer, index, qp->qpn);
+               removed_entry = can_remove_steering_entry(dev, port, steer,
+                                                               index, qp->qpn);
        if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
                err = mlx4_WRITE_ENTRY(dev, index, mailbox);
                goto out;
@@ -828,6 +840,34 @@ out:
        return err;
 }
 
+static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
+                         u8 gid[16], u8 attach, u8 block_loopback,
+                         enum mlx4_protocol prot)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       int err = 0;
+       int qpn;
+
+       if (!mlx4_is_mfunc(dev))
+               return -EBADF;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       memcpy(mailbox->buf, gid, 16);
+       qpn = qp->qpn;
+       qpn |= (prot << 28);
+       if (attach && block_loopback)
+               qpn |= (1 << 31);
+
+       err = mlx4_cmd(dev, mailbox->dma, qpn, attach,
+                      MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A,
+                      MLX4_CMD_WRAPPED);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
 
 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                          int block_mcast_loopback, enum mlx4_protocol prot)
@@ -843,9 +883,12 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
        if (prot == MLX4_PROT_ETH)
                gid[7] |= (steer << 1);
 
-       return mlx4_qp_attach_common(dev, qp, gid,
-                                    block_mcast_loopback, prot,
-                                    steer);
+       if (mlx4_is_mfunc(dev))
+               return mlx4_QP_ATTACH(dev, qp, gid, 1,
+                                       block_mcast_loopback, prot);
+
+       return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
+                                       prot, steer);
 }
 EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
 
@@ -860,22 +903,90 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                        !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
                return 0;
 
-       if (prot == MLX4_PROT_ETH) {
+       if (prot == MLX4_PROT_ETH)
                gid[7] |= (steer << 1);
-       }
+
+       if (mlx4_is_mfunc(dev))
+               return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
 
        return mlx4_qp_detach_common(dev, qp, gid, prot, steer);
 }
 EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
 
+int mlx4_unicast_attach(struct mlx4_dev *dev,
+                       struct mlx4_qp *qp, u8 gid[16],
+                       int block_mcast_loopback, enum mlx4_protocol prot)
+{
+       if (prot == MLX4_PROT_ETH &&
+                       !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
+               return 0;
+
+       if (prot == MLX4_PROT_ETH)
+               gid[7] |= (MLX4_UC_STEER << 1);
+
+       if (mlx4_is_mfunc(dev))
+               return mlx4_QP_ATTACH(dev, qp, gid, 1,
+                                       block_mcast_loopback, prot);
+
+       return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
+                                       prot, MLX4_UC_STEER);
+}
+EXPORT_SYMBOL_GPL(mlx4_unicast_attach);
+
+int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
+                              u8 gid[16], enum mlx4_protocol prot)
+{
+       if (prot == MLX4_PROT_ETH &&
+                       !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
+               return 0;
+
+       if (prot == MLX4_PROT_ETH)
+               gid[7] |= (MLX4_UC_STEER << 1);
+
+       if (mlx4_is_mfunc(dev))
+               return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
+
+       return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER);
+}
+EXPORT_SYMBOL_GPL(mlx4_unicast_detach);
+
+int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
+                        struct mlx4_vhcr *vhcr,
+                        struct mlx4_cmd_mailbox *inbox,
+                        struct mlx4_cmd_mailbox *outbox,
+                        struct mlx4_cmd_info *cmd)
+{
+       u32 qpn = (u32) vhcr->in_param & 0xffffffff;
+       u8 port = vhcr->in_param >> 62;
+       enum mlx4_steer_type steer = vhcr->in_modifier;
+
+       /* Promiscuous unicast is not allowed in mfunc */
+       if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
+               return 0;
+
+       if (vhcr->op_modifier)
+               return add_promisc_qp(dev, port, steer, qpn);
+       else
+               return remove_promisc_qp(dev, port, steer, qpn);
+}
+
+static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
+                       enum mlx4_steer_type steer, u8 add, u8 port)
+{
+       return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add,
+                       MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A,
+                       MLX4_CMD_WRAPPED);
+}
 
 int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
 {
        if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
                return 0;
 
+       if (mlx4_is_mfunc(dev))
+               return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);
 
-       return add_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
+       return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
 }
 EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
 
@@ -884,8 +995,10 @@ int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
        if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
                return 0;
 
+       if (mlx4_is_mfunc(dev))
+               return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);
 
-       return remove_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
+       return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
 }
 EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
 
@@ -894,8 +1007,10 @@ int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
        if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
                return 0;
 
+       if (mlx4_is_mfunc(dev))
+               return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);
 
-       return add_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
+       return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
 }
 EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
 
@@ -904,7 +1019,10 @@ int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
        if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
                return 0;
 
-       return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
+       if (mlx4_is_mfunc(dev))
+               return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);
+
+       return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
 }
 EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
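
The mcg.c hunks above share one pattern: when the device runs in multi-function mode (mlx4_is_mfunc()), the attach/detach and promiscuous entry points no longer manipulate the steering tables directly; they forward the request as a wrapped firmware command (MLX4_CMD_QP_ATTACH or MLX4_CMD_PROMISC), and only the native path still calls the *_common helpers. The stand-alone C sketch below mirrors only the bit packing that mlx4_QP_ATTACH() performs on its immediate parameter; the placement of the QP number, protocol value and block-loopback flag comes from the hunk above, while the function name and sample values are illustrative only and not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch only; not part of the patch.  Mirrors how
 * mlx4_QP_ATTACH() above packs its immediate parameter: the QP number
 * in the low bits, the protocol value shifted to bit 28, and the
 * block-loopback flag in bit 31 when attaching. */
static uint32_t pack_qp_attach(uint32_t qpn, uint32_t prot,
                               int attach, int block_loopback)
{
        uint32_t v = qpn;

        v |= prot << 28;
        if (attach && block_loopback)
                v |= 1u << 31;
        return v;
}

int main(void)
{
        /* e.g. QP 0x1234, protocol value 1, attach with loopback blocked */
        printf("0x%08x\n", pack_qp_attach(0x1234, 1, 1, 1));
        return 0;
}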
 
index 5dfa68f..a80121a 100644
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/driver.h>
 #include <linux/mlx4/doorbell.h>
+#include <linux/mlx4/cmd.h>
 
 #define DRV_NAME       "mlx4_core"
-#define DRV_VERSION    "1.0"
-#define DRV_RELDATE    "July 14, 2011"
+#define PFX            DRV_NAME ": "
+#define DRV_VERSION    "1.1"
+#define DRV_RELDATE    "Dec, 2011"
 
 enum {
        MLX4_HCR_BASE           = 0x80680,
        MLX4_HCR_SIZE           = 0x0001c,
-       MLX4_CLR_INT_SIZE       = 0x00008
+       MLX4_CLR_INT_SIZE       = 0x00008,
+       MLX4_SLAVE_COMM_BASE    = 0x0,
+       MLX4_COMM_PAGESIZE      = 0x1000
 };
 
 enum {
-       MLX4_MGM_ENTRY_SIZE     =  0x100,
-       MLX4_QP_PER_MGM         = 4 * (MLX4_MGM_ENTRY_SIZE / 16 - 2),
-       MLX4_MTT_ENTRY_PER_SEG  = 8
+       MLX4_MAX_MGM_ENTRY_SIZE = 0x1000,
+       MLX4_MAX_QP_PER_MGM     = 4 * (MLX4_MAX_MGM_ENTRY_SIZE / 16 - 2),
+       MLX4_MTT_ENTRY_PER_SEG  = 8,
 };
 
 enum {
@@ -80,6 +84,94 @@ enum {
        MLX4_NUM_CMPTS          = MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT
 };
 
+enum mlx4_mr_state {
+       MLX4_MR_DISABLED = 0,
+       MLX4_MR_EN_HW,
+       MLX4_MR_EN_SW
+};
+
+#define MLX4_COMM_TIME         10000
+enum {
+       MLX4_COMM_CMD_RESET,
+       MLX4_COMM_CMD_VHCR0,
+       MLX4_COMM_CMD_VHCR1,
+       MLX4_COMM_CMD_VHCR2,
+       MLX4_COMM_CMD_VHCR_EN,
+       MLX4_COMM_CMD_VHCR_POST,
+       MLX4_COMM_CMD_FLR = 254
+};
+
+/* The flag indicates that the slave should delay the RESET cmd */
+#define MLX4_DELAY_RESET_SLAVE 0xbbbbbbb
+/* Indicates how many retries will be done if we are in the middle of FLR */
+#define NUM_OF_RESET_RETRIES   10
+#define SLEEP_TIME_IN_RESET    (2 * 1000)
+enum mlx4_resource {
+       RES_QP,
+       RES_CQ,
+       RES_SRQ,
+       RES_XRCD,
+       RES_MPT,
+       RES_MTT,
+       RES_MAC,
+       RES_VLAN,
+       RES_EQ,
+       RES_COUNTER,
+       MLX4_NUM_OF_RESOURCE_TYPE
+};
+
+enum mlx4_alloc_mode {
+       RES_OP_RESERVE,
+       RES_OP_RESERVE_AND_MAP,
+       RES_OP_MAP_ICM,
+};
+
+
+/*
+ * Virtual HCR structures.
+ * mlx4_vhcr is the sw representation, in machine endianness.
+ *
+ * mlx4_vhcr_cmd is the formalized structure, the one that is passed
+ * to FW to go through the communication channel.
+ * It is big endian, and has the same structure as the physical HCR
+ * used by the command interface.
+ */
+struct mlx4_vhcr {
+       u64     in_param;
+       u64     out_param;
+       u32     in_modifier;
+       u32     errno;
+       u16     op;
+       u16     token;
+       u8      op_modifier;
+       u8      e_bit;
+};
+
+struct mlx4_vhcr_cmd {
+       __be64 in_param;
+       __be32 in_modifier;
+       __be64 out_param;
+       __be16 token;
+       u16 reserved;
+       u8 status;
+       u8 flags;
+       __be16 opcode;
+};
+
+struct mlx4_cmd_info {
+       u16 opcode;
+       bool has_inbox;
+       bool has_outbox;
+       bool out_is_imm;
+       bool encode_slave_id;
+       int (*verify)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+                     struct mlx4_cmd_mailbox *inbox);
+       int (*wrapper)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+                      struct mlx4_cmd_mailbox *inbox,
+                      struct mlx4_cmd_mailbox *outbox,
+                      struct mlx4_cmd_info *cmd);
+};
+
 #ifdef CONFIG_MLX4_DEBUG
 extern int mlx4_debug_level;
 #else /* CONFIG_MLX4_DEBUG */
@@ -99,6 +191,12 @@ do {                                                                        \
 #define mlx4_warn(mdev, format, arg...) \
        dev_warn(&mdev->pdev->dev, format, ##arg)
 
+extern int mlx4_log_num_mgm_entry_size;
+extern int log_mtts_per_seg;
+
+#define MLX4_MAX_NUM_SLAVES    (MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF)
+#define ALL_SLAVES 0xff
+
 struct mlx4_bitmap {
        u32                     last;
        u32                     top;
@@ -130,6 +228,147 @@ struct mlx4_icm_table {
        struct mlx4_icm       **icm;
 };
 
+/*
+ * Must be packed because mtt_addr is 64 bits but only aligned to 32 bits.
+ */
+struct mlx4_mpt_entry {
+       __be32 flags;
+       __be32 qpn;
+       __be32 key;
+       __be32 pd_flags;
+       __be64 start;
+       __be64 length;
+       __be32 lkey;
+       __be32 win_cnt;
+       u8      reserved1[3];
+       u8      mtt_rep;
+       __be64 mtt_addr;
+       __be32 mtt_sz;
+       __be32 entity_size;
+       __be32 first_byte_offset;
+} __packed;
+
+/*
+ * Must be packed because start is 64 bits but only aligned to 32 bits.
+ */
+struct mlx4_eq_context {
+       __be32                  flags;
+       u16                     reserved1[3];
+       __be16                  page_offset;
+       u8                      log_eq_size;
+       u8                      reserved2[4];
+       u8                      eq_period;
+       u8                      reserved3;
+       u8                      eq_max_count;
+       u8                      reserved4[3];
+       u8                      intr;
+       u8                      log_page_size;
+       u8                      reserved5[2];
+       u8                      mtt_base_addr_h;
+       __be32                  mtt_base_addr_l;
+       u32                     reserved6[2];
+       __be32                  consumer_index;
+       __be32                  producer_index;
+       u32                     reserved7[4];
+};
+
+struct mlx4_cq_context {
+       __be32                  flags;
+       u16                     reserved1[3];
+       __be16                  page_offset;
+       __be32                  logsize_usrpage;
+       __be16                  cq_period;
+       __be16                  cq_max_count;
+       u8                      reserved2[3];
+       u8                      comp_eqn;
+       u8                      log_page_size;
+       u8                      reserved3[2];
+       u8                      mtt_base_addr_h;
+       __be32                  mtt_base_addr_l;
+       __be32                  last_notified_index;
+       __be32                  solicit_producer_index;
+       __be32                  consumer_index;
+       __be32                  producer_index;
+       u32                     reserved4[2];
+       __be64                  db_rec_addr;
+};
+
+struct mlx4_srq_context {
+       __be32                  state_logsize_srqn;
+       u8                      logstride;
+       u8                      reserved1;
+       __be16                  xrcd;
+       __be32                  pg_offset_cqn;
+       u32                     reserved2;
+       u8                      log_page_size;
+       u8                      reserved3[2];
+       u8                      mtt_base_addr_h;
+       __be32                  mtt_base_addr_l;
+       __be32                  pd;
+       __be16                  limit_watermark;
+       __be16                  wqe_cnt;
+       u16                     reserved4;
+       __be16                  wqe_counter;
+       u32                     reserved5;
+       __be64                  db_rec_addr;
+};
+
+struct mlx4_eqe {
+       u8                      reserved1;
+       u8                      type;
+       u8                      reserved2;
+       u8                      subtype;
+       union {
+               u32             raw[6];
+               struct {
+                       __be32  cqn;
+               } __packed comp;
+               struct {
+                       u16     reserved1;
+                       __be16  token;
+                       u32     reserved2;
+                       u8      reserved3[3];
+                       u8      status;
+                       __be64  out_param;
+               } __packed cmd;
+               struct {
+                       __be32  qpn;
+               } __packed qp;
+               struct {
+                       __be32  srqn;
+               } __packed srq;
+               struct {
+                       __be32  cqn;
+                       u32     reserved1;
+                       u8      reserved2[3];
+                       u8      syndrome;
+               } __packed cq_err;
+               struct {
+                       u32     reserved1[2];
+                       __be32  port;
+               } __packed port_change;
+               struct {
+                       #define COMM_CHANNEL_BIT_ARRAY_SIZE     4
+                       u32 reserved;
+                       u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
+               } __packed comm_channel_arm;
+               struct {
+                       u8      port;
+                       u8      reserved[3];
+                       __be64  mac;
+               } __packed mac_update;
+               struct {
+                       u8      port;
+               } __packed sw_event;
+               struct {
+                       __be32  slave_id;
+               } __packed flr_event;
+       }                       event;
+       u8                      slave_id;
+       u8                      reserved3[2];
+       u8                      owner;
+} __packed;
+
 struct mlx4_eq {
        struct mlx4_dev        *dev;
        void __iomem           *doorbell;
@@ -142,6 +381,18 @@ struct mlx4_eq {
        struct mlx4_mtt         mtt;
 };
 
+struct mlx4_slave_eqe {
+       u8 type;
+       u8 port;
+       u32 param;
+};
+
+struct mlx4_slave_event_eq_info {
+       u32 eqn;
+       u16 token;
+       u64 event_type;
+};
+
 struct mlx4_profile {
        int                     num_qp;
        int                     rdmarc_per_qp;
@@ -155,16 +406,37 @@ struct mlx4_profile {
 struct mlx4_fw {
        u64                     clr_int_base;
        u64                     catas_offset;
+       u64                     comm_base;
        struct mlx4_icm        *fw_icm;
        struct mlx4_icm        *aux_icm;
        u32                     catas_size;
        u16                     fw_pages;
        u8                      clr_int_bar;
        u8                      catas_bar;
+       u8                      comm_bar;
 };
 
-#define MGM_QPN_MASK       0x00FFFFFF
-#define MGM_BLCK_LB_BIT    30
+struct mlx4_comm {
+       u32                     slave_write;
+       u32                     slave_read;
+};
+
+enum {
+       MLX4_MCAST_CONFIG       = 0,
+       MLX4_MCAST_DISABLE      = 1,
+       MLX4_MCAST_ENABLE       = 2,
+};
+
+#define VLAN_FLTR_SIZE 128
+
+struct mlx4_vlan_fltr {
+       __be32 entry[VLAN_FLTR_SIZE];
+};
+
+struct mlx4_mcast_entry {
+       struct list_head list;
+       u64 addr;
+};
 
 struct mlx4_promisc_qp {
        struct list_head list;
@@ -177,19 +449,87 @@ struct mlx4_steer_index {
        struct list_head duplicates;
 };
 
-struct mlx4_mgm {
-       __be32                  next_gid_index;
-       __be32                  members_count;
-       u32                     reserved[2];
-       u8                      gid[16];
-       __be32                  qp[MLX4_QP_PER_MGM];
+struct mlx4_slave_state {
+       u8 comm_toggle;
+       u8 last_cmd;
+       u8 init_port_mask;
+       bool active;
+       u8 function;
+       dma_addr_t vhcr_dma;
+       u16 mtu[MLX4_MAX_PORTS + 1];
+       __be32 ib_cap_mask[MLX4_MAX_PORTS + 1];
+       struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES];
+       struct list_head mcast_filters[MLX4_MAX_PORTS + 1];
+       struct mlx4_vlan_fltr *vlan_filter[MLX4_MAX_PORTS + 1];
+       struct mlx4_slave_event_eq_info event_eq;
+       u16 eq_pi;
+       u16 eq_ci;
+       spinlock_t lock;
+       /*initialized via the kzalloc*/
+       u8 is_slave_going_down;
+       u32 cookie;
+};
+
+struct slave_list {
+       struct mutex mutex;
+       struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE];
+};
+
+struct mlx4_resource_tracker {
+       spinlock_t lock;
+       /* one tree per resource type */
+       struct radix_tree_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE];
+       /* num_of_slaves lists, one per slave */
+       struct slave_list *slave_list;
+};
+
+#define SLAVE_EVENT_EQ_SIZE    128
+struct mlx4_slave_event_eq {
+       u32 eqn;
+       u32 cons;
+       u32 prod;
+       struct mlx4_eqe event_eqe[SLAVE_EVENT_EQ_SIZE];
+};
+
+struct mlx4_master_qp0_state {
+       int proxy_qp0_active;
+       int qp0_active;
+       int port_active;
+};
+
+struct mlx4_mfunc_master_ctx {
+       struct mlx4_slave_state *slave_state;
+       struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
+       int                     init_port_ref[MLX4_MAX_PORTS + 1];
+       u16                     max_mtu[MLX4_MAX_PORTS + 1];
+       int                     disable_mcast_ref[MLX4_MAX_PORTS + 1];
+       struct mlx4_resource_tracker res_tracker;
+       struct workqueue_struct *comm_wq;
+       struct work_struct      comm_work;
+       struct work_struct      slave_event_work;
+       struct work_struct      slave_flr_event_work;
+       spinlock_t              slave_state_lock;
+       __be32                  comm_arm_bit_vector[4];
+       struct mlx4_eqe         cmd_eqe;
+       struct mlx4_slave_event_eq slave_eq;
+       struct mutex            gen_eqe_mutex[MLX4_MFUNC_MAX];
+};
+
+struct mlx4_mfunc {
+       struct mlx4_comm __iomem       *comm;
+       struct mlx4_vhcr_cmd           *vhcr;
+       dma_addr_t                      vhcr_dma;
+
+       struct mlx4_mfunc_master_ctx    master;
 };
+
 struct mlx4_cmd {
        struct pci_pool        *pool;
        void __iomem           *hcr;
        struct mutex            hcr_mutex;
        struct semaphore        poll_sem;
        struct semaphore        event_sem;
+       struct semaphore        slave_sem;
        int                     max_cmds;
        spinlock_t              context_lock;
        int                     free_head;
@@ -197,6 +537,7 @@ struct mlx4_cmd {
        u16                     token_mask;
        u8                      use_events;
        u8                      toggle;
+       u8                      comm_toggle;
 };
 
 struct mlx4_uar_table {
@@ -287,6 +628,48 @@ struct mlx4_vlan_table {
        int                     max;
 };
 
+#define SET_PORT_GEN_ALL_VALID         0x7
+#define SET_PORT_PROMISC_SHIFT         31
+#define SET_PORT_MC_PROMISC_SHIFT      30
+
+enum {
+       MCAST_DIRECT_ONLY       = 0,
+       MCAST_DIRECT            = 1,
+       MCAST_DEFAULT           = 2
+};
+
+
+struct mlx4_set_port_general_context {
+       u8 reserved[3];
+       u8 flags;
+       u16 reserved2;
+       __be16 mtu;
+       u8 pptx;
+       u8 pfctx;
+       u16 reserved3;
+       u8 pprx;
+       u8 pfcrx;
+       u16 reserved4;
+};
+
+struct mlx4_set_port_rqp_calc_context {
+       __be32 base_qpn;
+       u8 rererved;
+       u8 n_mac;
+       u8 n_vlan;
+       u8 n_prio;
+       u8 reserved2[3];
+       u8 mac_miss;
+       u8 intra_no_vlan;
+       u8 no_vlan;
+       u8 intra_vlan_miss;
+       u8 vlan_miss;
+       u8 reserved3[3];
+       u8 no_vlan_prio;
+       __be32 promisc;
+       __be32 mcast;
+};
+
 struct mlx4_mac_entry {
        u64 mac;
 };
@@ -333,6 +716,7 @@ struct mlx4_priv {
 
        struct mlx4_fw          fw;
        struct mlx4_cmd         cmd;
+       struct mlx4_mfunc       mfunc;
 
        struct mlx4_bitmap      pd_bitmap;
        struct mlx4_bitmap      xrcd_bitmap;
@@ -359,6 +743,7 @@ struct mlx4_priv {
        struct list_head        bf_list;
        struct mutex            bf_mutex;
        struct io_mapping       *bf_mapping;
+       int                     reserved_mtts;
 };
 
 static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -403,6 +788,62 @@ void mlx4_cleanup_cq_table(struct mlx4_dev *dev);
 void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
 void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
 void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn);
+void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn);
+int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn);
+void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn);
+int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn);
+void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn);
+int __mlx4_mr_reserve(struct mlx4_dev *dev);
+void __mlx4_mr_release(struct mlx4_dev *dev, u32 index);
+int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index);
+void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index);
+u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order);
+void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order);
+
+int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_SYNC_TPT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
+int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
+                    struct mlx4_vhcr *vhcr,
+                    struct mlx4_cmd_mailbox *inbox,
+                    struct mlx4_cmd_mailbox *outbox,
+                    struct mlx4_cmd_info *cmd);
+int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+                           int *base);
+void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
+int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
+int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+                    int start_index, int npages, u64 *page_list);
 
 void mlx4_start_catas_poll(struct mlx4_dev *dev);
 void mlx4_stop_catas_poll(struct mlx4_dev *dev);
@@ -419,13 +860,113 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
                      struct mlx4_profile *request,
                      struct mlx4_dev_cap *dev_cap,
                      struct mlx4_init_hca_param *init_hca);
+void mlx4_master_comm_channel(struct work_struct *work);
+void mlx4_gen_slave_eqe(struct work_struct *work);
+void mlx4_master_handle_slave_flr(struct work_struct *work);
+
+int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
+int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
+                       struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox,
+                       struct mlx4_cmd_mailbox *outbox,
+                       struct mlx4_cmd_info *cmd);
+int mlx4_COMM_INT_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+                           struct mlx4_vhcr *vhcr,
+                           struct mlx4_cmd_mailbox *inbox,
+                           struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
+int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+                        struct mlx4_vhcr *vhcr,
+                        struct mlx4_cmd_mailbox *inbox,
+                        struct mlx4_cmd_mailbox *outbox,
+                        struct mlx4_cmd_info *cmd);
+int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
+                       struct mlx4_vhcr *vhcr,
+                       struct mlx4_cmd_mailbox *inbox,
+                       struct mlx4_cmd_mailbox *outbox,
+                       struct mlx4_cmd_info *cmd);
+int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
+                            struct mlx4_vhcr *vhcr,
+                            struct mlx4_cmd_mailbox *inbox,
+                            struct mlx4_cmd_mailbox *outbox,
+                            struct mlx4_cmd_info *cmd);
+int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
+                            struct mlx4_vhcr *vhcr,
+                            struct mlx4_cmd_mailbox *inbox,
+                            struct mlx4_cmd_mailbox *outbox,
+                            struct mlx4_cmd_info *cmd);
+int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
+                        struct mlx4_vhcr *vhcr,
+                        struct mlx4_cmd_mailbox *inbox,
+                        struct mlx4_cmd_mailbox *outbox,
+                        struct mlx4_cmd_info *cmd);
+
+int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe);
 
 int mlx4_cmd_init(struct mlx4_dev *dev);
 void mlx4_cmd_cleanup(struct mlx4_dev *dev);
+int mlx4_multi_func_init(struct mlx4_dev *dev);
+void mlx4_multi_func_cleanup(struct mlx4_dev *dev);
 void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
 int mlx4_cmd_use_events(struct mlx4_dev *dev);
 void mlx4_cmd_use_polling(struct mlx4_dev *dev);
 
+int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
+                 unsigned long timeout);
+
 void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
 void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);
 
@@ -452,12 +993,113 @@ void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
 void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
 
 int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
+/* resource tracker functions */
+int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
+                                   enum mlx4_resource resource_type,
+                                   int resource_id, int *slave);
+void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id);
+int mlx4_init_resource_tracker(struct mlx4_dev *dev);
+
+void mlx4_free_resource_tracker(struct mlx4_dev *dev);
+
+int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
+int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
+                           struct mlx4_vhcr *vhcr,
+                           struct mlx4_cmd_mailbox *inbox,
+                           struct mlx4_cmd_mailbox *outbox,
+                           struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
+                           struct mlx4_vhcr *vhcr,
+                           struct mlx4_cmd_mailbox *inbox,
+                           struct mlx4_cmd_mailbox *outbox,
+                           struct mlx4_cmd_info *cmd);
 int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
 int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port);
 
+
+int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+
+int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
+                        struct mlx4_vhcr *vhcr,
+                        struct mlx4_cmd_mailbox *inbox,
+                        struct mlx4_cmd_mailbox *outbox,
+                        struct mlx4_cmd_info *cmd);
 int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                          enum mlx4_protocol prot, enum mlx4_steer_type steer);
 int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                          int block_mcast_loopback, enum mlx4_protocol prot,
                          enum mlx4_steer_type steer);
+int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+                               struct mlx4_vhcr *vhcr,
+                               struct mlx4_cmd_mailbox *inbox,
+                               struct mlx4_cmd_mailbox *outbox,
+                               struct mlx4_cmd_info *cmd);
+int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+                              struct mlx4_vhcr *vhcr,
+                              struct mlx4_cmd_mailbox *inbox,
+                              struct mlx4_cmd_mailbox *outbox,
+                              struct mlx4_cmd_info *cmd);
+int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function,
+                                    int port, void *buf);
+int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, u32 in_mod,
+                               struct mlx4_cmd_mailbox *outbox);
+int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
+                                  struct mlx4_vhcr *vhcr,
+                                  struct mlx4_cmd_mailbox *inbox,
+                                  struct mlx4_cmd_mailbox *outbox,
+                               struct mlx4_cmd_info *cmd);
+int mlx4_PKEY_TABLE_wrapper(struct mlx4_dev *dev, int slave,
+                           struct mlx4_vhcr *vhcr,
+                           struct mlx4_cmd_mailbox *inbox,
+                           struct mlx4_cmd_mailbox *outbox,
+                           struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
+                              struct mlx4_vhcr *vhcr,
+                              struct mlx4_cmd_mailbox *inbox,
+                              struct mlx4_cmd_mailbox *outbox,
+                              struct mlx4_cmd_info *cmd);
+
+int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
+int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
+
+static inline void set_param_l(u64 *arg, u32 val)
+{
+       *((u32 *)arg) = val;
+}
+
+static inline void set_param_h(u64 *arg, u32 val)
+{
+       *arg = (*arg & 0xffffffff) | ((u64) val << 32);
+}
+
+static inline u32 get_param_l(u64 *arg)
+{
+       return (u32) (*arg & 0xffffffff);
+}
+
+static inline u32 get_param_h(u64 *arg)
+{
+       return (u32)(*arg >> 32);
+}
+
+static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev)
+{
+       return &mlx4_priv(dev)->mfunc.master.res_tracker.lock;
+}
+
+#define NOT_MASKED_PD_BITS 17
+
 #endif /* MLX4_H */
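
The set_param_l()/set_param_h()/get_param_l()/get_param_h() helpers added above treat the 64-bit in_param/out_param of a wrapped command as two independent 32-bit halves. The stand-alone sketch below demonstrates that convention with plain masks and shifts; note that the patch's set_param_l() stores the value through a u32 pointer, which matches this behaviour on little-endian hosts. The names and sample values here are illustrative only, not part of the header.

#include <stdint.h>
#include <stdio.h>

/* Stand-alone sketch of the in_param/out_param convention used by the
 * wrapped commands: a single u64 carries two u32 values. */
static void set_low(uint64_t *arg, uint32_t val)
{
        *arg = (*arg & 0xffffffff00000000ULL) | val;
}

static void set_high(uint64_t *arg, uint32_t val)
{
        *arg = (*arg & 0xffffffffULL) | ((uint64_t)val << 32);
}

static uint32_t get_low(uint64_t *arg)  { return (uint32_t)(*arg & 0xffffffff); }
static uint32_t get_high(uint64_t *arg) { return (uint32_t)(*arg >> 32); }

int main(void)
{
        uint64_t in_param = 0;

        /* e.g. mlx4_free_mtt_range() packs offset (low) and order (high) */
        set_low(&in_param, 0x1000);     /* MTT offset */
        set_high(&in_param, 5);         /* allocation order */
        printf("low=%u high=%u\n", get_low(&in_param), get_high(&in_param));
        return 0;
}
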
index ea2ba68..f2a8e65 100644
@@ -51,8 +51,8 @@
 #include "en_port.h"
 
 #define DRV_NAME       "mlx4_en"
-#define DRV_VERSION    "1.5.4.2"
-#define DRV_RELDATE    "October 2011"
+#define DRV_VERSION    "2.0"
+#define DRV_RELDATE    "Dec 2011"
 
 #define MLX4_EN_MSG_LEVEL      (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
 
index efa3e77..f7243b2 100644
  * SOFTWARE.
  */
 
+#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/export.h>
 #include <linux/slab.h>
+#include <linux/kernel.h>
 
 #include <linux/mlx4/cmd.h>
 
 #include "mlx4.h"
 #include "icm.h"
 
-/*
- * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
- */
-struct mlx4_mpt_entry {
-       __be32 flags;
-       __be32 qpn;
-       __be32 key;
-       __be32 pd_flags;
-       __be64 start;
-       __be64 length;
-       __be32 lkey;
-       __be32 win_cnt;
-       u8      reserved1[3];
-       u8      mtt_rep;
-       __be64 mtt_seg;
-       __be32 mtt_sz;
-       __be32 entity_size;
-       __be32 first_byte_offset;
-} __packed;
-
 #define MLX4_MPT_FLAG_SW_OWNS      (0xfUL << 28)
 #define MLX4_MPT_FLAG_FREE         (0x3UL << 28)
 #define MLX4_MPT_FLAG_MIO          (1 << 17)
@@ -180,22 +162,48 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
        kfree(buddy->num_free);
 }
 
-static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
+u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
 {
        struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
        u32 seg;
+       int seg_order;
+       u32 offset;
 
-       seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, order);
+       seg_order = max_t(int, order - log_mtts_per_seg, 0);
+
+       seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
        if (seg == -1)
                return -1;
 
-       if (mlx4_table_get_range(dev, &mr_table->mtt_table, seg,
-                                seg + (1 << order) - 1)) {
-               mlx4_buddy_free(&mr_table->mtt_buddy, seg, order);
+       offset = seg * (1 << log_mtts_per_seg);
+
+       if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
+                                offset + (1 << order) - 1)) {
+               mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
                return -1;
        }
 
-       return seg;
+       return offset;
+}
+
+static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
+{
+       u64 in_param;
+       u64 out_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, order);
+               err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
+                                                      RES_OP_RESERVE_AND_MAP,
+                                                      MLX4_CMD_ALLOC_RES,
+                                                      MLX4_CMD_TIME_CLASS_A,
+                                                      MLX4_CMD_WRAPPED);
+               if (err)
+                       return -1;
+               return get_param_l(&out_param);
+       }
+       return __mlx4_alloc_mtt_range(dev, order);
 }
 
 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
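
In __mlx4_alloc_mtt_range() above, MTT space is still handed out by the buddy allocator in segments, but callers now work with an entry offset: a request for 2^order entries is rounded up to 2^max(order - log_mtts_per_seg, 0) segments, and the returned offset is the segment index scaled by the segment size. The stand-alone sketch below only walks through that arithmetic with an assumed log_mtts_per_seg of 3 (eight entries per segment); the segment number "returned by the buddy" is made up for illustration and nothing here is kernel code.

#include <stdio.h>

/* Illustrative arithmetic only: how an MTT allocation order maps to a
 * buddy segment order and back to an entry offset, assuming
 * log_mtts_per_seg = 3 (eight MTT entries per segment). */
int main(void)
{
        int log_mtts_per_seg = 3;
        int order = 5;                         /* request 2^5 = 32 MTT entries */
        int seg_order = order - log_mtts_per_seg;

        if (seg_order < 0)
                seg_order = 0;                 /* max_t(int, ..., 0) in the patch */

        int seg = 7;                           /* pretend the buddy returned segment 7 */
        int offset = seg * (1 << log_mtts_per_seg);

        printf("order=%d -> seg_order=%d, segment %d -> offset %d\n",
               order, seg_order, seg, offset); /* order=5 -> seg_order=2, offset 56 */
        return 0;
}
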
@@ -210,33 +218,63 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
        } else
                mtt->page_shift = page_shift;
 
-       for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1)
+       for (mtt->order = 0, i = 1; i < npages; i <<= 1)
                ++mtt->order;
 
-       mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
-       if (mtt->first_seg == -1)
+       mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
+       if (mtt->offset == -1)
                return -ENOMEM;
 
        return 0;
 }
 EXPORT_SYMBOL_GPL(mlx4_mtt_init);
 
-void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
+void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
 {
+       u32 first_seg;
+       int seg_order;
        struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
 
+       seg_order = max_t(int, order - log_mtts_per_seg, 0);
+       first_seg = offset / (1 << log_mtts_per_seg);
+
+       mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
+       mlx4_table_put_range(dev, &mr_table->mtt_table, first_seg,
+                            first_seg + (1 << seg_order) - 1);
+}
+
+static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
+{
+       u64 in_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, offset);
+               set_param_h(&in_param, order);
+               err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
+                                                      MLX4_CMD_FREE_RES,
+                                                      MLX4_CMD_TIME_CLASS_A,
+                                                      MLX4_CMD_WRAPPED);
+               if (err)
+                       mlx4_warn(dev, "Failed to free mtt range at:"
+                                 "%d order:%d\n", offset, order);
+               return;
+       }
+        __mlx4_free_mtt_range(dev, offset, order);
+}
+
+void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
+{
        if (mtt->order < 0)
                return;
 
-       mlx4_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
-       mlx4_table_put_range(dev, &mr_table->mtt_table, mtt->first_seg,
-                            mtt->first_seg + (1 << mtt->order) - 1);
+       mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
 }
 EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);
 
 u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
 {
-       return (u64) mtt->first_seg * dev->caps.mtt_entry_sz;
+       return (u64) mtt->offset * dev->caps.mtt_entry_sz;
 }
 EXPORT_SYMBOL_GPL(mlx4_mtt_addr);
 
@@ -253,69 +291,205 @@ static u32 key_to_hw_index(u32 key)
 static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                          int mpt_index)
 {
-       return mlx4_cmd(dev, mailbox->dma, mpt_index, 0, MLX4_CMD_SW2HW_MPT,
-                       MLX4_CMD_TIME_CLASS_B);
+       return mlx4_cmd(dev, mailbox->dma | dev->caps.function , mpt_index,
+                       0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
+                       MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                          int mpt_index)
 {
        return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
-                           !mailbox, MLX4_CMD_HW2SW_MPT, MLX4_CMD_TIME_CLASS_B);
+                           !mailbox, MLX4_CMD_HW2SW_MPT,
+                           MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
 }
 
-int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
-                 int npages, int page_shift, struct mlx4_mr *mr)
+static int mlx4_mr_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+                         u32 *base_mridx)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
-       u32 index;
-       int err;
+       u32 mridx;
 
-       index = mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
-       if (index == -1)
+       mridx = mlx4_bitmap_alloc_range(&priv->mr_table.mpt_bitmap, cnt, align);
+       if (mridx == -1)
                return -ENOMEM;
 
+       *base_mridx = mridx;
+       return 0;
+
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_reserve_range);
+
+static void mlx4_mr_release_range(struct mlx4_dev *dev, u32 base_mridx, int cnt)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       mlx4_bitmap_free_range(&priv->mr_table.mpt_bitmap, base_mridx, cnt);
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_release_range);
+
+static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
+                          u64 iova, u64 size, u32 access, int npages,
+                          int page_shift, struct mlx4_mr *mr)
+{
        mr->iova       = iova;
        mr->size       = size;
        mr->pd         = pd;
        mr->access     = access;
-       mr->enabled    = 0;
-       mr->key        = hw_index_to_key(index);
+       mr->enabled    = MLX4_MR_DISABLED;
+       mr->key        = hw_index_to_key(mridx);
+
+       return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_alloc_reserved);
+
+static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
+                         struct mlx4_cmd_mailbox *mailbox,
+                         int num_entries)
+{
+       return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
+                       MLX4_CMD_TIME_CLASS_A,  MLX4_CMD_WRAPPED);
+}
 
-       err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
+int __mlx4_mr_reserve(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
+}
+
+static int mlx4_mr_reserve(struct mlx4_dev *dev)
+{
+       u64 out_param;
+
+       if (mlx4_is_mfunc(dev)) {
+               if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
+                                  MLX4_CMD_ALLOC_RES,
+                                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+                       return -1;
+               return get_param_l(&out_param);
+       }
+       return  __mlx4_mr_reserve(dev);
+}
+
+void __mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+}
+
+static void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+{
+       u64 in_param;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, index);
+               if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
+                              MLX4_CMD_FREE_RES,
+                              MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+                       mlx4_warn(dev, "Failed to release mr index:%d\n",
+                                 index);
+               return;
+       }
+       __mlx4_mr_release(dev, index);
+}
+
+int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+{
+       struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+
+       return mlx4_table_get(dev, &mr_table->dmpt_table, index);
+}
+
+static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+{
+       u64 param;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&param, index);
+               return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
+                                                       MLX4_CMD_ALLOC_RES,
+                                                       MLX4_CMD_TIME_CLASS_A,
+                                                       MLX4_CMD_WRAPPED);
+       }
+       return __mlx4_mr_alloc_icm(dev, index);
+}
+
+void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+{
+       struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+
+       mlx4_table_put(dev, &mr_table->dmpt_table, index);
+}
+
+static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+{
+       u64 in_param;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, index);
+               if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
+                            MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+                            MLX4_CMD_WRAPPED))
+                       mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
+                                 index);
+               return;
+       }
+       return __mlx4_mr_free_icm(dev, index);
+}
+
+int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
+                 int npages, int page_shift, struct mlx4_mr *mr)
+{
+       u32 index;
+       int err;
+
+       index = mlx4_mr_reserve(dev);
+       if (index == -1)
+               return -ENOMEM;
+
+       err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
+                                    access, npages, page_shift, mr);
        if (err)
-               mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+               mlx4_mr_release(dev, index);
 
        return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
 
-void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
+static void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
 {
-       struct mlx4_priv *priv = mlx4_priv(dev);
        int err;
 
-       if (mr->enabled) {
+       if (mr->enabled == MLX4_MR_EN_HW) {
                err = mlx4_HW2SW_MPT(dev, NULL,
                                     key_to_hw_index(mr->key) &
                                     (dev->caps.num_mpts - 1));
                if (err)
-                       mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);
-       }
+                       mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err);
 
+               mr->enabled = MLX4_MR_EN_SW;
+       }
        mlx4_mtt_cleanup(dev, &mr->mtt);
-       mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, key_to_hw_index(mr->key));
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_free_reserved);
+
+void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
+{
+       mlx4_mr_free_reserved(dev, mr);
+       if (mr->enabled)
+               mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
+       mlx4_mr_release(dev, key_to_hw_index(mr->key));
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_free);
 
 int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
 {
-       struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_mpt_entry *mpt_entry;
        int err;
 
-       err = mlx4_table_get(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
+       err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key));
        if (err)
                return err;
 
@@ -340,9 +514,10 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
 
        if (mr->mtt.order < 0) {
                mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
-               mpt_entry->mtt_seg = 0;
+               mpt_entry->mtt_addr = 0;
        } else {
-               mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt));
+               mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
+                                                 &mr->mtt));
        }
 
        if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
@@ -350,8 +525,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
                mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
                mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
                                                   MLX4_MPT_PD_FLAG_RAE);
-               mpt_entry->mtt_sz    = cpu_to_be32((1 << mr->mtt.order) *
-                                                  dev->caps.mtts_per_seg);
+               mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
        } else {
                mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
        }
@@ -362,8 +536,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
                mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
                goto err_cmd;
        }
-
-       mr->enabled = 1;
+       mr->enabled = MLX4_MR_EN_HW;
 
        mlx4_free_cmd_mailbox(dev, mailbox);
 
@@ -373,7 +546,7 @@ err_cmd:
        mlx4_free_cmd_mailbox(dev, mailbox);
 
 err_table:
-       mlx4_table_put(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
+       mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
        return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_enable);
@@ -385,18 +558,10 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
        __be64 *mtts;
        dma_addr_t dma_handle;
        int i;
-       int s = start_index * sizeof (u64);
 
-       /* All MTTs must fit in the same page */
-       if (start_index / (PAGE_SIZE / sizeof (u64)) !=
-           (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
-               return -EINVAL;
-
-       if (start_index & (dev->caps.mtts_per_seg - 1))
-               return -EINVAL;
+       mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
+                              start_index, &dma_handle);
 
-       mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
-                               s / dev->caps.mtt_entry_sz, &dma_handle);
        if (!mtts)
                return -ENOMEM;
 
@@ -412,27 +577,75 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
        return 0;
 }
 
-int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
-                  int start_index, int npages, u64 *page_list)
+int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+                    int start_index, int npages, u64 *page_list)
 {
+       int err = 0;
        int chunk;
-       int err;
+       int mtts_per_page;
+       int max_mtts_first_page;
 
-       if (mtt->order < 0)
-               return -EINVAL;
+       /* compute how many mtts fit in the first page */
+       mtts_per_page = PAGE_SIZE / sizeof(u64);
+       max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
+                             % mtts_per_page;
+
+       chunk = min_t(int, max_mtts_first_page, npages);
 
        while (npages > 0) {
-               chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);
                err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
                if (err)
                        return err;
-
                npages      -= chunk;
                start_index += chunk;
                page_list   += chunk;
+
+               chunk = min_t(int, mtts_per_page, npages);
        }
+       return err;
+}
 
-       return 0;
+int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+                  int start_index, int npages, u64 *page_list)
+{
+       struct mlx4_cmd_mailbox *mailbox = NULL;
+       __be64 *inbox = NULL;
+       int chunk;
+       int err = 0;
+       int i;
+
+       if (mtt->order < 0)
+               return -EINVAL;
+
+       if (mlx4_is_mfunc(dev)) {
+               mailbox = mlx4_alloc_cmd_mailbox(dev);
+               if (IS_ERR(mailbox))
+                       return PTR_ERR(mailbox);
+               inbox = mailbox->buf;
+
+               while (npages > 0) {
+                       chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
+                                     npages);
+                       inbox[0] = cpu_to_be64(mtt->offset + start_index);
+                       inbox[1] = 0;
+                       for (i = 0; i < chunk; ++i)
+                               inbox[i + 2] = cpu_to_be64(page_list[i] |
+                                              MLX4_MTT_FLAG_PRESENT);
+                       err = mlx4_WRITE_MTT(dev, mailbox, chunk);
+                       if (err) {
+                               mlx4_free_cmd_mailbox(dev, mailbox);
+                               return err;
+                       }
+
+                       npages      -= chunk;
+                       start_index += chunk;
+                       page_list   += chunk;
+               }
+               mlx4_free_cmd_mailbox(dev, mailbox);
+               return err;
+       }
+
+       return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
 }
 EXPORT_SYMBOL_GPL(mlx4_write_mtt);
 
@@ -462,21 +675,34 @@ EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
 
 int mlx4_init_mr_table(struct mlx4_dev *dev)
 {
-       struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_mr_table *mr_table = &priv->mr_table;
        int err;
 
+       if (!is_power_of_2(dev->caps.num_mpts))
+               return -EINVAL;
+
+       /* Nothing to do for slaves - all MR handling is forwarded
+       * to the master */
+       if (mlx4_is_slave(dev))
+               return 0;
+
        err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
                               ~0, dev->caps.reserved_mrws, 0);
        if (err)
                return err;
 
        err = mlx4_buddy_init(&mr_table->mtt_buddy,
-                             ilog2(dev->caps.num_mtt_segs));
+                             ilog2(dev->caps.num_mtts /
+                             (1 << log_mtts_per_seg)));
        if (err)
                goto err_buddy;
 
        if (dev->caps.reserved_mtts) {
-               if (mlx4_alloc_mtt_range(dev, fls(dev->caps.reserved_mtts - 1)) == -1) {
+               priv->reserved_mtts =
+                       mlx4_alloc_mtt_range(dev,
+                                            fls(dev->caps.reserved_mtts - 1));
+               if (priv->reserved_mtts < 0) {
                        mlx4_warn(dev, "MTT table of order %d is too small.\n",
                                  mr_table->mtt_buddy.max_order);
                        err = -ENOMEM;
@@ -497,8 +723,14 @@ err_buddy:
 
 void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
 {
-       struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_mr_table *mr_table = &priv->mr_table;
 
+       if (mlx4_is_slave(dev))
+               return;
+       if (priv->reserved_mtts >= 0)
+               mlx4_free_mtt_range(dev, priv->reserved_mtts,
+                                   fls(dev->caps.reserved_mtts - 1));
        mlx4_buddy_cleanup(&mr_table->mtt_buddy);
        mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
 }
@@ -581,7 +813,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
                   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
-       u64 mtt_seg;
+       u64 mtt_offset;
        int err = -ENOMEM;
 
        if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
@@ -601,11 +833,12 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
        if (err)
                return err;
 
-       mtt_seg = fmr->mr.mtt.first_seg * dev->caps.mtt_entry_sz;
+       mtt_offset = fmr->mr.mtt.offset * dev->caps.mtt_entry_sz;
 
        fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
-                                   fmr->mr.mtt.first_seg,
+                                   fmr->mr.mtt.offset,
                                    &fmr->dma_handle);
+
        if (!fmr->mtts) {
                err = -ENOMEM;
                goto err_free;
@@ -619,6 +852,46 @@ err_free:
 }
 EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
 
+static int mlx4_fmr_alloc_reserved(struct mlx4_dev *dev, u32 mridx,
+                           u32 pd, u32 access, int max_pages,
+                           int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int err = -ENOMEM;
+
+       if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
+               return -EINVAL;
+
+       /* All MTTs must fit in the same page */
+       if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
+               return -EINVAL;
+
+       fmr->page_shift = page_shift;
+       fmr->max_pages  = max_pages;
+       fmr->max_maps   = max_maps;
+       fmr->maps = 0;
+
+       err = mlx4_mr_alloc_reserved(dev, mridx, pd, 0, 0, access, max_pages,
+                                    page_shift, &fmr->mr);
+       if (err)
+               return err;
+
+       fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
+                                   fmr->mr.mtt.offset,
+                                   &fmr->dma_handle);
+       if (!fmr->mtts) {
+               err = -ENOMEM;
+               goto err_free;
+       }
+
+       return 0;
+
+err_free:
+       mlx4_mr_free_reserved(dev, &fmr->mr);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_alloc_reserved);
+
 int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -640,12 +913,32 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
 void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
                    u32 *lkey, u32 *rkey)
 {
+       struct mlx4_cmd_mailbox *mailbox;
+       int err;
+
        if (!fmr->maps)
                return;
 
        fmr->maps = 0;
 
-       *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox)) {
+               err = PTR_ERR(mailbox);
+               printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox"
+                      " failed (%d)\n", err);
+               return;
+       }
+
+       err = mlx4_HW2SW_MPT(dev, NULL,
+                            key_to_hw_index(fmr->mr.key) &
+                            (dev->caps.num_mpts - 1));
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       if (err) {
+               printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n",
+                      err);
+               return;
+       }
+       fmr->mr.enabled = MLX4_MR_EN_SW;
 }
 EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
 
@@ -654,15 +947,28 @@ int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
        if (fmr->maps)
                return -EBUSY;
 
-       fmr->mr.enabled = 0;
        mlx4_mr_free(dev, &fmr->mr);
+       fmr->mr.enabled = MLX4_MR_DISABLED;
 
        return 0;
 }
 EXPORT_SYMBOL_GPL(mlx4_fmr_free);
 
+static int mlx4_fmr_free_reserved(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
+{
+       if (fmr->maps)
+               return -EBUSY;
+
+       mlx4_mr_free_reserved(dev, &fmr->mr);
+       fmr->mr.enabled = MLX4_MR_DISABLED;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_free_reserved);
+
 int mlx4_SYNC_TPT(struct mlx4_dev *dev)
 {
-       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000);
+       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
+                       MLX4_CMD_WRAPPED);
 }
 EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
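The rewritten __mlx4_write_mtt above trims its first chunk so that every later chunk starts on an MTT-table page boundary, keeping each mlx4_write_mtt_chunk call inside a single page of the table. Below is a minimal standalone sketch of that arithmetic; PAGE_SIZE and the sample offsets are illustrative values, not driver state.

#include <stdio.h>

#define PAGE_SIZE 4096

static void show_chunks(int mtt_offset, int start_index, int npages)
{
        int mtts_per_page = PAGE_SIZE / sizeof(unsigned long long); /* 512 */
        /* first chunk is trimmed so the next one is page-aligned */
        int chunk = mtts_per_page - (mtt_offset + start_index) % mtts_per_page;

        if (chunk > npages)
                chunk = npages;

        while (npages > 0) {
                printf("write %d entries starting at index %d\n",
                       chunk, start_index);
                npages -= chunk;
                start_index += chunk;
                chunk = npages < mtts_per_page ? npages : mtts_per_page;
        }
}

int main(void)
{
        /* e.g. an MTT region at offset 100, writing 1000 entries from index 0 */
        show_chunks(100, 0, 1000);
        return 0;
}

With these numbers the sketch prints chunks of 412, 512 and 76 entries; after the first write, every chunk begins at a multiple of 512 within the table, matching the intent of max_mtts_first_page in the patch.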
index 260ed25..5c9a54d 100644 (file)
@@ -31,6 +31,7 @@
  * SOFTWARE.
  */
 
+#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/export.h>
 #include <linux/io-mapping.h>
@@ -51,7 +52,8 @@ int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn)
        *pdn = mlx4_bitmap_alloc(&priv->pd_bitmap);
        if (*pdn == -1)
                return -ENOMEM;
-
+       if (mlx4_is_mfunc(dev))
+               *pdn |= (dev->caps.function + 1) << NOT_MASKED_PD_BITS;
        return 0;
 }
 EXPORT_SYMBOL_GPL(mlx4_pd_alloc);
@@ -85,7 +87,8 @@ int mlx4_init_pd_table(struct mlx4_dev *dev)
        struct mlx4_priv *priv = mlx4_priv(dev);
 
        return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds,
-                               (1 << 24) - 1, dev->caps.reserved_pds, 0);
+                               (1 << NOT_MASKED_PD_BITS) - 1,
+                                dev->caps.reserved_pds, 0);
 }
 
 void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
@@ -108,13 +111,19 @@ void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev)
 
 int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
 {
+       int offset;
+
        uar->index = mlx4_bitmap_alloc(&mlx4_priv(dev)->uar_table.bitmap);
        if (uar->index == -1)
                return -ENOMEM;
 
-       uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;
+       if (mlx4_is_slave(dev))
+               offset = uar->index % ((int) pci_resource_len(dev->pdev, 2) /
+                                      dev->caps.uar_page_size);
+       else
+               offset = uar->index;
+       uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + offset;
        uar->map = NULL;
-
        return 0;
 }
 EXPORT_SYMBOL_GPL(mlx4_uar_alloc);
@@ -232,7 +241,7 @@ int mlx4_init_uar_table(struct mlx4_dev *dev)
 
        return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
                                dev->caps.num_uars, dev->caps.num_uars - 1,
-                               max(128, dev->caps.reserved_uars), 0);
+                               dev->caps.reserved_uars, 0);
 }
 
 void mlx4_cleanup_uar_table(struct mlx4_dev *dev)
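In multi-function mode, mlx4_pd_alloc above tags each PD number with the owning function in the bits above NOT_MASKED_PD_BITS; the old (1 << 24) - 1 bitmap mask being rewritten as (1 << NOT_MASKED_PD_BITS) - 1 suggests that constant is 24. The sketch below shows the implied encoding; pdn_to_function() and pdn_to_index() are hypothetical helpers for illustration, not driver functions.

#include <stdint.h>
#include <stdio.h>

#define NOT_MASKED_PD_BITS 24   /* inferred from the mask change above */

static uint32_t encode_pdn(uint32_t raw_pdn, uint32_t function)
{
        /* low bits: bitmap-allocated PD; high bits: owning function + 1 */
        return raw_pdn | ((function + 1) << NOT_MASKED_PD_BITS);
}

static uint32_t pdn_to_function(uint32_t pdn)
{
        return (pdn >> NOT_MASKED_PD_BITS) - 1;
}

static uint32_t pdn_to_index(uint32_t pdn)
{
        return pdn & ((1u << NOT_MASKED_PD_BITS) - 1);
}

int main(void)
{
        uint32_t pdn = encode_pdn(0x1234, 3);

        printf("pdn=0x%x function=%u index=0x%x\n",
               pdn, pdn_to_function(pdn), pdn_to_index(pdn));
        return 0;
}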
index d942aea..88b52e5 100644 (file)
@@ -70,41 +70,12 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
        table->total = 0;
 }
 
-static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
-                                  __be64 *entries)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       u32 in_mod;
-       int err;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-
-       memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
-
-       in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
-       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B);
-
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
-}
-
-static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
-                            u64 mac, int *qpn, u8 reserve)
+static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
 {
        struct mlx4_qp qp;
        u8 gid[16] = {0};
        int err;
 
-       if (reserve) {
-               err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
-               if (err) {
-                       mlx4_err(dev, "Failed to reserve qp for mac registration\n");
-                       return err;
-               }
-       }
        qp.qpn = *qpn;
 
        mac &= 0xffffffffffffULL;
@@ -113,16 +84,15 @@ static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
        gid[5] = port;
        gid[7] = MLX4_UC_STEER << 1;
 
-       err = mlx4_qp_attach_common(dev, &qp, gid, 0,
-                                   MLX4_PROT_ETH, MLX4_UC_STEER);
-       if (err && reserve)
-               mlx4_qp_release_range(dev, *qpn, 1);
+       err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
+       if (err)
+               mlx4_warn(dev, "Failed Attaching Unicast\n");
 
        return err;
 }
 
 static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
-                                 u64 mac, int qpn, u8 free)
+                                 u64 mac, int qpn)
 {
        struct mlx4_qp qp;
        u8 gid[16] = {0};
@@ -134,60 +104,164 @@ static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
        gid[5] = port;
        gid[7] = MLX4_UC_STEER << 1;
 
-       mlx4_qp_detach_common(dev, &qp, gid, MLX4_PROT_ETH, MLX4_UC_STEER);
-       if (free)
-               mlx4_qp_release_range(dev, qpn, 1);
+       mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
+}
+
+static int validate_index(struct mlx4_dev *dev,
+                         struct mlx4_mac_table *table, int index)
+{
+       int err = 0;
+
+       if (index < 0 || index >= table->max || !table->entries[index]) {
+               mlx4_warn(dev, "No valid Mac entry for the given index\n");
+               err = -EINVAL;
+       }
+       return err;
+}
+
+static int find_index(struct mlx4_dev *dev,
+                     struct mlx4_mac_table *table, u64 mac)
+{
+       int i;
+
+       for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+               if ((mac & MLX4_MAC_MASK) ==
+                   (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
+                       return i;
+       }
+       /* Mac not found */
+       return -EINVAL;
 }
 
-int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
+int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
 {
        struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
-       struct mlx4_mac_table *table = &info->mac_table;
        struct mlx4_mac_entry *entry;
-       int i, err = 0;
-       int free = -1;
+       int index = 0;
+       int err = 0;
 
-       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
-               err = mlx4_uc_steer_add(dev, port, mac, qpn, 1);
-               if (err)
-                       return err;
+       mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n",
+                       (unsigned long long) mac);
+       index = mlx4_register_mac(dev, port, mac);
+       if (index < 0) {
+               err = index;
+               mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
+                        (unsigned long long) mac);
+               return err;
+       }
 
-               entry = kmalloc(sizeof *entry, GFP_KERNEL);
-               if (!entry) {
-                       mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
-                       return -ENOMEM;
-               }
+       if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) {
+               *qpn = info->base_qpn + index;
+               return 0;
+       }
+
+       err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
+       mlx4_dbg(dev, "Reserved qp %d\n", *qpn);
+       if (err) {
+               mlx4_err(dev, "Failed to reserve qp for mac registration\n");
+               goto qp_err;
+       }
+
+       err = mlx4_uc_steer_add(dev, port, mac, qpn);
+       if (err)
+               goto steer_err;
 
-               entry->mac = mac;
-               err = radix_tree_insert(&info->mac_tree, *qpn, entry);
-               if (err) {
+       entry = kmalloc(sizeof *entry, GFP_KERNEL);
+       if (!entry) {
+               err = -ENOMEM;
+               goto alloc_err;
+       }
+       entry->mac = mac;
+       err = radix_tree_insert(&info->mac_tree, *qpn, entry);
+       if (err)
+               goto insert_err;
+       return 0;
+
+insert_err:
+       kfree(entry);
+
+alloc_err:
+       mlx4_uc_steer_release(dev, port, mac, *qpn);
+
+steer_err:
+       mlx4_qp_release_range(dev, *qpn, 1);
+
+qp_err:
+       mlx4_unregister_mac(dev, port, mac);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_eth_qp);
+
+void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn)
+{
+       struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+       struct mlx4_mac_entry *entry;
+
+       mlx4_dbg(dev, "Unregistering MAC: 0x%llx\n",
+                (unsigned long long) mac);
+       mlx4_unregister_mac(dev, port, mac);
+
+       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
+               entry = radix_tree_lookup(&info->mac_tree, qpn);
+               if (entry) {
+                       mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx,"
+                                " qpn %d\n", port,
+                                (unsigned long long) mac, qpn);
+                       mlx4_uc_steer_release(dev, port, entry->mac, qpn);
+                       mlx4_qp_release_range(dev, qpn, 1);
+                       radix_tree_delete(&info->mac_tree, qpn);
                        kfree(entry);
-                       mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
-                       return err;
                }
        }
+}
+EXPORT_SYMBOL_GPL(mlx4_put_eth_qp);
+
+static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
+                                  __be64 *entries)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       u32 in_mod;
+       int err;
 
-       mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
+
+       in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
+
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+
+int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
+{
+       struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+       struct mlx4_mac_table *table = &info->mac_table;
+       int i, err = 0;
+       int free = -1;
+
+       mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n",
+                (unsigned long long) mac, port);
 
        mutex_lock(&table->mutex);
-       for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
-               if (free < 0 && !table->refs[i]) {
+       for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+               if (free < 0 && !table->entries[i]) {
                        free = i;
                        continue;
                }
 
                if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
-                       /* MAC already registered, increase references count */
-                       ++table->refs[i];
+                       /* MAC already registered, must not have duplicates */
+                       err = -EEXIST;
                        goto out;
                }
        }
 
-       if (free < 0) {
-               err = -ENOMEM;
-               goto out;
-       }
-
        mlx4_dbg(dev, "Free MAC index is %d\n", free);
 
        if (table->total == table->max) {
@@ -197,103 +271,103 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
        }
 
        /* Register new MAC */
-       table->refs[free] = 1;
        table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
 
        err = mlx4_set_port_mac_table(dev, port, table->entries);
        if (unlikely(err)) {
-               mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac);
-               table->refs[free] = 0;
+               mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
+                        (unsigned long long) mac);
                table->entries[free] = 0;
                goto out;
        }
 
-       if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
-               *qpn = info->base_qpn + free;
+       err = free;
        ++table->total;
 out:
        mutex_unlock(&table->mutex);
        return err;
 }
-EXPORT_SYMBOL_GPL(mlx4_register_mac);
+EXPORT_SYMBOL_GPL(__mlx4_register_mac);
 
-static int validate_index(struct mlx4_dev *dev,
-                         struct mlx4_mac_table *table, int index)
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
 {
-       int err = 0;
+       u64 out_param;
+       int err;
 
-       if (index < 0 || index >= table->max || !table->entries[index]) {
-               mlx4_warn(dev, "No valid Mac entry for the given index\n");
-               err = -EINVAL;
-       }
-       return err;
-}
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&out_param, port);
+               err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
+                                  RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+                                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+               if (err)
+                       return err;
 
-static int find_index(struct mlx4_dev *dev,
-                     struct mlx4_mac_table *table, u64 mac)
-{
-       int i;
-       for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
-               if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
-                       return i;
+               return get_param_l(&out_param);
        }
-       /* Mac not found */
-       return -EINVAL;
+       return __mlx4_register_mac(dev, port, mac);
 }
+EXPORT_SYMBOL_GPL(mlx4_register_mac);
 
-void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
+
+void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
 {
        struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
        struct mlx4_mac_table *table = &info->mac_table;
-       int index = qpn - info->base_qpn;
-       struct mlx4_mac_entry *entry;
+       int index;
 
-       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
-               entry = radix_tree_lookup(&info->mac_tree, qpn);
-               if (entry) {
-                       mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1);
-                       radix_tree_delete(&info->mac_tree, qpn);
-                       index = find_index(dev, table, entry->mac);
-                       kfree(entry);
-               }
-       }
+       index = find_index(dev, table, mac);
 
        mutex_lock(&table->mutex);
 
        if (validate_index(dev, table, index))
                goto out;
 
-       /* Check whether this address has reference count */
-       if (!(--table->refs[index])) {
-               table->entries[index] = 0;
-               mlx4_set_port_mac_table(dev, port, table->entries);
-               --table->total;
-       }
+       table->entries[index] = 0;
+       mlx4_set_port_mac_table(dev, port, table->entries);
+       --table->total;
 out:
        mutex_unlock(&table->mutex);
 }
+EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
+
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
+{
+       u64 out_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&out_param, port);
+               err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
+                                  RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
+                                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+               return;
+       }
+       __mlx4_unregister_mac(dev, port, mac);
+       return;
+}
 EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
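__mlx4_register_mac above drops the old per-entry reference counts: a MAC may occupy at most one slot, a duplicate registration now fails with -EEXIST, and the chosen slot index doubles as the return value. A toy sketch of that table policy follows (fixed-size array, simplified error codes, not driver code).

#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 128

static uint64_t entries[TABLE_SIZE];    /* 0 means "free" */

static int register_mac(uint64_t mac)
{
        int i, free_slot = -1;

        for (i = 0; i < TABLE_SIZE; i++) {
                if (free_slot < 0 && !entries[i]) {
                        free_slot = i;  /* remember the first free slot */
                        continue;
                }
                if (entries[i] == mac)
                        return -1;      /* duplicate, like -EEXIST above */
        }
        if (free_slot < 0)
                return -2;              /* table full, like -ENOSPC above */
        entries[free_slot] = mac;
        return free_slot;               /* index is the return value */
}

int main(void)
{
        printf("%d\n", register_mac(0x001122334455ULL));   /* prints 0  */
        printf("%d\n", register_mac(0x001122334455ULL));   /* prints -1 */
        return 0;
}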
 
-int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap)
+int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
 {
        struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
        struct mlx4_mac_table *table = &info->mac_table;
-       int index = qpn - info->base_qpn;
        struct mlx4_mac_entry *entry;
-       int err;
+       int index = qpn - info->base_qpn;
+       int err = 0;
 
        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
                entry = radix_tree_lookup(&info->mac_tree, qpn);
                if (!entry)
                        return -EINVAL;
-               index = find_index(dev, table, entry->mac);
-               mlx4_uc_steer_release(dev, port, entry->mac, qpn, 0);
+               mlx4_uc_steer_release(dev, port, entry->mac, qpn);
+               mlx4_unregister_mac(dev, port, entry->mac);
                entry->mac = new_mac;
-               err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn, 0);
-               if (err || index < 0)
-                       return err;
+               mlx4_register_mac(dev, port, new_mac);
+               err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn);
+               return err;
        }
 
+       /* CX1 doesn't support multi-function mode */
        mutex_lock(&table->mutex);
 
        err = validate_index(dev, table, index);
@@ -304,7 +378,8 @@ int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wra
 
        err = mlx4_set_port_mac_table(dev, port, table->entries);
        if (unlikely(err)) {
-               mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac);
+               mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
+                        (unsigned long long) new_mac);
                table->entries[index] = 0;
        }
 out:
@@ -312,6 +387,7 @@ out:
        return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_replace_mac);
+
 static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
                                    __be32 *entries)
 {
@@ -326,7 +402,7 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
        memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
        in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
        err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B);
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
 
@@ -352,7 +428,8 @@ int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
 }
 EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
 
-int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
+static int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
+                               int *index)
 {
        struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
        int i, err = 0;
@@ -387,7 +464,7 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
                goto out;
        }
 
-       /* Register new MAC */
+       /* Register new VLAN */
        table->refs[free] = 1;
        table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
 
@@ -405,9 +482,27 @@ out:
        mutex_unlock(&table->mutex);
        return err;
 }
+
+int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
+{
+       u64 out_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&out_param, port);
+               err = mlx4_cmd_imm(dev, vlan, &out_param, RES_VLAN,
+                                  RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+                                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+               if (!err)
+                       *index = get_param_l(&out_param);
+
+               return err;
+       }
+       return __mlx4_register_vlan(dev, port, vlan, index);
+}
 EXPORT_SYMBOL_GPL(mlx4_register_vlan);
 
-void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+static void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
 {
        struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
 
@@ -432,6 +527,25 @@ void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
 out:
        mutex_unlock(&table->mutex);
 }
+
+void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+{
+       u64 in_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, port);
+               err = mlx4_cmd(dev, in_param, RES_VLAN, RES_OP_RESERVE_AND_MAP,
+                              MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+                              MLX4_CMD_WRAPPED);
+               if (err)
+                       mlx4_warn(dev, "Failed freeing vlan at index:%d\n",
+                                       index);
+
+               return;
+       }
+       __mlx4_unregister_vlan(dev, port, index);
+}
 EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
 
 int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
@@ -462,7 +576,8 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
        *(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
 
        err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
-                          MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+                          MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+                          MLX4_CMD_NATIVE);
        if (!err)
                *caps = *(__be32 *) (outbuf + 84);
        mlx4_free_cmd_mailbox(dev, inmailbox);
@@ -499,7 +614,8 @@ int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port)
        *(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
 
        err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
-                          MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+                          MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+                          MLX4_CMD_NATIVE);
 
        packet_error = be16_to_cpu(*(__be16 *) (outbuf + 4));
 
@@ -512,6 +628,139 @@ int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port)
        return err;
 }
 
+static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
+                               u8 op_mod, struct mlx4_cmd_mailbox *inbox)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_port_info *port_info;
+       struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
+       struct mlx4_slave_state *slave_st = &master->slave_state[slave];
+       struct mlx4_set_port_rqp_calc_context *qpn_context;
+       struct mlx4_set_port_general_context *gen_context;
+       int reset_qkey_viols;
+       int port;
+       int is_eth;
+       u32 in_modifier;
+       u32 promisc;
+       u16 mtu, prev_mtu;
+       int err;
+       int i;
+       __be32 agg_cap_mask;
+       __be32 slave_cap_mask;
+       __be32 new_cap_mask;
+
+       port = in_mod & 0xff;
+       in_modifier = in_mod >> 8;
+       is_eth = op_mod;
+       port_info = &priv->port[port];
+
+       /* Slaves cannot perform SET_PORT operations except changing MTU */
+       if (is_eth) {
+               if (slave != dev->caps.function &&
+                   in_modifier != MLX4_SET_PORT_GENERAL) {
+                       mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
+                                       slave);
+                       return -EINVAL;
+               }
+               switch (in_modifier) {
+               case MLX4_SET_PORT_RQP_CALC:
+                       qpn_context = inbox->buf;
+                       qpn_context->base_qpn =
+                               cpu_to_be32(port_info->base_qpn);
+                       qpn_context->n_mac = 0x7;
+                       promisc = be32_to_cpu(qpn_context->promisc) >>
+                               SET_PORT_PROMISC_SHIFT;
+                       qpn_context->promisc = cpu_to_be32(
+                               promisc << SET_PORT_PROMISC_SHIFT |
+                               port_info->base_qpn);
+                       promisc = be32_to_cpu(qpn_context->mcast) >>
+                               SET_PORT_MC_PROMISC_SHIFT;
+                       qpn_context->mcast = cpu_to_be32(
+                               promisc << SET_PORT_MC_PROMISC_SHIFT |
+                               port_info->base_qpn);
+                       break;
+               case MLX4_SET_PORT_GENERAL:
+                       gen_context = inbox->buf;
+                       /* MTU is configured as the max MTU among all
+                        * the functions on the port. */
+                       mtu = be16_to_cpu(gen_context->mtu);
+                       mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port]);
+                       prev_mtu = slave_st->mtu[port];
+                       slave_st->mtu[port] = mtu;
+                       if (mtu > master->max_mtu[port])
+                               master->max_mtu[port] = mtu;
+                       if (mtu < prev_mtu && prev_mtu ==
+                                               master->max_mtu[port]) {
+                               slave_st->mtu[port] = mtu;
+                               master->max_mtu[port] = mtu;
+                               for (i = 0; i < dev->num_slaves; i++) {
+                                       master->max_mtu[port] =
+                                       max(master->max_mtu[port],
+                                           master->slave_state[i].mtu[port]);
+                               }
+                       }
+
+                       gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
+                       break;
+               }
+               return mlx4_cmd(dev, inbox->dma, in_mod, op_mod,
+                               MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+                               MLX4_CMD_NATIVE);
+       }
+
+       /* For IB, we only consider:
+        * - The capability mask, which is set to the aggregate of all
+        *   slave function capabilities
+        * - The QKey violation counter - reset according to each request.
+        */
+
+       if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
+               reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
+               new_cap_mask = ((__be32 *) inbox->buf)[2];
+       } else {
+               reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
+               new_cap_mask = ((__be32 *) inbox->buf)[1];
+       }
+
+       agg_cap_mask = 0;
+       slave_cap_mask =
+               priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
+       priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
+       for (i = 0; i < dev->num_slaves; i++)
+               agg_cap_mask |=
+                       priv->mfunc.master.slave_state[i].ib_cap_mask[port];
+
+       /* only clear mailbox for guests.  Master may be setting
+       * MTU or PKEY table size
+       */
+       if (slave != dev->caps.function)
+               memset(inbox->buf, 0, 256);
+       if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
+               *(u8 *) inbox->buf         = !!reset_qkey_viols << 6;
+               ((__be32 *) inbox->buf)[2] = agg_cap_mask;
+       } else {
+               ((u8 *) inbox->buf)[3]     = !!reset_qkey_viols;
+               ((__be32 *) inbox->buf)[1] = agg_cap_mask;
+       }
+
+       err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+       if (err)
+               priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
+                       slave_cap_mask;
+       return err;
+}
+
+int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd)
+{
+       return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
+                                   vhcr->op_modifier, inbox);
+}
+
 int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
 {
        struct mlx4_cmd_mailbox *mailbox;
@@ -528,8 +777,127 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
 
        ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
        err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B);
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
 }
+
+int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
+                         u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_set_port_general_context *context;
+       int err;
+       u32 in_mod;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       context = mailbox->buf;
+       memset(context, 0, sizeof *context);
+
+       context->flags = SET_PORT_GEN_ALL_VALID;
+       context->mtu = cpu_to_be16(mtu);
+       context->pptx = (pptx * (!pfctx)) << 7;
+       context->pfctx = pfctx;
+       context->pprx = (pprx * (!pfcrx)) << 7;
+       context->pfcrx = pfcrx;
+
+       in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+                      MLX4_CMD_TIME_CLASS_B,  MLX4_CMD_WRAPPED);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_general);
+
+int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
+                          u8 promisc)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_set_port_rqp_calc_context *context;
+       int err;
+       u32 in_mod;
+       u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
+               MCAST_DIRECT : MCAST_DEFAULT;
+
+       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER  &&
+           dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
+               return 0;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       context = mailbox->buf;
+       memset(context, 0, sizeof *context);
+
+       context->base_qpn = cpu_to_be32(base_qpn);
+       context->n_mac = dev->caps.log_num_macs;
+       context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
+                                      base_qpn);
+       context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
+                                    base_qpn);
+       context->intra_no_vlan = 0;
+       context->no_vlan = MLX4_NO_VLAN_IDX;
+       context->intra_vlan_miss = 0;
+       context->vlan_miss = MLX4_VLAN_MISS_IDX;
+
+       in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+                      MLX4_CMD_TIME_CLASS_B,  MLX4_CMD_WRAPPED);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
+
+int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+                               struct mlx4_vhcr *vhcr,
+                               struct mlx4_cmd_mailbox *inbox,
+                               struct mlx4_cmd_mailbox *outbox,
+                               struct mlx4_cmd_info *cmd)
+{
+       int err = 0;
+
+       return err;
+}
+
+int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
+                       u64 mac, u64 clear, u8 mode)
+{
+       return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
+                       MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
+                       MLX4_CMD_WRAPPED);
+}
+EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);
+
+int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+                              struct mlx4_vhcr *vhcr,
+                              struct mlx4_cmd_mailbox *inbox,
+                              struct mlx4_cmd_mailbox *outbox,
+                              struct mlx4_cmd_info *cmd)
+{
+       int err = 0;
+
+       return err;
+}
+
+int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
+                              u32 in_mod, struct mlx4_cmd_mailbox *outbox)
+{
+       return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
+                           MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
+                           MLX4_CMD_NATIVE);
+}
+
+int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
+                               struct mlx4_vhcr *vhcr,
+                               struct mlx4_cmd_mailbox *inbox,
+                               struct mlx4_cmd_mailbox *outbox,
+                               struct mlx4_cmd_info *cmd)
+{
+       return mlx4_common_dump_eth_stats(dev, slave,
+                                         vhcr->in_modifier, outbox);
+}
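In mlx4_common_set_port above, the master programs the Ethernet port with the largest MTU requested by any function, clamped to the port capability. The driver rescans all slaves only when the previous maximum shrinks; the sketch below recomputes unconditionally for brevity, and its names and sizes are illustrative rather than driver state.

#include <stdio.h>

#define NUM_SLAVES 4

static int slave_mtu[NUM_SLAVES];

static int set_slave_mtu(int slave, int requested, int port_cap)
{
        int i, max = 0;

        if (requested > port_cap)
                requested = port_cap;   /* clamp to eth_mtu_cap */
        slave_mtu[slave] = requested;

        /* aggregate: the port carries the largest MTU any function asked for */
        for (i = 0; i < NUM_SLAVES; i++)
                if (slave_mtu[i] > max)
                        max = slave_mtu[i];
        return max;                     /* value written to the port */
}

int main(void)
{
        printf("port mtu = %d\n", set_slave_mtu(0, 9000, 9600)); /* 9000 */
        printf("port mtu = %d\n", set_slave_mtu(1, 1500, 9600)); /* 9000 */
        printf("port mtu = %d\n", set_slave_mtu(0, 1500, 9600)); /* 1500 */
        return 0;
}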
index b967647..66f91ca 100644 (file)
@@ -98,8 +98,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
        profile[MLX4_RES_EQ].size     = dev_cap->eqc_entry_sz;
        profile[MLX4_RES_DMPT].size   = dev_cap->dmpt_entry_sz;
        profile[MLX4_RES_CMPT].size   = dev_cap->cmpt_entry_sz;
-       profile[MLX4_RES_MTT].size    = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
-       profile[MLX4_RES_MCG].size    = MLX4_MGM_ENTRY_SIZE;
+       profile[MLX4_RES_MTT].size    = dev_cap->mtt_entry_sz;
+       profile[MLX4_RES_MCG].size    = mlx4_get_mgm_entry_size(dev);
 
        profile[MLX4_RES_QP].num      = request->num_qp;
        profile[MLX4_RES_RDMARC].num  = request->num_qp * request->rdmarc_per_qp;
@@ -210,7 +210,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
                        init_hca->cmpt_base      = profile[i].start;
                        break;
                case MLX4_RES_MTT:
-                       dev->caps.num_mtt_segs   = profile[i].num;
+                       dev->caps.num_mtts       = profile[i].num;
                        priv->mr_table.mtt_base  = profile[i].start;
                        init_hca->mtt_base       = profile[i].start;
                        break;
@@ -218,7 +218,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
                        dev->caps.num_mgms        = profile[i].num >> 1;
                        dev->caps.num_amgms       = profile[i].num >> 1;
                        init_hca->mc_base         = profile[i].start;
-                       init_hca->log_mc_entry_sz = ilog2(MLX4_MGM_ENTRY_SIZE);
+                       init_hca->log_mc_entry_sz =
+                                       ilog2(mlx4_get_mgm_entry_size(dev));
                        init_hca->log_mc_table_sz = profile[i].log_num;
                        init_hca->log_mc_hash_sz  = profile[i].log_num - 1;
                        break;
index 15f870c..6b03ac8 100644 (file)
@@ -35,6 +35,8 @@
 
 #include <linux/gfp.h>
 #include <linux/export.h>
+#include <linux/init.h>
+
 #include <linux/mlx4/cmd.h>
 #include <linux/mlx4/qp.h>
 
@@ -55,7 +57,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
        spin_unlock(&qp_table->lock);
 
        if (!qp) {
-               mlx4_warn(dev, "Async event for bogus QP %08x\n", qpn);
+               mlx4_dbg(dev, "Async event for nonexistent QP %08x\n", qpn);
                return;
        }
 
@@ -65,10 +67,17 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
                complete(&qp->free);
 }
 
-int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
-                  enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
-                  struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
-                  int sqd_event, struct mlx4_qp *qp)
+static int is_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp)
+{
+       return qp->qpn >= dev->caps.sqp_start &&
+               qp->qpn <= dev->caps.sqp_start + 1;
+}
+
+static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+                    enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
+                    struct mlx4_qp_context *context,
+                    enum mlx4_qp_optpar optpar,
+                    int sqd_event, struct mlx4_qp *qp, int native)
 {
        static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
                [MLX4_QP_STATE_RST] = {
@@ -110,16 +119,26 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                }
        };
 
+       struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
        int ret = 0;
+       u8 port;
 
        if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
            !op[cur_state][new_state])
                return -EINVAL;
 
-       if (op[cur_state][new_state] == MLX4_CMD_2RST_QP)
-               return mlx4_cmd(dev, 0, qp->qpn, 2,
-                               MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A);
+       if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) {
+               ret = mlx4_cmd(dev, 0, qp->qpn, 2,
+                       MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native);
+               if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR &&
+                   cur_state != MLX4_QP_STATE_RST &&
+                   is_qp0(dev, qp)) {
+                       port = (qp->qpn & 1) + 1;
+                       priv->mfunc.master.qp0_state[port].qp0_active = 0;
+               }
+               return ret;
+       }
 
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
@@ -132,107 +151,218 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
        }
 
+       port = ((context->pri_path.sched_queue >> 6) & 1) + 1;
+       if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
+               context->pri_path.sched_queue = (context->pri_path.sched_queue &
+                                               0xc3);
+
        *(__be32 *) mailbox->buf = cpu_to_be32(optpar);
        memcpy(mailbox->buf + 8, context, sizeof *context);
 
        ((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
                cpu_to_be32(qp->qpn);
 
-       ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31),
+       ret = mlx4_cmd(dev, mailbox->dma | dev->caps.function,
+                      qp->qpn | (!!sqd_event << 31),
                       new_state == MLX4_QP_STATE_RST ? 2 : 0,
-                      op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C);
+                      op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
        return ret;
 }
+
+int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+                  enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
+                  struct mlx4_qp_context *context,
+                  enum mlx4_qp_optpar optpar,
+                  int sqd_event, struct mlx4_qp *qp)
+{
+       return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context,
+                               optpar, sqd_event, qp, 0);
+}
 EXPORT_SYMBOL_GPL(mlx4_qp_modify);
 
-int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
+int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+                                  int *base)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_qp_table *qp_table = &priv->qp_table;
-       int qpn;
 
-       qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
-       if (qpn == -1)
+       *base = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
+       if (*base == -1)
                return -ENOMEM;
 
-       *base = qpn;
        return 0;
 }
+
+int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
+{
+       u64 in_param;
+       u64 out_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, cnt);
+               set_param_h(&in_param, align);
+               err = mlx4_cmd_imm(dev, in_param, &out_param,
+                                  RES_QP, RES_OP_RESERVE,
+                                  MLX4_CMD_ALLOC_RES,
+                                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+               if (err)
+                       return err;
+
+               *base = get_param_l(&out_param);
+               return 0;
+       }
+       return __mlx4_qp_reserve_range(dev, cnt, align, base);
+}
 EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
 
-void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_qp_table *qp_table = &priv->qp_table;
-       if (base_qpn < dev->caps.sqp_start + 8)
-               return;
 
+       if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
+               return;
        mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
 }
+
+void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+{
+       u64 in_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, base_qpn);
+               set_param_h(&in_param, cnt);
+               err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE,
+                              MLX4_CMD_FREE_RES,
+                              MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+               if (err) {
+                       mlx4_warn(dev, "Failed to release qp range"
+                                 " base:%d cnt:%d\n", base_qpn, cnt);
+               }
+       } else
+                __mlx4_qp_release_range(dev, base_qpn, cnt);
+}
 EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
 
-int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_qp_table *qp_table = &priv->qp_table;
        int err;
 
-       if (!qpn)
-               return -EINVAL;
-
-       qp->qpn = qpn;
-
-       err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
+       err = mlx4_table_get(dev, &qp_table->qp_table, qpn);
        if (err)
                goto err_out;
 
-       err = mlx4_table_get(dev, &qp_table->auxc_table, qp->qpn);
+       err = mlx4_table_get(dev, &qp_table->auxc_table, qpn);
        if (err)
                goto err_put_qp;
 
-       err = mlx4_table_get(dev, &qp_table->altc_table, qp->qpn);
+       err = mlx4_table_get(dev, &qp_table->altc_table, qpn);
        if (err)
                goto err_put_auxc;
 
-       err = mlx4_table_get(dev, &qp_table->rdmarc_table, qp->qpn);
+       err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn);
        if (err)
                goto err_put_altc;
 
-       err = mlx4_table_get(dev, &qp_table->cmpt_table, qp->qpn);
+       err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn);
        if (err)
                goto err_put_rdmarc;
 
-       spin_lock_irq(&qp_table->lock);
-       err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp);
-       spin_unlock_irq(&qp_table->lock);
-       if (err)
-               goto err_put_cmpt;
-
-       atomic_set(&qp->refcount, 1);
-       init_completion(&qp->free);
-
        return 0;
 
-err_put_cmpt:
-       mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
-
 err_put_rdmarc:
-       mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
+       mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
 
 err_put_altc:
-       mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
+       mlx4_table_put(dev, &qp_table->altc_table, qpn);
 
 err_put_auxc:
-       mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
+       mlx4_table_put(dev, &qp_table->auxc_table, qpn);
 
 err_put_qp:
-       mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
+       mlx4_table_put(dev, &qp_table->qp_table, qpn);
 
 err_out:
        return err;
 }
+
+static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
+{
+       u64 param;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&param, qpn);
+               return mlx4_cmd_imm(dev, param, &param, RES_QP, RES_OP_MAP_ICM,
+                                   MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
+                                   MLX4_CMD_WRAPPED);
+       }
+       return __mlx4_qp_alloc_icm(dev, qpn);
+}
+
+void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_qp_table *qp_table = &priv->qp_table;
+
+       mlx4_table_put(dev, &qp_table->cmpt_table, qpn);
+       mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
+       mlx4_table_put(dev, &qp_table->altc_table, qpn);
+       mlx4_table_put(dev, &qp_table->auxc_table, qpn);
+       mlx4_table_put(dev, &qp_table->qp_table, qpn);
+}
+
+static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
+{
+       u64 in_param;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, qpn);
+               if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM,
+                            MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+                            MLX4_CMD_WRAPPED))
+                       mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn);
+       } else
+               __mlx4_qp_free_icm(dev, qpn);
+}
+
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_qp_table *qp_table = &priv->qp_table;
+       int err;
+
+       if (!qpn)
+               return -EINVAL;
+
+       qp->qpn = qpn;
+
+       err = mlx4_qp_alloc_icm(dev, qpn);
+       if (err)
+               return err;
+
+       spin_lock_irq(&qp_table->lock);
+       err = radix_tree_insert(&dev->qp_table_tree, qp->qpn &
+                               (dev->caps.num_qps - 1), qp);
+       spin_unlock_irq(&qp_table->lock);
+       if (err)
+               goto err_icm;
+
+       atomic_set(&qp->refcount, 1);
+       init_completion(&qp->free);
+
+       return 0;
+
+err_icm:
+       mlx4_qp_free_icm(dev, qpn);
+       return err;
+}
+
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
 
 void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
@@ -248,24 +378,18 @@ EXPORT_SYMBOL_GPL(mlx4_qp_remove);
 
 void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
 {
-       struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
-
        if (atomic_dec_and_test(&qp->refcount))
                complete(&qp->free);
        wait_for_completion(&qp->free);
 
-       mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
-       mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
-       mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
-       mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
-       mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
+       mlx4_qp_free_icm(dev, qp->qpn);
 }
 EXPORT_SYMBOL_GPL(mlx4_qp_free);
 
 static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
 {
        return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
-                       MLX4_CMD_TIME_CLASS_B);
+                       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 }
 
 int mlx4_init_qp_table(struct mlx4_dev *dev)
@@ -276,6 +400,8 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
 
        spin_lock_init(&qp_table->lock);
        INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
+       if (mlx4_is_slave(dev))
+               return 0;
 
        /*
         * We reserve 2 extra QPs per port for the special QPs.  The
@@ -327,6 +453,9 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
 
 void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
 {
+       if (mlx4_is_slave(dev))
+               return;
+
        mlx4_CONF_SPECIAL_QP(dev, 0);
        mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
 }
@@ -342,7 +471,8 @@ int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
                return PTR_ERR(mailbox);
 
        err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
-                          MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A);
+                          MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A,
+                          MLX4_CMD_WRAPPED);
        if (!err)
                memcpy(context, mailbox->buf + 8, sizeof *context);
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
new file mode 100644 (file)
index 0000000..b41762d
--- /dev/null
@@ -0,0 +1,3103 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
+
+#include "mlx4.h"
+#include "fw.h"
+
+#define MLX4_MAC_VALID         (1ull << 63)
+#define MLX4_MAC_MASK          0x7fffffffffffffffULL
+#define ETH_ALEN               6
+
+struct mac_res {
+       struct list_head list;
+       u64 mac;
+       u8 port;
+};
+
+struct res_common {
+       struct list_head        list;
+       u32                     res_id;
+       int                     owner;
+       int                     state;
+       int                     from_state;
+       int                     to_state;
+       int                     removing;
+};
+
+enum {
+       RES_ANY_BUSY = 1
+};
+
+struct res_gid {
+       struct list_head        list;
+       u8                      gid[16];
+       enum mlx4_protocol      prot;
+};
+
+enum res_qp_states {
+       RES_QP_BUSY = RES_ANY_BUSY,
+
+       /* QP number was allocated */
+       RES_QP_RESERVED,
+
+       /* ICM memory for QP context was mapped */
+       RES_QP_MAPPED,
+
+       /* QP is in hw ownership */
+       RES_QP_HW
+};
+
+static inline const char *qp_states_str(enum res_qp_states state)
+{
+       switch (state) {
+       case RES_QP_BUSY: return "RES_QP_BUSY";
+       case RES_QP_RESERVED: return "RES_QP_RESERVED";
+       case RES_QP_MAPPED: return "RES_QP_MAPPED";
+       case RES_QP_HW: return "RES_QP_HW";
+       default: return "Unknown";
+       }
+}
+
+struct res_qp {
+       struct res_common       com;
+       struct res_mtt         *mtt;
+       struct res_cq          *rcq;
+       struct res_cq          *scq;
+       struct res_srq         *srq;
+       struct list_head        mcg_list;
+       spinlock_t              mcg_spl;
+       int                     local_qpn;
+};
+
+enum res_mtt_states {
+       RES_MTT_BUSY = RES_ANY_BUSY,
+       RES_MTT_ALLOCATED,
+};
+
+static inline const char *mtt_states_str(enum res_mtt_states state)
+{
+       switch (state) {
+       case RES_MTT_BUSY: return "RES_MTT_BUSY";
+       case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
+       default: return "Unknown";
+       }
+}
+
+struct res_mtt {
+       struct res_common       com;
+       int                     order;
+       atomic_t                ref_count;
+};
+
+enum res_mpt_states {
+       RES_MPT_BUSY = RES_ANY_BUSY,
+       RES_MPT_RESERVED,
+       RES_MPT_MAPPED,
+       RES_MPT_HW,
+};
+
+struct res_mpt {
+       struct res_common       com;
+       struct res_mtt         *mtt;
+       int                     key;
+};
+
+enum res_eq_states {
+       RES_EQ_BUSY = RES_ANY_BUSY,
+       RES_EQ_RESERVED,
+       RES_EQ_HW,
+};
+
+struct res_eq {
+       struct res_common       com;
+       struct res_mtt         *mtt;
+};
+
+enum res_cq_states {
+       RES_CQ_BUSY = RES_ANY_BUSY,
+       RES_CQ_ALLOCATED,
+       RES_CQ_HW,
+};
+
+struct res_cq {
+       struct res_common       com;
+       struct res_mtt         *mtt;
+       atomic_t                ref_count;
+};
+
+enum res_srq_states {
+       RES_SRQ_BUSY = RES_ANY_BUSY,
+       RES_SRQ_ALLOCATED,
+       RES_SRQ_HW,
+};
+
+static inline const char *srq_states_str(enum res_srq_states state)
+{
+       switch (state) {
+       case RES_SRQ_BUSY: return "RES_SRQ_BUSY";
+       case RES_SRQ_ALLOCATED: return "RES_SRQ_ALLOCATED";
+       case RES_SRQ_HW: return "RES_SRQ_HW";
+       default: return "Unknown";
+       }
+}
+
+struct res_srq {
+       struct res_common       com;
+       struct res_mtt         *mtt;
+       struct res_cq          *cq;
+       atomic_t                ref_count;
+};
+
+enum res_counter_states {
+       RES_COUNTER_BUSY = RES_ANY_BUSY,
+       RES_COUNTER_ALLOCATED,
+};
+
+static inline const char *counter_states_str(enum res_counter_states state)
+{
+       switch (state) {
+       case RES_COUNTER_BUSY: return "RES_COUNTER_BUSY";
+       case RES_COUNTER_ALLOCATED: return "RES_COUNTER_ALLOCATED";
+       default: return "Unknown";
+       }
+}
+
+struct res_counter {
+       struct res_common       com;
+       int                     port;
+};
+
+/* For debug purposes only */
+static const char *ResourceType(enum mlx4_resource rt)
+{
+       switch (rt) {
+       case RES_QP: return "RES_QP";
+       case RES_CQ: return "RES_CQ";
+       case RES_SRQ: return "RES_SRQ";
+       case RES_MPT: return "RES_MPT";
+       case RES_MTT: return "RES_MTT";
+       case RES_MAC: return "RES_MAC";
+       case RES_EQ: return "RES_EQ";
+       case RES_COUNTER: return "RES_COUNTER";
+       default: return "Unknown resource type !!!";
+       }
+}
+
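+/*
+ * Allocate the per-slave resource lists and initialize the per-type
+ * radix trees and the tracker lock used to look up resources by id.
+ */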
+int mlx4_init_resource_tracker(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int i;
+       int t;
+
+       priv->mfunc.master.res_tracker.slave_list =
+               kzalloc(dev->num_slaves * sizeof(struct slave_list),
+                       GFP_KERNEL);
+       if (!priv->mfunc.master.res_tracker.slave_list)
+               return -ENOMEM;
+
+       for (i = 0; i < dev->num_slaves; i++) {
+               for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
+                       INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
+                                      slave_list[i].res_list[t]);
+               mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
+       }
+
+       mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
+                dev->num_slaves);
+       for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
+               INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i],
+                               GFP_ATOMIC|__GFP_NOWARN);
+
+       spin_lock_init(&priv->mfunc.master.res_tracker.lock);
+       return 0;
+}
+
+void mlx4_free_resource_tracker(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int i;
+
+       if (priv->mfunc.master.res_tracker.slave_list) {
+               for (i = 0; i < dev->num_slaves; i++)
+                       mlx4_delete_all_resources_for_slave(dev, i);
+
+               kfree(priv->mfunc.master.res_tracker.slave_list);
+       }
+}
+
+static void update_ud_gid(struct mlx4_dev *dev,
+                         struct mlx4_qp_context *qp_ctx, u8 slave)
+{
+       u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
+
+       if (MLX4_QP_ST_UD == ts)
+               qp_ctx->pri_path.mgid_index = 0x80 | slave;
+
+       mlx4_dbg(dev, "slave %d, new gid index: 0x%x\n",
+                slave, qp_ctx->pri_path.mgid_index);
+}
+
+static int mpt_mask(struct mlx4_dev *dev)
+{
+       return dev->caps.num_mpts - 1;
+}
+
+static void *find_res(struct mlx4_dev *dev, int res_id,
+                     enum mlx4_resource type)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
+                                res_id);
+}
+
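+/*
+ * Look up a resource by id, verify that it is owned by @slave and not
+ * already busy, then mark it busy so no other command can operate on it.
+ * The caller releases it again with put_res().
+ */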
+static int get_res(struct mlx4_dev *dev, int slave, int res_id,
+                  enum mlx4_resource type,
+                  void *res)
+{
+       struct res_common *r;
+       int err = 0;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       r = find_res(dev, res_id, type);
+       if (!r) {
+               err = -ENOENT;
+               goto exit;
+       }
+
+       if (r->state == RES_ANY_BUSY) {
+               err = -EBUSY;
+               goto exit;
+       }
+
+       if (r->owner != slave) {
+               err = -EPERM;
+               goto exit;
+       }
+
+       r->from_state = r->state;
+       r->state = RES_ANY_BUSY;
+       mlx4_dbg(dev, "res %s id 0x%x to busy\n",
+                ResourceType(type), r->res_id);
+
+       if (res)
+               *((struct res_common **)res) = r;
+
+exit:
+       spin_unlock_irq(mlx4_tlock(dev));
+       return err;
+}
+
+int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
+                                   enum mlx4_resource type,
+                                   int res_id, int *slave)
+{
+       struct res_common *r;
+       int err = -ENOENT;
+       int id = res_id;
+
+       if (type == RES_QP)
+               id &= 0x7fffff;
+       spin_lock(mlx4_tlock(dev));
+
+       r = find_res(dev, id, type);
+       if (r) {
+               *slave = r->owner;
+               err = 0;
+       }
+       spin_unlock(mlx4_tlock(dev));
+
+       return err;
+}
+
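+/* Release a resource taken with get_res(): restore its previous state. */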
+static void put_res(struct mlx4_dev *dev, int slave, int res_id,
+                   enum mlx4_resource type)
+{
+       struct res_common *r;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       r = find_res(dev, res_id, type);
+       if (r)
+               r->state = r->from_state;
+       spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static struct res_common *alloc_qp_tr(int id)
+{
+       struct res_qp *ret;
+
+       ret = kzalloc(sizeof *ret, GFP_KERNEL);
+       if (!ret)
+               return NULL;
+
+       ret->com.res_id = id;
+       ret->com.state = RES_QP_RESERVED;
+       INIT_LIST_HEAD(&ret->mcg_list);
+       spin_lock_init(&ret->mcg_spl);
+
+       return &ret->com;
+}
+
+static struct res_common *alloc_mtt_tr(int id, int order)
+{
+       struct res_mtt *ret;
+
+       ret = kzalloc(sizeof *ret, GFP_KERNEL);
+       if (!ret)
+               return NULL;
+
+       ret->com.res_id = id;
+       ret->order = order;
+       ret->com.state = RES_MTT_ALLOCATED;
+       atomic_set(&ret->ref_count, 0);
+
+       return &ret->com;
+}
+
+static struct res_common *alloc_mpt_tr(int id, int key)
+{
+       struct res_mpt *ret;
+
+       ret = kzalloc(sizeof *ret, GFP_KERNEL);
+       if (!ret)
+               return NULL;
+
+       ret->com.res_id = id;
+       ret->com.state = RES_MPT_RESERVED;
+       ret->key = key;
+
+       return &ret->com;
+}
+
+static struct res_common *alloc_eq_tr(int id)
+{
+       struct res_eq *ret;
+
+       ret = kzalloc(sizeof *ret, GFP_KERNEL);
+       if (!ret)
+               return NULL;
+
+       ret->com.res_id = id;
+       ret->com.state = RES_EQ_RESERVED;
+
+       return &ret->com;
+}
+
+static struct res_common *alloc_cq_tr(int id)
+{
+       struct res_cq *ret;
+
+       ret = kzalloc(sizeof *ret, GFP_KERNEL);
+       if (!ret)
+               return NULL;
+
+       ret->com.res_id = id;
+       ret->com.state = RES_CQ_ALLOCATED;
+       atomic_set(&ret->ref_count, 0);
+
+       return &ret->com;
+}
+
+static struct res_common *alloc_srq_tr(int id)
+{
+       struct res_srq *ret;
+
+       ret = kzalloc(sizeof *ret, GFP_KERNEL);
+       if (!ret)
+               return NULL;
+
+       ret->com.res_id = id;
+       ret->com.state = RES_SRQ_ALLOCATED;
+       atomic_set(&ret->ref_count, 0);
+
+       return &ret->com;
+}
+
+static struct res_common *alloc_counter_tr(int id)
+{
+       struct res_counter *ret;
+
+       ret = kzalloc(sizeof *ret, GFP_KERNEL);
+       if (!ret)
+               return NULL;
+
+       ret->com.res_id = id;
+       ret->com.state = RES_COUNTER_ALLOCATED;
+
+       return &ret->com;
+}
+
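+/*
+ * Allocate a tracker entry of the given type and record @slave as its
+ * owner.  The meaning of @extra depends on the type (MPT key, MTT order).
+ */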
+static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
+                                  int extra)
+{
+       struct res_common *ret;
+
+       switch (type) {
+       case RES_QP:
+               ret = alloc_qp_tr(id);
+               break;
+       case RES_MPT:
+               ret = alloc_mpt_tr(id, extra);
+               break;
+       case RES_MTT:
+               ret = alloc_mtt_tr(id, extra);
+               break;
+       case RES_EQ:
+               ret = alloc_eq_tr(id);
+               break;
+       case RES_CQ:
+               ret = alloc_cq_tr(id);
+               break;
+       case RES_SRQ:
+               ret = alloc_srq_tr(id);
+               break;
+       case RES_MAC:
+               printk(KERN_ERR "implementation missing\n");
+               return NULL;
+       case RES_COUNTER:
+               ret = alloc_counter_tr(id);
+               break;
+
+       default:
+               return NULL;
+       }
+       if (ret)
+               ret->owner = slave;
+
+       return ret;
+}
+
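+/*
+ * Create tracker entries for ids [base, base + count) and insert them into
+ * the radix tree and the slave's resource list under the tracker lock.
+ * On any failure, all entries added so far are removed again.
+ */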
+static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
+                        enum mlx4_resource type, int extra)
+{
+       int i;
+       int err;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct res_common **res_arr;
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct radix_tree_root *root = &tracker->res_tree[type];
+
+       res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
+       if (!res_arr)
+               return -ENOMEM;
+
+       for (i = 0; i < count; ++i) {
+               res_arr[i] = alloc_tr(base + i, type, slave, extra);
+               if (!res_arr[i]) {
+                       for (--i; i >= 0; --i)
+                               kfree(res_arr[i]);
+
+                       kfree(res_arr);
+                       return -ENOMEM;
+               }
+       }
+
+       spin_lock_irq(mlx4_tlock(dev));
+       for (i = 0; i < count; ++i) {
+               if (find_res(dev, base + i, type)) {
+                       err = -EEXIST;
+                       goto undo;
+               }
+               err = radix_tree_insert(root, base + i, res_arr[i]);
+               if (err)
+                       goto undo;
+               list_add_tail(&res_arr[i]->list,
+                             &tracker->slave_list[slave].res_list[type]);
+       }
+       spin_unlock_irq(mlx4_tlock(dev));
+       kfree(res_arr);
+
+       return 0;
+
+undo:
+       for (--i; i >= 0; --i)
+               radix_tree_delete(&tracker->res_tree[type], base + i);
+
+       spin_unlock_irq(mlx4_tlock(dev));
+
+       for (i = 0; i < count; ++i)
+               kfree(res_arr[i]);
+
+       kfree(res_arr);
+
+       return err;
+}
+
+static int remove_qp_ok(struct res_qp *res)
+{
+       if (res->com.state == RES_QP_BUSY)
+               return -EBUSY;
+       else if (res->com.state != RES_QP_RESERVED)
+               return -EPERM;
+
+       return 0;
+}
+
+static int remove_mtt_ok(struct res_mtt *res, int order)
+{
+       if (res->com.state == RES_MTT_BUSY ||
+           atomic_read(&res->ref_count)) {
+               printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
+                      __func__, __LINE__,
+                      mtt_states_str(res->com.state),
+                      atomic_read(&res->ref_count));
+               return -EBUSY;
+       } else if (res->com.state != RES_MTT_ALLOCATED)
+               return -EPERM;
+       else if (res->order != order)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int remove_mpt_ok(struct res_mpt *res)
+{
+       if (res->com.state == RES_MPT_BUSY)
+               return -EBUSY;
+       else if (res->com.state != RES_MPT_RESERVED)
+               return -EPERM;
+
+       return 0;
+}
+
+static int remove_eq_ok(struct res_eq *res)
+{
+       if (res->com.state == RES_EQ_BUSY)
+               return -EBUSY;
+       else if (res->com.state != RES_EQ_RESERVED)
+               return -EPERM;
+
+       return 0;
+}
+
+static int remove_counter_ok(struct res_counter *res)
+{
+       if (res->com.state == RES_COUNTER_BUSY)
+               return -EBUSY;
+       else if (res->com.state != RES_COUNTER_ALLOCATED)
+               return -EPERM;
+
+       return 0;
+}
+
+static int remove_cq_ok(struct res_cq *res)
+{
+       if (res->com.state == RES_CQ_BUSY)
+               return -EBUSY;
+       else if (res->com.state != RES_CQ_ALLOCATED)
+               return -EPERM;
+
+       return 0;
+}
+
+static int remove_srq_ok(struct res_srq *res)
+{
+       if (res->com.state == RES_SRQ_BUSY)
+               return -EBUSY;
+       else if (res->com.state != RES_SRQ_ALLOCATED)
+               return -EPERM;
+
+       return 0;
+}
+
+static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
+{
+       switch (type) {
+       case RES_QP:
+               return remove_qp_ok((struct res_qp *)res);
+       case RES_CQ:
+               return remove_cq_ok((struct res_cq *)res);
+       case RES_SRQ:
+               return remove_srq_ok((struct res_srq *)res);
+       case RES_MPT:
+               return remove_mpt_ok((struct res_mpt *)res);
+       case RES_MTT:
+               return remove_mtt_ok((struct res_mtt *)res, extra);
+       case RES_MAC:
+               return -ENOSYS;
+       case RES_EQ:
+               return remove_eq_ok((struct res_eq *)res);
+       case RES_COUNTER:
+               return remove_counter_ok((struct res_counter *)res);
+       default:
+               return -EINVAL;
+       }
+}
+
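+/*
+ * Remove tracker entries for ids [base, base + count): first verify that
+ * every entry exists, belongs to @slave and is in a removable state, then
+ * delete them from the radix tree and the slave's list.
+ */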
+static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
+                        enum mlx4_resource type, int extra)
+{
+       int i;
+       int err;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct res_common *r;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       for (i = base; i < base + count; ++i) {
+               r = radix_tree_lookup(&tracker->res_tree[type], i);
+               if (!r) {
+                       err = -ENOENT;
+                       goto out;
+               }
+               if (r->owner != slave) {
+                       err = -EPERM;
+                       goto out;
+               }
+               err = remove_ok(r, type, extra);
+               if (err)
+                       goto out;
+       }
+
+       for (i = base; i < base + count; ++i) {
+               r = radix_tree_lookup(&tracker->res_tree[type], i);
+               radix_tree_delete(&tracker->res_tree[type], i);
+               list_del(&r->list);
+               kfree(r);
+       }
+       err = 0;
+
+out:
+       spin_unlock_irq(mlx4_tlock(dev));
+
+       return err;
+}
+
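+/*
+ * Begin a QP state transition: validate that moving from the current state
+ * to @state is legal, then mark the resource busy and record the from/to
+ * states.  The caller finishes with res_end_move() or undoes the move with
+ * res_abort_move().
+ */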
+static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
+                               enum res_qp_states state, struct res_qp **qp,
+                               int alloc)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct res_qp *r;
+       int err = 0;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn);
+       if (!r)
+               err = -ENOENT;
+       else if (r->com.owner != slave)
+               err = -EPERM;
+       else {
+               switch (state) {
+               case RES_QP_BUSY:
+                       mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n",
+                                __func__, r->com.res_id);
+                       err = -EBUSY;
+                       break;
+
+               case RES_QP_RESERVED:
+                       if (r->com.state == RES_QP_MAPPED && !alloc)
+                               break;
+
+                       mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id);
+                       err = -EINVAL;
+                       break;
+
+               case RES_QP_MAPPED:
+                       if ((r->com.state == RES_QP_RESERVED && alloc) ||
+                           r->com.state == RES_QP_HW)
+                               break;
+                       else {
+                               mlx4_dbg(dev, "failed RES_QP, 0x%x\n",
+                                         r->com.res_id);
+                               err = -EINVAL;
+                       }
+
+                       break;
+
+               case RES_QP_HW:
+                       if (r->com.state != RES_QP_MAPPED)
+                               err = -EINVAL;
+                       break;
+               default:
+                       err = -EINVAL;
+               }
+
+               if (!err) {
+                       r->com.from_state = r->com.state;
+                       r->com.to_state = state;
+                       r->com.state = RES_QP_BUSY;
+                       if (qp)
+                               *qp = (struct res_qp *)r;
+               }
+       }
+
+       spin_unlock_irq(mlx4_tlock(dev));
+
+       return err;
+}
+
+static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+                               enum res_mpt_states state, struct res_mpt **mpt)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct res_mpt *r;
+       int err = 0;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index);
+       if (!r)
+               err = -ENOENT;
+       else if (r->com.owner != slave)
+               err = -EPERM;
+       else {
+               switch (state) {
+               case RES_MPT_BUSY:
+                       err = -EINVAL;
+                       break;
+
+               case RES_MPT_RESERVED:
+                       if (r->com.state != RES_MPT_MAPPED)
+                               err = -EINVAL;
+                       break;
+
+               case RES_MPT_MAPPED:
+                       if (r->com.state != RES_MPT_RESERVED &&
+                           r->com.state != RES_MPT_HW)
+                               err = -EINVAL;
+                       break;
+
+               case RES_MPT_HW:
+                       if (r->com.state != RES_MPT_MAPPED)
+                               err = -EINVAL;
+                       break;
+               default:
+                       err = -EINVAL;
+               }
+
+               if (!err) {
+                       r->com.from_state = r->com.state;
+                       r->com.to_state = state;
+                       r->com.state = RES_MPT_BUSY;
+                       if (mpt)
+                               *mpt = (struct res_mpt *)r;
+               }
+       }
+
+       spin_unlock_irq(mlx4_tlock(dev));
+
+       return err;
+}
+
+static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+                               enum res_eq_states state, struct res_eq **eq)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct res_eq *r;
+       int err = 0;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index);
+       if (!r)
+               err = -ENOENT;
+       else if (r->com.owner != slave)
+               err = -EPERM;
+       else {
+               switch (state) {
+               case RES_EQ_BUSY:
+                       err = -EINVAL;
+                       break;
+
+               case RES_EQ_RESERVED:
+                       if (r->com.state != RES_EQ_HW)
+                               err = -EINVAL;
+                       break;
+
+               case RES_EQ_HW:
+                       if (r->com.state != RES_EQ_RESERVED)
+                               err = -EINVAL;
+                       break;
+
+               default:
+                       err = -EINVAL;
+               }
+
+               if (!err) {
+                       r->com.from_state = r->com.state;
+                       r->com.to_state = state;
+                       r->com.state = RES_EQ_BUSY;
+                       if (eq)
+                               *eq = r;
+               }
+       }
+
+       spin_unlock_irq(mlx4_tlock(dev));
+
+       return err;
+}
+
+static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
+                               enum res_cq_states state, struct res_cq **cq)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct res_cq *r;
+       int err;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn);
+       if (!r)
+               err = -ENOENT;
+       else if (r->com.owner != slave)
+               err = -EPERM;
+       else {
+               switch (state) {
+               case RES_CQ_BUSY:
+                       err = -EBUSY;
+                       break;
+
+               case RES_CQ_ALLOCATED:
+                       if (r->com.state != RES_CQ_HW)
+                               err = -EINVAL;
+                       else if (atomic_read(&r->ref_count))
+                               err = -EBUSY;
+                       else
+                               err = 0;
+                       break;
+
+               case RES_CQ_HW:
+                       if (r->com.state != RES_CQ_ALLOCATED)
+                               err = -EINVAL;
+                       else
+                               err = 0;
+                       break;
+
+               default:
+                       err = -EINVAL;
+               }
+
+               if (!err) {
+                       r->com.from_state = r->com.state;
+                       r->com.to_state = state;
+                       r->com.state = RES_CQ_BUSY;
+                       if (cq)
+                               *cq = r;
+               }
+       }
+
+       spin_unlock_irq(mlx4_tlock(dev));
+
+       return err;
+}
+
+static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+                                enum res_cq_states state, struct res_srq **srq)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct res_srq *r;
+       int err = 0;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index);
+       if (!r)
+               err = -ENOENT;
+       else if (r->com.owner != slave)
+               err = -EPERM;
+       else {
+               switch (state) {
+               case RES_SRQ_BUSY:
+                       err = -EINVAL;
+                       break;
+
+               case RES_SRQ_ALLOCATED:
+                       if (r->com.state != RES_SRQ_HW)
+                               err = -EINVAL;
+                       else if (atomic_read(&r->ref_count))
+                               err = -EBUSY;
+                       break;
+
+               case RES_SRQ_HW:
+                       if (r->com.state != RES_SRQ_ALLOCATED)
+                               err = -EINVAL;
+                       break;
+
+               default:
+                       err = -EINVAL;
+               }
+
+               if (!err) {
+                       r->com.from_state = r->com.state;
+                       r->com.to_state = state;
+                       r->com.state = RES_SRQ_BUSY;
+                       if (srq)
+                               *srq = r;
+               }
+       }
+
+       spin_unlock_irq(mlx4_tlock(dev));
+
+       return err;
+}
+
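+/*
+ * Undo a pending state transition started by a *_res_start_move_to()
+ * helper, restoring the state saved in from_state; res_end_move() below
+ * commits the transition by moving to to_state instead.
+ */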
+static void res_abort_move(struct mlx4_dev *dev, int slave,
+                          enum mlx4_resource type, int id)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct res_common *r;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       r = radix_tree_lookup(&tracker->res_tree[type], id);
+       if (r && (r->owner == slave))
+               r->state = r->from_state;
+       spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void res_end_move(struct mlx4_dev *dev, int slave,
+                        enum mlx4_resource type, int id)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct res_common *r;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       r = radix_tree_lookup(&tracker->res_tree[type], id);
+       if (r && (r->owner == slave))
+               r->state = r->to_state;
+       spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
+{
+       return mlx4_is_qp_reserved(dev, qpn);
+}
+
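+/*
+ * Allocate QP resources for a slave: RES_OP_RESERVE reserves a range of
+ * QP numbers and tracks it, RES_OP_MAP_ICM maps ICM memory for one QP
+ * and moves its tracker entry to RES_QP_MAPPED.
+ */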
+static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                       u64 in_param, u64 *out_param)
+{
+       int err;
+       int count;
+       int align;
+       int base;
+       int qpn;
+
+       switch (op) {
+       case RES_OP_RESERVE:
+               count = get_param_l(&in_param);
+               align = get_param_h(&in_param);
+               err = __mlx4_qp_reserve_range(dev, count, align, &base);
+               if (err)
+                       return err;
+
+               err = add_res_range(dev, slave, base, count, RES_QP, 0);
+               if (err) {
+                       __mlx4_qp_release_range(dev, base, count);
+                       return err;
+               }
+               set_param_l(out_param, base);
+               break;
+       case RES_OP_MAP_ICM:
+               qpn = get_param_l(&in_param) & 0x7fffff;
+               if (valid_reserved(dev, slave, qpn)) {
+                       err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
+                       if (err)
+                               return err;
+               }
+
+               err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
+                                          NULL, 1);
+               if (err)
+                       return err;
+
+               if (!valid_reserved(dev, slave, qpn)) {
+                       err = __mlx4_qp_alloc_icm(dev, qpn);
+                       if (err) {
+                               res_abort_move(dev, slave, RES_QP, qpn);
+                               return err;
+                       }
+               }
+
+               res_end_move(dev, slave, RES_QP, qpn);
+               break;
+
+       default:
+               err = -EINVAL;
+               break;
+       }
+       return err;
+}
+
+static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                        u64 in_param, u64 *out_param)
+{
+       int err = -EINVAL;
+       int base;
+       int order;
+
+       if (op != RES_OP_RESERVE_AND_MAP)
+               return err;
+
+       order = get_param_l(&in_param);
+       base = __mlx4_alloc_mtt_range(dev, order);
+       if (base == -1)
+               return -ENOMEM;
+
+       err = add_res_range(dev, slave, base, 1, RES_MTT, order);
+       if (err)
+               __mlx4_free_mtt_range(dev, base, order);
+       else
+               set_param_l(out_param, base);
+
+       return err;
+}
+
+static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                        u64 in_param, u64 *out_param)
+{
+       int err = -EINVAL;
+       int index;
+       int id;
+       struct res_mpt *mpt;
+
+       switch (op) {
+       case RES_OP_RESERVE:
+               index = __mlx4_mr_reserve(dev);
+               if (index == -1)
+                       break;
+               id = index & mpt_mask(dev);
+
+               err = add_res_range(dev, slave, id, 1, RES_MPT, index);
+               if (err) {
+                       __mlx4_mr_release(dev, index);
+                       break;
+               }
+               set_param_l(out_param, index);
+               break;
+       case RES_OP_MAP_ICM:
+               index = get_param_l(&in_param);
+               id = index & mpt_mask(dev);
+               err = mr_res_start_move_to(dev, slave, id,
+                                          RES_MPT_MAPPED, &mpt);
+               if (err)
+                       return err;
+
+               err = __mlx4_mr_alloc_icm(dev, mpt->key);
+               if (err) {
+                       res_abort_move(dev, slave, RES_MPT, id);
+                       return err;
+               }
+
+               res_end_move(dev, slave, RES_MPT, id);
+               break;
+       }
+       return err;
+}
+
+static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                       u64 in_param, u64 *out_param)
+{
+       int cqn;
+       int err;
+
+       switch (op) {
+       case RES_OP_RESERVE_AND_MAP:
+               err = __mlx4_cq_alloc_icm(dev, &cqn);
+               if (err)
+                       break;
+
+               err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
+               if (err) {
+                       __mlx4_cq_free_icm(dev, cqn);
+                       break;
+               }
+
+               set_param_l(out_param, cqn);
+               break;
+
+       default:
+               err = -EINVAL;
+       }
+
+       return err;
+}
+
+static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                        u64 in_param, u64 *out_param)
+{
+       int srqn;
+       int err;
+
+       switch (op) {
+       case RES_OP_RESERVE_AND_MAP:
+               err = __mlx4_srq_alloc_icm(dev, &srqn);
+               if (err)
+                       break;
+
+               err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
+               if (err) {
+                       __mlx4_srq_free_icm(dev, srqn);
+                       break;
+               }
+
+               set_param_l(out_param, srqn);
+               break;
+
+       default:
+               err = -EINVAL;
+       }
+
+       return err;
+}
+
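+/*
+ * MAC addresses are tracked in a simple per-slave list rather than in the
+ * radix tree; these helpers add, remove and flush a slave's MAC entries.
+ */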
+static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct mac_res *res;
+
+       res = kzalloc(sizeof *res, GFP_KERNEL);
+       if (!res)
+               return -ENOMEM;
+       res->mac = mac;
+       res->port = (u8) port;
+       list_add_tail(&res->list,
+                     &tracker->slave_list[slave].res_list[RES_MAC]);
+       return 0;
+}
+
+static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
+                              int port)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct list_head *mac_list =
+               &tracker->slave_list[slave].res_list[RES_MAC];
+       struct mac_res *res, *tmp;
+
+       list_for_each_entry_safe(res, tmp, mac_list, list) {
+               if (res->mac == mac && res->port == (u8) port) {
+                       list_del(&res->list);
+                       kfree(res);
+                       break;
+               }
+       }
+}
+
+static void rem_slave_macs(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct list_head *mac_list =
+               &tracker->slave_list[slave].res_list[RES_MAC];
+       struct mac_res *res, *tmp;
+
+       list_for_each_entry_safe(res, tmp, mac_list, list) {
+               list_del(&res->list);
+               __mlx4_unregister_mac(dev, res->port, res->mac);
+               kfree(res);
+       }
+}
+
+static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                        u64 in_param, u64 *out_param)
+{
+       int err = -EINVAL;
+       int port;
+       u64 mac;
+
+       if (op != RES_OP_RESERVE_AND_MAP)
+               return err;
+
+       port = get_param_l(out_param);
+       mac = in_param;
+
+       err = __mlx4_register_mac(dev, port, mac);
+       if (err >= 0) {
+               set_param_l(out_param, err);
+               err = 0;
+       }
+
+       if (!err) {
+               err = mac_add_to_slave(dev, slave, mac, port);
+               if (err)
+                       __mlx4_unregister_mac(dev, port, mac);
+       }
+       return err;
+}
+
+static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                        u64 in_param, u64 *out_param)
+{
+       return 0;
+}
+
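+/*
+ * Command wrapper handling a slave's resource allocation request on the
+ * master: dispatch on the resource type carried in vhcr->in_modifier to
+ * the per-type *_alloc_res() helper above.
+ */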
+int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int alop = vhcr->op_modifier;
+
+       switch (vhcr->in_modifier) {
+       case RES_QP:
+               err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
+                                  vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_MTT:
+               err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
+                                   vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_MPT:
+               err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
+                                   vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_CQ:
+               err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
+                                  vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_SRQ:
+               err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
+                                   vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_MAC:
+               err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
+                                   vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_VLAN:
+               err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
+                                   vhcr->in_param, &vhcr->out_param);
+               break;
+
+       default:
+               err = -EINVAL;
+               break;
+       }
+
+       return err;
+}
+
+static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                      u64 in_param)
+{
+       int err;
+       int count;
+       int base;
+       int qpn;
+
+       switch (op) {
+       case RES_OP_RESERVE:
+               base = get_param_l(&in_param) & 0x7fffff;
+               count = get_param_h(&in_param);
+               err = rem_res_range(dev, slave, base, count, RES_QP, 0);
+               if (err)
+                       break;
+               __mlx4_qp_release_range(dev, base, count);
+               break;
+       case RES_OP_MAP_ICM:
+               qpn = get_param_l(&in_param) & 0x7fffff;
+               err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
+                                          NULL, 0);
+               if (err)
+                       return err;
+
+               if (!valid_reserved(dev, slave, qpn))
+                       __mlx4_qp_free_icm(dev, qpn);
+
+               res_end_move(dev, slave, RES_QP, qpn);
+
+               if (valid_reserved(dev, slave, qpn))
+                       err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
+               break;
+       default:
+               err = -EINVAL;
+               break;
+       }
+       return err;
+}
+
+static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                       u64 in_param, u64 *out_param)
+{
+       int err = -EINVAL;
+       int base;
+       int order;
+
+       if (op != RES_OP_RESERVE_AND_MAP)
+               return err;
+
+       base = get_param_l(&in_param);
+       order = get_param_h(&in_param);
+       err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
+       if (!err)
+               __mlx4_free_mtt_range(dev, base, order);
+       return err;
+}
+
+static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                       u64 in_param)
+{
+       int err = -EINVAL;
+       int index;
+       int id;
+       struct res_mpt *mpt;
+
+       switch (op) {
+       case RES_OP_RESERVE:
+               index = get_param_l(&in_param);
+               id = index & mpt_mask(dev);
+               err = get_res(dev, slave, id, RES_MPT, &mpt);
+               if (err)
+                       break;
+               index = mpt->key;
+               put_res(dev, slave, id, RES_MPT);
+
+               err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
+               if (err)
+                       break;
+               __mlx4_mr_release(dev, index);
+               break;
+       case RES_OP_MAP_ICM:
+               index = get_param_l(&in_param);
+               id = index & mpt_mask(dev);
+               err = mr_res_start_move_to(dev, slave, id,
+                                          RES_MPT_RESERVED, &mpt);
+               if (err)
+                       return err;
+
+               __mlx4_mr_free_icm(dev, mpt->key);
+               res_end_move(dev, slave, RES_MPT, id);
+               break;
+       default:
+               err = -EINVAL;
+               break;
+       }
+       return err;
+}
+
+static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                      u64 in_param, u64 *out_param)
+{
+       int cqn;
+       int err;
+
+       switch (op) {
+       case RES_OP_RESERVE_AND_MAP:
+               cqn = get_param_l(&in_param);
+               err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
+               if (err)
+                       break;
+
+               __mlx4_cq_free_icm(dev, cqn);
+               break;
+
+       default:
+               err = -EINVAL;
+               break;
+       }
+
+       return err;
+}
+
+static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                       u64 in_param, u64 *out_param)
+{
+       int srqn;
+       int err;
+
+       switch (op) {
+       case RES_OP_RESERVE_AND_MAP:
+               srqn = get_param_l(&in_param);
+               err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
+               if (err)
+                       break;
+
+               __mlx4_srq_free_icm(dev, srqn);
+               break;
+
+       default:
+               err = -EINVAL;
+               break;
+       }
+
+       return err;
+}
+
+static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                           u64 in_param, u64 *out_param)
+{
+       int port;
+       int err = 0;
+
+       switch (op) {
+       case RES_OP_RESERVE_AND_MAP:
+               port = get_param_l(out_param);
+               mac_del_from_slave(dev, slave, in_param, port);
+               __mlx4_unregister_mac(dev, port, in_param);
+               break;
+       default:
+               err = -EINVAL;
+               break;
+       }
+
+       return err;
+
+}
+
+static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                           u64 in_param, u64 *out_param)
+{
+       return 0;
+}
+
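+/*
+ * Counterpart of mlx4_ALLOC_RES_wrapper(): release a slave's resource,
+ * again dispatching on the type in vhcr->in_modifier.
+ */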
+int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd)
+{
+       int err = -EINVAL;
+       int alop = vhcr->op_modifier;
+
+       switch (vhcr->in_modifier) {
+       case RES_QP:
+               err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
+                                 vhcr->in_param);
+               break;
+
+       case RES_MTT:
+               err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
+                                  vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_MPT:
+               err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
+                                  vhcr->in_param);
+               break;
+
+       case RES_CQ:
+               err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
+                                 vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_SRQ:
+               err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
+                                  vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_MAC:
+               err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
+                                  vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_VLAN:
+               err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
+                                  vhcr->in_param, &vhcr->out_param);
+               break;
+
+       default:
+               break;
+       }
+       return err;
+}
+
+/* ugly but other choices are uglier */
+static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
+{
+       return (be32_to_cpu(mpt->flags) >> 9) & 1;
+}
+
+static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
+{
+       return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
+}
+
+static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
+{
+       return be32_to_cpu(mpt->mtt_sz);
+}
+
+static int mr_get_pdn(struct mlx4_mpt_entry *mpt)
+{
+       return be32_to_cpu(mpt->pd_flags) & 0xffffff;
+}
+
+static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
+{
+       return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
+{
+       return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
+{
+       int page_shift = (qpc->log_page_size & 0x3f) + 12;
+       int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
+       int log_sq_stride = qpc->sq_size_stride & 7;
+       int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
+       int log_rq_stride = qpc->rq_size_stride & 7;
+       int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
+       int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
+       int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
+       int sq_size;
+       int rq_size;
+       int total_pages;
+       int total_mem;
+       int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
+
+       sq_size = 1 << (log_sq_size + log_sq_stride + 4);
+       rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
+       total_mem = sq_size + rq_size;
+       total_pages =
+               roundup_pow_of_two((total_mem + (page_offset << 6)) >>
+                                  page_shift);
+
+       return total_pages;
+}
+
+static int qp_get_pdn(struct mlx4_qp_context *qpc)
+{
+       return be32_to_cpu(qpc->pd) & 0xffffff;
+}
+
+static int pdn2slave(int pdn)
+{
+       return (pdn >> NOT_MASKED_PD_BITS) - 1;
+}
+
+static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
+                          int size, struct res_mtt *mtt)
+{
+       int res_start = mtt->com.res_id;
+       int res_size = (1 << mtt->order);
+
+       if (start < res_start || start + size > res_start + res_size)
+               return -EPERM;
+       return 0;
+}
+
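+/*
+ * SW2HW_MPT wrapper: move the MPT to hardware ownership.  For non-physical
+ * MRs the referenced MTT range is validated and its ref_count taken, and
+ * the PD is checked to belong to the calling slave before the command is
+ * forwarded via mlx4_DMA_wrapper().
+ */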
+int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int index = vhcr->in_modifier;
+       struct res_mtt *mtt;
+       struct res_mpt *mpt;
+       int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
+       int phys;
+       int id;
+
+       id = index & mpt_mask(dev);
+       err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
+       if (err)
+               return err;
+
+       phys = mr_phys_mpt(inbox->buf);
+       if (!phys) {
+               err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+               if (err)
+                       goto ex_abort;
+
+               err = check_mtt_range(dev, slave, mtt_base,
+                                     mr_get_mtt_size(inbox->buf), mtt);
+               if (err)
+                       goto ex_put;
+
+               mpt->mtt = mtt;
+       }
+
+       if (pdn2slave(mr_get_pdn(inbox->buf)) != slave) {
+               err = -EPERM;
+               goto ex_put;
+       }
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto ex_put;
+
+       if (!phys) {
+               atomic_inc(&mtt->ref_count);
+               put_res(dev, slave, mtt->com.res_id, RES_MTT);
+       }
+
+       res_end_move(dev, slave, RES_MPT, id);
+       return 0;
+
+ex_put:
+       if (!phys)
+               put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ex_abort:
+       res_abort_move(dev, slave, RES_MPT, id);
+
+       return err;
+}
+
+int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int index = vhcr->in_modifier;
+       struct res_mpt *mpt;
+       int id;
+
+       id = index & mpt_mask(dev);
+       err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
+       if (err)
+               return err;
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto ex_abort;
+
+       if (mpt->mtt)
+               atomic_dec(&mpt->mtt->ref_count);
+
+       res_end_move(dev, slave, RES_MPT, id);
+       return 0;
+
+ex_abort:
+       res_abort_move(dev, slave, RES_MPT, id);
+
+       return err;
+}
+
+int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int index = vhcr->in_modifier;
+       struct res_mpt *mpt;
+       int id;
+
+       id = index & mpt_mask(dev);
+       err = get_res(dev, slave, id, RES_MPT, &mpt);
+       if (err)
+               return err;
+
+       if (mpt->com.from_state != RES_MPT_HW) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+
+out:
+       put_res(dev, slave, id, RES_MPT);
+       return err;
+}
+
+static int qp_get_rcqn(struct mlx4_qp_context *qpc)
+{
+       return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
+}
+
+static int qp_get_scqn(struct mlx4_qp_context *qpc)
+{
+       return be32_to_cpu(qpc->cqn_send) & 0xffffff;
+}
+
+static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
+{
+       return be32_to_cpu(qpc->srqn) & 0x1ffffff;
+}
+
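+/*
+ * RST2INIT wrapper: before handing the QP to hardware, validate and take
+ * references on every resource the QP context points at (MTT range, PD,
+ * receive/send CQs and optional SRQ), then forward the command and record
+ * the references in the tracked QP.
+ */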
+int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
+                            struct mlx4_vhcr *vhcr,
+                            struct mlx4_cmd_mailbox *inbox,
+                            struct mlx4_cmd_mailbox *outbox,
+                            struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int qpn = vhcr->in_modifier & 0x7fffff;
+       struct res_mtt *mtt;
+       struct res_qp *qp;
+       struct mlx4_qp_context *qpc = inbox->buf + 8;
+       int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
+       int mtt_size = qp_get_mtt_size(qpc);
+       struct res_cq *rcq;
+       struct res_cq *scq;
+       int rcqn = qp_get_rcqn(qpc);
+       int scqn = qp_get_scqn(qpc);
+       u32 srqn = qp_get_srqn(qpc) & 0xffffff;
+       int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
+       struct res_srq *srq;
+       int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
+
+       err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
+       if (err)
+               return err;
+       qp->local_qpn = local_qpn;
+
+       err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+       if (err)
+               goto ex_abort;
+
+       err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
+       if (err)
+               goto ex_put_mtt;
+
+       if (pdn2slave(qp_get_pdn(qpc)) != slave) {
+               err = -EPERM;
+               goto ex_put_mtt;
+       }
+
+       err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
+       if (err)
+               goto ex_put_mtt;
+
+       if (scqn != rcqn) {
+               err = get_res(dev, slave, scqn, RES_CQ, &scq);
+               if (err)
+                       goto ex_put_rcq;
+       } else
+               scq = rcq;
+
+       if (use_srq) {
+               err = get_res(dev, slave, srqn, RES_SRQ, &srq);
+               if (err)
+                       goto ex_put_scq;
+       }
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto ex_put_srq;
+       atomic_inc(&mtt->ref_count);
+       qp->mtt = mtt;
+       atomic_inc(&rcq->ref_count);
+       qp->rcq = rcq;
+       atomic_inc(&scq->ref_count);
+       qp->scq = scq;
+
+       if (scqn != rcqn)
+               put_res(dev, slave, scqn, RES_CQ);
+
+       if (use_srq) {
+               atomic_inc(&srq->ref_count);
+               put_res(dev, slave, srqn, RES_SRQ);
+               qp->srq = srq;
+       }
+       put_res(dev, slave, rcqn, RES_CQ);
+       put_res(dev, slave, mtt_base, RES_MTT);
+       res_end_move(dev, slave, RES_QP, qpn);
+
+       return 0;
+
+ex_put_srq:
+       if (use_srq)
+               put_res(dev, slave, srqn, RES_SRQ);
+ex_put_scq:
+       if (scqn != rcqn)
+               put_res(dev, slave, scqn, RES_CQ);
+ex_put_rcq:
+       put_res(dev, slave, rcqn, RES_CQ);
+ex_put_mtt:
+       put_res(dev, slave, mtt_base, RES_MTT);
+ex_abort:
+       res_abort_move(dev, slave, RES_QP, qpn);
+
+       return err;
+}
+
+static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
+{
+       return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
+{
+       int log_eq_size = eqc->log_eq_size & 0x1f;
+       int page_shift = (eqc->log_page_size & 0x3f) + 12;
+
+       if (log_eq_size + 5 < page_shift)
+               return 1;
+
+       return 1 << (log_eq_size + 5 - page_shift);
+}
+
+static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
+{
+       return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
+{
+       int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
+       int page_shift = (cqc->log_page_size & 0x3f) + 12;
+
+       if (log_cq_size + 5 < page_shift)
+               return 1;
+
+       return 1 << (log_cq_size + 5 - page_shift);
+}
+
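+/*
+ * SW2HW_EQ wrapper: EQs are tracked per slave using ((slave << 8) | eqn)
+ * as the resource id.  The tracker entry is created here, its MTT range
+ * validated and referenced, and the EQ moved to RES_EQ_HW once the command
+ * succeeds.
+ */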
+int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int eqn = vhcr->in_modifier;
+       int res_id = (slave << 8) | eqn;
+       struct mlx4_eq_context *eqc = inbox->buf;
+       int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
+       int mtt_size = eq_get_mtt_size(eqc);
+       struct res_eq *eq;
+       struct res_mtt *mtt;
+
+       err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
+       if (err)
+               return err;
+       err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
+       if (err)
+               goto out_add;
+
+       err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+       if (err)
+               goto out_move;
+
+       err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
+       if (err)
+               goto out_put;
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto out_put;
+
+       atomic_inc(&mtt->ref_count);
+       eq->mtt = mtt;
+       put_res(dev, slave, mtt->com.res_id, RES_MTT);
+       res_end_move(dev, slave, RES_EQ, res_id);
+       return 0;
+
+out_put:
+       put_res(dev, slave, mtt->com.res_id, RES_MTT);
+out_move:
+       res_abort_move(dev, slave, RES_EQ, res_id);
+out_add:
+       rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
+       return err;
+}
+
+static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
+                             int len, struct res_mtt **res)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct res_mtt *mtt;
+       int err = -EINVAL;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
+                           com.list) {
+               if (!check_mtt_range(dev, slave, start, len, mtt)) {
+                       *res = mtt;
+                       mtt->com.from_state = mtt->com.state;
+                       mtt->com.state = RES_MTT_BUSY;
+                       err = 0;
+                       break;
+               }
+       }
+       spin_unlock_irq(mlx4_tlock(dev));
+
+       return err;
+}
+
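+/*
+ * WRITE_MTT wrapper: find the tracked MTT range that contains the entries
+ * being written, then perform the write on the host side with
+ * __mlx4_write_mtt() after converting the inbox page list to CPU-endian
+ * addresses.
+ */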
+int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       struct mlx4_mtt mtt;
+       __be64 *page_list = inbox->buf;
+       u64 *pg_list = (u64 *)page_list;
+       int i;
+       struct res_mtt *rmtt = NULL;
+       int start = be64_to_cpu(page_list[0]);
+       int npages = vhcr->in_modifier;
+       int err;
+
+       err = get_containing_mtt(dev, slave, start, npages, &rmtt);
+       if (err)
+               return err;
+
+       /* Call the SW implementation of write_mtt:
+        * - Prepare a dummy mtt struct
+        * - Translate inbox contents to simple addresses in host endianness */
+       mtt.offset = 0;  /* TBD this is broken but I don't handle it since
+                           we don't really use it */
+       mtt.order = 0;
+       mtt.page_shift = 0;
+       for (i = 0; i < npages; ++i)
+               pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
+
+       err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
+                              ((u64 *)page_list + 2));
+
+       if (rmtt)
+               put_res(dev, slave, rmtt->com.res_id, RES_MTT);
+
+       return err;
+}
+
+int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd)
+{
+       int eqn = vhcr->in_modifier;
+       int res_id = eqn | (slave << 8);
+       struct res_eq *eq;
+       int err;
+
+       err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
+       if (err)
+               return err;
+
+       err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
+       if (err)
+               goto ex_abort;
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto ex_put;
+
+       atomic_dec(&eq->mtt->ref_count);
+       put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
+       res_end_move(dev, slave, RES_EQ, res_id);
+       rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
+
+       return 0;
+
+ex_put:
+       put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
+ex_abort:
+       res_abort_move(dev, slave, RES_EQ, res_id);
+
+       return err;
+}
+
+int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_slave_event_eq_info *event_eq;
+       struct mlx4_cmd_mailbox *mailbox;
+       u32 in_modifier = 0;
+       int err;
+       int res_id;
+       struct res_eq *req;
+
+       if (!priv->mfunc.master.slave_state)
+               return -EINVAL;
+
+       event_eq = &priv->mfunc.master.slave_state[slave].event_eq;
+
+       /* Create the event only if the slave is registered */
+       if ((event_eq->event_type & (1 << eqe->type)) == 0)
+               return 0;
+
+       mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
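+       /* EQ resources are tracked under a composite id: the EQ number in
+        * the low byte and the owning slave in the bits above it.
+        */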
+       res_id = (slave << 8) | event_eq->eqn;
+       err = get_res(dev, slave, res_id, RES_EQ, &req);
+       if (err)
+               goto unlock;
+
+       if (req->com.from_state != RES_EQ_HW) {
+               err = -EINVAL;
+               goto put;
+       }
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox)) {
+               err = PTR_ERR(mailbox);
+               goto put;
+       }
+
+       if (eqe->type == MLX4_EVENT_TYPE_CMD) {
+               ++event_eq->token;
+               eqe->event.cmd.token = cpu_to_be16(event_eq->token);
+       }
+
+       memcpy(mailbox->buf, (u8 *) eqe, 28);
+
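+       /* GEN_EQE input modifier: destination slave in the low byte and the
+        * slave's event EQ number in bits 16-23.
+        */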
+       in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
+
+       err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
+                      MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
+                      MLX4_CMD_NATIVE);
+
+       put_res(dev, slave, res_id, RES_EQ);
+       mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+
+put:
+       put_res(dev, slave, res_id, RES_EQ);
+
+unlock:
+       mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
+       return err;
+}
+
+int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd)
+{
+       int eqn = vhcr->in_modifier;
+       int res_id = eqn | (slave << 8);
+       struct res_eq *eq;
+       int err;
+
+       err = get_res(dev, slave, res_id, RES_EQ, &eq);
+       if (err)
+               return err;
+
+       if (eq->com.from_state != RES_EQ_HW) {
+               err = -EINVAL;
+               goto ex_put;
+       }
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+
+ex_put:
+       put_res(dev, slave, res_id, RES_EQ);
+       return err;
+}
+
+int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int cqn = vhcr->in_modifier;
+       struct mlx4_cq_context *cqc = inbox->buf;
+       int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
+       struct res_cq *cq;
+       struct res_mtt *mtt;
+
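+       /* Start moving the CQ to HW ownership; the move is only completed
+        * after the MTT range referenced by the CQ context has been
+        * validated for this slave.
+        */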
+       err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
+       if (err)
+               return err;
+       err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+       if (err)
+               goto out_move;
+       err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
+       if (err)
+               goto out_put;
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto out_put;
+       atomic_inc(&mtt->ref_count);
+       cq->mtt = mtt;
+       put_res(dev, slave, mtt->com.res_id, RES_MTT);
+       res_end_move(dev, slave, RES_CQ, cqn);
+       return 0;
+
+out_put:
+       put_res(dev, slave, mtt->com.res_id, RES_MTT);
+out_move:
+       res_abort_move(dev, slave, RES_CQ, cqn);
+       return err;
+}
+
+int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int cqn = vhcr->in_modifier;
+       struct res_cq *cq;
+
+       err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
+       if (err)
+               return err;
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto out_move;
+       atomic_dec(&cq->mtt->ref_count);
+       res_end_move(dev, slave, RES_CQ, cqn);
+       return 0;
+
+out_move:
+       res_abort_move(dev, slave, RES_CQ, cqn);
+       return err;
+}
+
+int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd)
+{
+       int cqn = vhcr->in_modifier;
+       struct res_cq *cq;
+       int err;
+
+       err = get_res(dev, slave, cqn, RES_CQ, &cq);
+       if (err)
+               return err;
+
+       if (cq->com.from_state != RES_CQ_HW)
+               goto ex_put;
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ex_put:
+       put_res(dev, slave, cqn, RES_CQ);
+
+       return err;
+}
+
+static int handle_resize(struct mlx4_dev *dev, int slave,
+                        struct mlx4_vhcr *vhcr,
+                        struct mlx4_cmd_mailbox *inbox,
+                        struct mlx4_cmd_mailbox *outbox,
+                        struct mlx4_cmd_info *cmd,
+                        struct res_cq *cq)
+{
+       int err;
+       struct res_mtt *orig_mtt;
+       struct res_mtt *mtt;
+       struct mlx4_cq_context *cqc = inbox->buf;
+       int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
+
+       err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
+       if (err)
+               return err;
+
+       if (orig_mtt != cq->mtt) {
+               err = -EINVAL;
+               goto ex_put;
+       }
+
+       err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+       if (err)
+               goto ex_put;
+
+       err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
+       if (err)
+               goto ex_put1;
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto ex_put1;
+       atomic_dec(&orig_mtt->ref_count);
+       put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
+       atomic_inc(&mtt->ref_count);
+       cq->mtt = mtt;
+       put_res(dev, slave, mtt->com.res_id, RES_MTT);
+       return 0;
+
+ex_put1:
+       put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ex_put:
+       put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
+
+       return err;
+}
+
+int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       int cqn = vhcr->in_modifier;
+       struct res_cq *cq;
+       int err;
+
+       err = get_res(dev, slave, cqn, RES_CQ, &cq);
+       if (err)
+               return err;
+
+       if (cq->com.from_state != RES_CQ_HW)
+               goto ex_put;
+
+       if (vhcr->op_modifier == 0) {
+               err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
+               if (err)
+                       goto ex_put;
+       }
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ex_put:
+       put_res(dev, slave, cqn, RES_CQ);
+
+       return err;
+}
+
+static int srq_get_pdn(struct mlx4_srq_context *srqc)
+{
+       return be32_to_cpu(srqc->pd) & 0xffffff;
+}
+
+static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
+{
+       int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
+       int log_rq_stride = srqc->logstride & 7;
+       int page_shift = (srqc->log_page_size & 0x3f) + 12;
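+       /* Buffer is (1 << log_srq_size) WQEs of (16 << log_rq_stride) bytes;
+        * return how many pages (MTT entries) that spans.
+        */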
+
+       if (log_srq_size + log_rq_stride + 4 < page_shift)
+               return 1;
+
+       return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
+}
+
+int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int srqn = vhcr->in_modifier;
+       struct res_mtt *mtt;
+       struct res_srq *srq;
+       struct mlx4_srq_context *srqc = inbox->buf;
+       int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
+
+       if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
+               return -EINVAL;
+
+       err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
+       if (err)
+               return err;
+       err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+       if (err)
+               goto ex_abort;
+       err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
+                             mtt);
+       if (err)
+               goto ex_put_mtt;
+
+       if (pdn2slave(srq_get_pdn(srqc)) != slave) {
+               err = -EPERM;
+               goto ex_put_mtt;
+       }
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto ex_put_mtt;
+
+       atomic_inc(&mtt->ref_count);
+       srq->mtt = mtt;
+       put_res(dev, slave, mtt->com.res_id, RES_MTT);
+       res_end_move(dev, slave, RES_SRQ, srqn);
+       return 0;
+
+ex_put_mtt:
+       put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ex_abort:
+       res_abort_move(dev, slave, RES_SRQ, srqn);
+
+       return err;
+}
+
+int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int srqn = vhcr->in_modifier;
+       struct res_srq *srq;
+
+       err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
+       if (err)
+               return err;
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto ex_abort;
+       atomic_dec(&srq->mtt->ref_count);
+       if (srq->cq)
+               atomic_dec(&srq->cq->ref_count);
+       res_end_move(dev, slave, RES_SRQ, srqn);
+
+       return 0;
+
+ex_abort:
+       res_abort_move(dev, slave, RES_SRQ, srqn);
+
+       return err;
+}
+
+int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int srqn = vhcr->in_modifier;
+       struct res_srq *srq;
+
+       err = get_res(dev, slave, srqn, RES_SRQ, &srq);
+       if (err)
+               return err;
+       if (srq->com.from_state != RES_SRQ_HW) {
+               err = -EBUSY;
+               goto out;
+       }
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+out:
+       put_res(dev, slave, srqn, RES_SRQ);
+       return err;
+}
+
+int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+                        struct mlx4_vhcr *vhcr,
+                        struct mlx4_cmd_mailbox *inbox,
+                        struct mlx4_cmd_mailbox *outbox,
+                        struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int srqn = vhcr->in_modifier;
+       struct res_srq *srq;
+
+       err = get_res(dev, slave, srqn, RES_SRQ, &srq);
+       if (err)
+               return err;
+
+       if (srq->com.from_state != RES_SRQ_HW) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+out:
+       put_res(dev, slave, srqn, RES_SRQ);
+       return err;
+}
+
+int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
+                       struct mlx4_vhcr *vhcr,
+                       struct mlx4_cmd_mailbox *inbox,
+                       struct mlx4_cmd_mailbox *outbox,
+                       struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int qpn = vhcr->in_modifier & 0x7fffff;
+       struct res_qp *qp;
+
+       err = get_res(dev, slave, qpn, RES_QP, &qp);
+       if (err)
+               return err;
+       if (qp->com.from_state != RES_QP_HW) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+out:
+       put_res(dev, slave, qpn, RES_QP);
+       return err;
+}
+
+int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
+                            struct mlx4_vhcr *vhcr,
+                            struct mlx4_cmd_mailbox *inbox,
+                            struct mlx4_cmd_mailbox *outbox,
+                            struct mlx4_cmd_info *cmd)
+{
+       struct mlx4_qp_context *qpc = inbox->buf + 8;
+
+       update_ud_gid(dev, qpc, (u8)slave);
+
+       return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+}
+
+int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
+                        struct mlx4_vhcr *vhcr,
+                        struct mlx4_cmd_mailbox *inbox,
+                        struct mlx4_cmd_mailbox *outbox,
+                        struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int qpn = vhcr->in_modifier & 0x7fffff;
+       struct res_qp *qp;
+
+       err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
+       if (err)
+               return err;
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto ex_abort;
+
+       atomic_dec(&qp->mtt->ref_count);
+       atomic_dec(&qp->rcq->ref_count);
+       atomic_dec(&qp->scq->ref_count);
+       if (qp->srq)
+               atomic_dec(&qp->srq->ref_count);
+       res_end_move(dev, slave, RES_QP, qpn);
+       return 0;
+
+ex_abort:
+       res_abort_move(dev, slave, RES_QP, qpn);
+
+       return err;
+}
+
+static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
+                               struct res_qp *rqp, u8 *gid)
+{
+       struct res_gid *res;
+
+       list_for_each_entry(res, &rqp->mcg_list, list) {
+               if (!memcmp(res->gid, gid, 16))
+                       return res;
+       }
+       return NULL;
+}
+
+static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
+                      u8 *gid, enum mlx4_protocol prot)
+{
+       struct res_gid *res;
+       int err;
+
+       res = kzalloc(sizeof *res, GFP_KERNEL);
+       if (!res)
+               return -ENOMEM;
+
+       spin_lock_irq(&rqp->mcg_spl);
+       if (find_gid(dev, slave, rqp, gid)) {
+               kfree(res);
+               err = -EEXIST;
+       } else {
+               memcpy(res->gid, gid, 16);
+               res->prot = prot;
+               list_add_tail(&res->list, &rqp->mcg_list);
+               err = 0;
+       }
+       spin_unlock_irq(&rqp->mcg_spl);
+
+       return err;
+}
+
+static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
+                      u8 *gid, enum mlx4_protocol prot)
+{
+       struct res_gid *res;
+       int err;
+
+       spin_lock_irq(&rqp->mcg_spl);
+       res = find_gid(dev, slave, rqp, gid);
+       if (!res || res->prot != prot)
+               err = -EINVAL;
+       else {
+               list_del(&res->list);
+               kfree(res);
+               err = 0;
+       }
+       spin_unlock_irq(&rqp->mcg_spl);
+
+       return err;
+}
+
+int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
+                              struct mlx4_vhcr *vhcr,
+                              struct mlx4_cmd_mailbox *inbox,
+                              struct mlx4_cmd_mailbox *outbox,
+                              struct mlx4_cmd_info *cmd)
+{
+       struct mlx4_qp qp; /* dummy for calling attach/detach */
+       u8 *gid = inbox->buf;
+       enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
+       int err, err1;
+       int qpn;
+       struct res_qp *rqp;
+       int attach = vhcr->op_modifier;
+       int block_loopback = vhcr->in_modifier >> 31;
+       u8 steer_type_mask = 2;
+       enum mlx4_steer_type type = gid[7] & steer_type_mask;
+
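+       /* in_modifier layout: QPN in bits 0-23, protocol in bits 28-30 and
+        * the block-loopback flag in bit 31 (parsed above).
+        */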
+       qpn = vhcr->in_modifier & 0xffffff;
+       err = get_res(dev, slave, qpn, RES_QP, &rqp);
+       if (err)
+               return err;
+
+       qp.qpn = qpn;
+       if (attach) {
+               err = add_mcg_res(dev, slave, rqp, gid, prot);
+               if (err)
+                       goto ex_put;
+
+               err = mlx4_qp_attach_common(dev, &qp, gid,
+                                           block_loopback, prot, type);
+               if (err)
+                       goto ex_rem;
+       } else {
+               err = rem_mcg_res(dev, slave, rqp, gid, prot);
+               if (err)
+                       goto ex_put;
+               err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
+       }
+
+       put_res(dev, slave, qpn, RES_QP);
+       return 0;
+
+ex_rem:
+       /* ignore error return below, already in error */
+       err1 = rem_mcg_res(dev, slave, rqp, gid, prot);
+ex_put:
+       put_res(dev, slave, qpn, RES_QP);
+
+       return err;
+}
+
+enum {
+       BUSY_MAX_RETRIES = 10
+};
+
+int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
+                              struct mlx4_vhcr *vhcr,
+                              struct mlx4_cmd_mailbox *inbox,
+                              struct mlx4_cmd_mailbox *outbox,
+                              struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int index = vhcr->in_modifier & 0xffff;
+
+       err = get_res(dev, slave, index, RES_COUNTER, NULL);
+       if (err)
+               return err;
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       put_res(dev, slave, index, RES_COUNTER);
+       return err;
+}
+
+static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
+{
+       struct res_gid *rgid;
+       struct res_gid *tmp;
+       int err;
+       struct mlx4_qp qp; /* dummy for calling attach/detach */
+
+       list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
+               qp.qpn = rqp->local_qpn;
+               err = mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
+                                           MLX4_MC_STEER);
+               list_del(&rgid->list);
+               kfree(rgid);
+       }
+}
+
+static int _move_all_busy(struct mlx4_dev *dev, int slave,
+                         enum mlx4_resource type, int print)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker =
+               &priv->mfunc.master.res_tracker;
+       struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
+       struct res_common *r;
+       struct res_common *tmp;
+       int busy;
+
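+       /* Flag every resource owned by this slave for removal; resources
+        * that are currently busy (held by an in-flight command) are only
+        * counted so the caller can retry.
+        */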
+       busy = 0;
+       spin_lock_irq(mlx4_tlock(dev));
+       list_for_each_entry_safe(r, tmp, rlist, list) {
+               if (r->owner == slave) {
+                       if (!r->removing) {
+                               if (r->state == RES_ANY_BUSY) {
+                                       if (print)
+                                               mlx4_dbg(dev,
+                                                        "%s id 0x%x is busy\n",
+                                                         ResourceType(type),
+                                                         r->res_id);
+                                       ++busy;
+                               } else {
+                                       r->from_state = r->state;
+                                       r->state = RES_ANY_BUSY;
+                                       r->removing = 1;
+                               }
+                       }
+               }
+       }
+       spin_unlock_irq(mlx4_tlock(dev));
+
+       return busy;
+}
+
+static int move_all_busy(struct mlx4_dev *dev, int slave,
+                        enum mlx4_resource type)
+{
+       unsigned long begin;
+       int busy;
+
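+       /* Retry for up to five seconds, then do a final pass that logs
+        * whatever is still busy.
+        */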
+       begin = jiffies;
+       do {
+               busy = _move_all_busy(dev, slave, type, 0);
+               if (time_after(jiffies, begin + 5 * HZ))
+                       break;
+               if (busy)
+                       cond_resched();
+       } while (busy);
+
+       if (busy)
+               busy = _move_all_busy(dev, slave, type, 1);
+
+       return busy;
+}
+
+static void rem_slave_qps(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct list_head *qp_list =
+               &tracker->slave_list[slave].res_list[RES_QP];
+       struct res_qp *qp;
+       struct res_qp *tmp;
+       int state;
+       u64 in_param;
+       int qpn;
+       int err;
+
+       err = move_all_busy(dev, slave, RES_QP);
+       if (err)
+               mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
+                         "for slave %d\n", slave);
+
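+       /* Walk each QP owned by the slave back through its states
+        * (HW -> MAPPED -> RESERVED), undoing whatever the slave left behind.
+        */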
+       spin_lock_irq(mlx4_tlock(dev));
+       list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
+               spin_unlock_irq(mlx4_tlock(dev));
+               if (qp->com.owner == slave) {
+                       qpn = qp->com.res_id;
+                       detach_qp(dev, slave, qp);
+                       state = qp->com.from_state;
+                       while (state != 0) {
+                               switch (state) {
+                               case RES_QP_RESERVED:
+                                       spin_lock_irq(mlx4_tlock(dev));
+                                       radix_tree_delete(&tracker->res_tree[RES_QP],
+                                                         qp->com.res_id);
+                                       list_del(&qp->com.list);
+                                       spin_unlock_irq(mlx4_tlock(dev));
+                                       kfree(qp);
+                                       state = 0;
+                                       break;
+                               case RES_QP_MAPPED:
+                                       if (!valid_reserved(dev, slave, qpn))
+                                               __mlx4_qp_free_icm(dev, qpn);
+                                       state = RES_QP_RESERVED;
+                                       break;
+                               case RES_QP_HW:
+                                       in_param = slave;
+                                       err = mlx4_cmd(dev, in_param,
+                                                      qp->local_qpn, 2,
+                                                      MLX4_CMD_2RST_QP,
+                                                      MLX4_CMD_TIME_CLASS_A,
+                                                      MLX4_CMD_NATIVE);
+                                       if (err)
+                                               mlx4_dbg(dev, "rem_slave_qps: failed"
+                                                        " to move slave %d qpn %d to"
+                                                        " reset\n", slave,
+                                                        qp->local_qpn);
+                                       atomic_dec(&qp->rcq->ref_count);
+                                       atomic_dec(&qp->scq->ref_count);
+                                       atomic_dec(&qp->mtt->ref_count);
+                                       if (qp->srq)
+                                               atomic_dec(&qp->srq->ref_count);
+                                       state = RES_QP_MAPPED;
+                                       break;
+                               default:
+                                       state = 0;
+                               }
+                       }
+               }
+               spin_lock_irq(mlx4_tlock(dev));
+       }
+       spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct list_head *srq_list =
+               &tracker->slave_list[slave].res_list[RES_SRQ];
+       struct res_srq *srq;
+       struct res_srq *tmp;
+       int state;
+       u64 in_param;
+       LIST_HEAD(tlist);
+       int srqn;
+       int err;
+
+       err = move_all_busy(dev, slave, RES_SRQ);
+       if (err)
+               mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
+                         "busy for slave %d\n", slave);
+
+       spin_lock_irq(mlx4_tlock(dev));
+       list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
+               spin_unlock_irq(mlx4_tlock(dev));
+               if (srq->com.owner == slave) {
+                       srqn = srq->com.res_id;
+                       state = srq->com.from_state;
+                       while (state != 0) {
+                               switch (state) {
+                               case RES_SRQ_ALLOCATED:
+                                       __mlx4_srq_free_icm(dev, srqn);
+                                       spin_lock_irq(mlx4_tlock(dev));
+                                       radix_tree_delete(&tracker->res_tree[RES_SRQ],
+                                                         srqn);
+                                       list_del(&srq->com.list);
+                                       spin_unlock_irq(mlx4_tlock(dev));
+                                       kfree(srq);
+                                       state = 0;
+                                       break;
+
+                               case RES_SRQ_HW:
+                                       in_param = slave;
+                                       err = mlx4_cmd(dev, in_param, srqn, 1,
+                                                      MLX4_CMD_HW2SW_SRQ,
+                                                      MLX4_CMD_TIME_CLASS_A,
+                                                      MLX4_CMD_NATIVE);
+                                       if (err)
+                                               mlx4_dbg(dev, "rem_slave_srqs: failed"
+                                                        " to move slave %d srq %d to"
+                                                        " SW ownership\n",
+                                                        slave, srqn);
+
+                                       atomic_dec(&srq->mtt->ref_count);
+                                       if (srq->cq)
+                                               atomic_dec(&srq->cq->ref_count);
+                                       state = RES_SRQ_ALLOCATED;
+                                       break;
+
+                               default:
+                                       state = 0;
+                               }
+                       }
+               }
+               spin_lock_irq(mlx4_tlock(dev));
+       }
+       spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct list_head *cq_list =
+               &tracker->slave_list[slave].res_list[RES_CQ];
+       struct res_cq *cq;
+       struct res_cq *tmp;
+       int state;
+       u64 in_param;
+       LIST_HEAD(tlist);
+       int cqn;
+       int err;
+
+       err = move_all_busy(dev, slave, RES_CQ);
+       if (err)
+               mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
+                         "busy for slave %d\n", slave);
+
+       spin_lock_irq(mlx4_tlock(dev));
+       list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
+               spin_unlock_irq(mlx4_tlock(dev));
+               if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
+                       cqn = cq->com.res_id;
+                       state = cq->com.from_state;
+                       while (state != 0) {
+                               switch (state) {
+                               case RES_CQ_ALLOCATED:
+                                       __mlx4_cq_free_icm(dev, cqn);
+                                       spin_lock_irq(mlx4_tlock(dev));
+                                       radix_tree_delete(&tracker->res_tree[RES_CQ],
+                                                         cqn);
+                                       list_del(&cq->com.list);
+                                       spin_unlock_irq(mlx4_tlock(dev));
+                                       kfree(cq);
+                                       state = 0;
+                                       break;
+
+                               case RES_CQ_HW:
+                                       in_param = slave;
+                                       err = mlx4_cmd(dev, in_param, cqn, 1,
+                                                      MLX4_CMD_HW2SW_CQ,
+                                                      MLX4_CMD_TIME_CLASS_A,
+                                                      MLX4_CMD_NATIVE);
+                                       if (err)
+                                               mlx4_dbg(dev, "rem_slave_cqs: failed"
+                                                        " to move slave %d cq %d to"
+                                                        " SW ownership\n",
+                                                        slave, cqn);
+                                       atomic_dec(&cq->mtt->ref_count);
+                                       state = RES_CQ_ALLOCATED;
+                                       break;
+
+                               default:
+                                       state = 0;
+                               }
+                       }
+               }
+               spin_lock_irq(mlx4_tlock(dev));
+       }
+       spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct list_head *mpt_list =
+               &tracker->slave_list[slave].res_list[RES_MPT];
+       struct res_mpt *mpt;
+       struct res_mpt *tmp;
+       int state;
+       u64 in_param;
+       LIST_HEAD(tlist);
+       int mptn;
+       int err;
+
+       err = move_all_busy(dev, slave, RES_MPT);
+       if (err)
+               mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
+                         "busy for slave %d\n", slave);
+
+       spin_lock_irq(mlx4_tlock(dev));
+       list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
+               spin_unlock_irq(mlx4_tlock(dev));
+               if (mpt->com.owner == slave) {
+                       mptn = mpt->com.res_id;
+                       state = mpt->com.from_state;
+                       while (state != 0) {
+                               switch (state) {
+                               case RES_MPT_RESERVED:
+                                       __mlx4_mr_release(dev, mpt->key);
+                                       spin_lock_irq(mlx4_tlock(dev));
+                                       radix_tree_delete(&tracker->res_tree[RES_MPT],
+                                                         mptn);
+                                       list_del(&mpt->com.list);
+                                       spin_unlock_irq(mlx4_tlock(dev));
+                                       kfree(mpt);
+                                       state = 0;
+                                       break;
+
+                               case RES_MPT_MAPPED:
+                                       __mlx4_mr_free_icm(dev, mpt->key);
+                                       state = RES_MPT_RESERVED;
+                                       break;
+
+                               case RES_MPT_HW:
+                                       in_param = slave;
+                                       err = mlx4_cmd(dev, in_param, mptn, 0,
+                                                    MLX4_CMD_HW2SW_MPT,
+                                                    MLX4_CMD_TIME_CLASS_A,
+                                                    MLX4_CMD_NATIVE);
+                                       if (err)
+                                               mlx4_dbg(dev, "rem_slave_mrs: failed"
+                                                        " to move slave %d mpt %d to"
+                                                        " SW ownership\n",
+                                                        slave, mptn);
+                                       if (mpt->mtt)
+                                               atomic_dec(&mpt->mtt->ref_count);
+                                       state = RES_MPT_MAPPED;
+                                       break;
+                               default:
+                                       state = 0;
+                               }
+                       }
+               }
+               spin_lock_irq(mlx4_tlock(dev));
+       }
+       spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker =
+               &priv->mfunc.master.res_tracker;
+       struct list_head *mtt_list =
+               &tracker->slave_list[slave].res_list[RES_MTT];
+       struct res_mtt *mtt;
+       struct res_mtt *tmp;
+       int state;
+       LIST_HEAD(tlist);
+       int base;
+       int err;
+
+       err = move_all_busy(dev, slave, RES_MTT);
+       if (err)
+               mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
+                         "busy for slave %d\n", slave);
+
+       spin_lock_irq(mlx4_tlock(dev));
+       list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
+               spin_unlock_irq(mlx4_tlock(dev));
+               if (mtt->com.owner == slave) {
+                       base = mtt->com.res_id;
+                       state = mtt->com.from_state;
+                       while (state != 0) {
+                               switch (state) {
+                               case RES_MTT_ALLOCATED:
+                                       __mlx4_free_mtt_range(dev, base,
+                                                             mtt->order);
+                                       spin_lock_irq(mlx4_tlock(dev));
+                                       radix_tree_delete(&tracker->res_tree[RES_MTT],
+                                                         base);
+                                       list_del(&mtt->com.list);
+                                       spin_unlock_irq(mlx4_tlock(dev));
+                                       kfree(mtt);
+                                       state = 0;
+                                       break;
+
+                               default:
+                                       state = 0;
+                               }
+                       }
+               }
+               spin_lock_irq(mlx4_tlock(dev));
+       }
+       spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct list_head *eq_list =
+               &tracker->slave_list[slave].res_list[RES_EQ];
+       struct res_eq *eq;
+       struct res_eq *tmp;
+       int err;
+       int state;
+       LIST_HEAD(tlist);
+       int eqn;
+       struct mlx4_cmd_mailbox *mailbox;
+
+       err = move_all_busy(dev, slave, RES_EQ);
+       if (err)
+               mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
+                         "busy for slave %d\n", slave);
+
+       spin_lock_irq(mlx4_tlock(dev));
+       list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
+               spin_unlock_irq(mlx4_tlock(dev));
+               if (eq->com.owner == slave) {
+                       eqn = eq->com.res_id;
+                       state = eq->com.from_state;
+                       while (state != 0) {
+                               switch (state) {
+                               case RES_EQ_RESERVED:
+                                       spin_lock_irq(mlx4_tlock(dev));
+                                       radix_tree_delete(&tracker->res_tree[RES_EQ],
+                                                         eqn);
+                                       list_del(&eq->com.list);
+                                       spin_unlock_irq(mlx4_tlock(dev));
+                                       kfree(eq);
+                                       state = 0;
+                                       break;
+
+                               case RES_EQ_HW:
+                                       mailbox = mlx4_alloc_cmd_mailbox(dev);
+                                       if (IS_ERR(mailbox)) {
+                                               cond_resched();
+                                               continue;
+                                       }
+                                       err = mlx4_cmd_box(dev, slave, 0,
+                                                          eqn & 0xff, 0,
+                                                          MLX4_CMD_HW2SW_EQ,
+                                                          MLX4_CMD_TIME_CLASS_A,
+                                                          MLX4_CMD_NATIVE);
+                                       if (err)
+                                               mlx4_dbg(dev, "rem_slave_eqs: failed"
+                                                        " to move slave %d eqs %d to"
+                                                        " SW ownership\n", slave, eqn);
+                                       mlx4_free_cmd_mailbox(dev, mailbox);
+                                       if (!err) {
+                                               atomic_dec(&eq->mtt->ref_count);
+                                               state = RES_EQ_RESERVED;
+                                       }
+                                       break;
+
+                               default:
+                                       state = 0;
+                               }
+                       }
+               }
+               spin_lock_irq(mlx4_tlock(dev));
+       }
+       spin_unlock_irq(mlx4_tlock(dev));
+}
+
+void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
+       /*VLAN*/
+       rem_slave_macs(dev, slave);
+       rem_slave_qps(dev, slave);
+       rem_slave_srqs(dev, slave);
+       rem_slave_cqs(dev, slave);
+       rem_slave_mrs(dev, slave);
+       rem_slave_eqs(dev, slave);
+       rem_slave_mtts(dev, slave);
+       mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
+}
index e2337a7..8024982 100644 (file)
@@ -45,7 +45,8 @@ int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
        int err = 0;
 
        err = mlx4_cmd_imm(dev, 0, &out_param, port, 0,
-                          MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B);
+                          MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B,
+                          MLX4_CMD_WRAPPED);
        if (err) {
                mlx4_err(dev, "Sense command failed for port: %d\n", port);
                return err;
index 9cbf3fc..2823fff 100644 (file)
@@ -31,6 +31,8 @@
  * SOFTWARE.
  */
 
+#include <linux/init.h>
+
 #include <linux/mlx4/cmd.h>
 #include <linux/export.h>
 #include <linux/gfp.h>
 #include "mlx4.h"
 #include "icm.h"
 
-struct mlx4_srq_context {
-       __be32                  state_logsize_srqn;
-       u8                      logstride;
-       u8                      reserved1;
-       __be16                  xrcd;
-       __be32                  pg_offset_cqn;
-       u32                     reserved2;
-       u8                      log_page_size;
-       u8                      reserved3[2];
-       u8                      mtt_base_addr_h;
-       __be32                  mtt_base_addr_l;
-       __be32                  pd;
-       __be16                  limit_watermark;
-       __be16                  wqe_cnt;
-       u16                     reserved4;
-       __be16                  wqe_counter;
-       u32                     reserved5;
-       __be64                  db_rec_addr;
-};
-
 void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
 {
        struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
@@ -85,8 +67,9 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
 static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                          int srq_num)
 {
-       return mlx4_cmd(dev, mailbox->dma, srq_num, 0, MLX4_CMD_SW2HW_SRQ,
-                       MLX4_CMD_TIME_CLASS_A);
+       return mlx4_cmd(dev, mailbox->dma | dev->caps.function, srq_num, 0,
+                       MLX4_CMD_SW2HW_SRQ, MLX4_CMD_TIME_CLASS_A,
+                       MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -94,48 +77,109 @@ static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
 {
        return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
                            mailbox ? 0 : 1, MLX4_CMD_HW2SW_SRQ,
-                           MLX4_CMD_TIME_CLASS_A);
+                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark)
 {
        return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ,
-                       MLX4_CMD_TIME_CLASS_B);
+                       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                          int srq_num)
 {
        return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0, MLX4_CMD_QUERY_SRQ,
-                           MLX4_CMD_TIME_CLASS_A);
+                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
 
-int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
-                  struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
+int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
 {
        struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_srq_context *srq_context;
-       u64 mtt_addr;
        int err;
 
-       srq->srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
-       if (srq->srqn == -1)
+
+       *srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
+       if (*srqn == -1)
                return -ENOMEM;
 
-       err = mlx4_table_get(dev, &srq_table->table, srq->srqn);
+       err = mlx4_table_get(dev, &srq_table->table, *srqn);
        if (err)
                goto err_out;
 
-       err = mlx4_table_get(dev, &srq_table->cmpt_table, srq->srqn);
+       err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn);
        if (err)
                goto err_put;
+       return 0;
+
+err_put:
+       mlx4_table_put(dev, &srq_table->table, *srqn);
+
+err_out:
+       mlx4_bitmap_free(&srq_table->bitmap, *srqn);
+       return err;
+}
+
+static int mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
+{
+       u64 out_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               err = mlx4_cmd_imm(dev, 0, &out_param, RES_SRQ,
+                                  RES_OP_RESERVE_AND_MAP,
+                                  MLX4_CMD_ALLOC_RES,
+                                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+               if (!err)
+                       *srqn = get_param_l(&out_param);
+
+               return err;
+       }
+       return __mlx4_srq_alloc_icm(dev, srqn);
+}
+
+void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
+{
+       struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+
+       mlx4_table_put(dev, &srq_table->cmpt_table, srqn);
+       mlx4_table_put(dev, &srq_table->table, srqn);
+       mlx4_bitmap_free(&srq_table->bitmap, srqn);
+}
+
+static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
+{
+       u64 in_param;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, srqn);
+               if (mlx4_cmd(dev, in_param, RES_SRQ, RES_OP_RESERVE_AND_MAP,
+                            MLX4_CMD_FREE_RES,
+                            MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+                       mlx4_warn(dev, "Failed freeing srq:%d\n", srqn);
+               return;
+       }
+       __mlx4_srq_free_icm(dev, srqn);
+}
+
+int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
+                  struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
+{
+       struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_srq_context *srq_context;
+       u64 mtt_addr;
+       int err;
+
+       err = mlx4_srq_alloc_icm(dev, &srq->srqn);
+       if (err)
+               return err;
 
        spin_lock_irq(&srq_table->lock);
        err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
        spin_unlock_irq(&srq_table->lock);
        if (err)
-               goto err_cmpt_put;
+               goto err_icm;
 
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
@@ -174,15 +218,8 @@ err_radix:
        radix_tree_delete(&srq_table->tree, srq->srqn);
        spin_unlock_irq(&srq_table->lock);
 
-err_cmpt_put:
-       mlx4_table_put(dev, &srq_table->cmpt_table, srq->srqn);
-
-err_put:
-       mlx4_table_put(dev, &srq_table->table, srq->srqn);
-
-err_out:
-       mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
-
+err_icm:
+       mlx4_srq_free_icm(dev, srq->srqn);
        return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_srq_alloc);
@@ -204,8 +241,7 @@ void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
                complete(&srq->free);
        wait_for_completion(&srq->free);
 
-       mlx4_table_put(dev, &srq_table->table, srq->srqn);
-       mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
+       mlx4_srq_free_icm(dev, srq->srqn);
 }
 EXPORT_SYMBOL_GPL(mlx4_srq_free);
 
@@ -245,6 +281,8 @@ int mlx4_init_srq_table(struct mlx4_dev *dev)
 
        spin_lock_init(&srq_table->lock);
        INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
+       if (mlx4_is_slave(dev))
+               return 0;
 
        err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
                               dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
@@ -256,5 +294,7 @@ int mlx4_init_srq_table(struct mlx4_dev *dev)
 
 void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
 {
+       if (mlx4_is_slave(dev))
+               return;
        mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
 }
index 11be150..b7fc26c 100644 (file)
@@ -356,7 +356,7 @@ enum myri10ge_mcp_cmd_type {
        MXGEFW_CMD_GET_DCA_OFFSET = 56,
        /* offset of dca control for WDMAs */
 
-       /* VMWare NetQueue commands */
+       /* VMware NetQueue commands */
        MXGEFW_CMD_NETQ_GET_FILTERS_PER_QUEUE = 57,
        MXGEFW_CMD_NETQ_ADD_FILTER = 58,
        /* data0 = filter_id << 16 | queue << 8 | type */
index 16d4d8e..ef76725 100644 (file)
@@ -3305,7 +3305,7 @@ static void vxge_tx_watchdog(struct net_device *dev)
  *
  * Add the vlan id to the devices vlan id table
  */
-static void
+static int
 vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
        struct vxgedev *vdev = netdev_priv(dev);
@@ -3320,6 +3320,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
                vxge_hw_vpath_vid_add(vpath->handle, vid);
        }
        set_bit(vid, vdev->active_vlans);
+       return 0;
 }
 
 /**
@@ -3329,7 +3330,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
  *
  * Remove the vlan id from the device's vlan id table
  */
-static void
+static int
 vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
        struct vxgedev *vdev = netdev_priv(dev);
@@ -3348,6 +3349,7 @@ vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
        vxge_debug_entryexit(VXGE_TRACE,
                "%s:%d  Exiting...", __func__, __LINE__);
        clear_bit(vid, vdev->active_vlans);
+       return 0;
 }
 
 static const struct net_device_ops vxge_netdev_ops = {
index 05db543..90497ff 100644 (file)
@@ -2,4 +2,5 @@
 # Makefile for the A Semi network device drivers.
 #
 
-obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o pasemi_mac_ethtool.o
+obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
+pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
index 823f845..69b8e4e 100644 (file)
@@ -97,8 +97,8 @@ static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
 static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
 static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
                                struct qlcnic_esw_func_cfg *);
-static void qlcnic_vlan_rx_add(struct net_device *, u16);
-static void qlcnic_vlan_rx_del(struct net_device *, u16);
+static int qlcnic_vlan_rx_add(struct net_device *, u16);
+static int qlcnic_vlan_rx_del(struct net_device *, u16);
 
 /*  PCI Device ID Table  */
 #define ENTRY(device) \
@@ -735,20 +735,22 @@ qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
                adapter->pvid = 0;
 }
 
-static void
+static int
 qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        set_bit(vid, adapter->vlans);
+       return 0;
 }
 
-static void
+static int
 qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
 
        qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
        clear_bit(vid, adapter->vlans);
+       return 0;
 }
 
 static void
index 1ce4e08..b548987 100644 (file)
@@ -2349,56 +2349,66 @@ static int qlge_set_features(struct net_device *ndev,
        return 0;
 }
 
-static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
+static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
 {
        u32 enable_bit = MAC_ADDR_E;
+       int err;
 
-       if (ql_set_mac_addr_reg
-           (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
+       err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
+                                 MAC_ADDR_TYPE_VLAN, vid);
+       if (err)
                netif_err(qdev, ifup, qdev->ndev,
                          "Failed to init vlan address.\n");
-       }
+       return err;
 }
 
-static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
+static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
 {
        struct ql_adapter *qdev = netdev_priv(ndev);
        int status;
+       int err;
 
        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
        if (status)
-               return;
+               return status;
 
-       __qlge_vlan_rx_add_vid(qdev, vid);
+       err = __qlge_vlan_rx_add_vid(qdev, vid);
        set_bit(vid, qdev->active_vlans);
 
        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+
+       return err;
 }
 
-static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
+static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
 {
        u32 enable_bit = 0;
+       int err;
 
-       if (ql_set_mac_addr_reg
-           (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
+       err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
+                                 MAC_ADDR_TYPE_VLAN, vid);
+       if (err)
                netif_err(qdev, ifup, qdev->ndev,
                          "Failed to clear vlan address.\n");
-       }
+       return err;
 }
 
-static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
+static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
 {
        struct ql_adapter *qdev = netdev_priv(ndev);
        int status;
+       int err;
 
        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
        if (status)
-               return;
+               return status;
 
-       __qlge_vlan_rx_kill_vid(qdev, vid);
+       err = __qlge_vlan_rx_kill_vid(qdev, vid);
        clear_bit(vid, qdev->active_vlans);
 
        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+
+       return err;
 }
 
 static void qlge_restore_vlan(struct ql_adapter *qdev)
index e5a6d8e..7a0c800 100644 (file)
@@ -474,7 +474,6 @@ enum rtl_register_content {
        /* Config1 register p.24 */
        LEDS1           = (1 << 7),
        LEDS0           = (1 << 6),
-       MSIEnable       = (1 << 5),     /* Enable Message Signaled Interrupt */
        Speed_down      = (1 << 4),
        MEMMAP          = (1 << 3),
        IOMAP           = (1 << 2),
@@ -482,6 +481,7 @@ enum rtl_register_content {
        PMEnable        = (1 << 0),     /* Power Management Enable */
 
        /* Config2 register p. 25 */
+       MSIEnable       = (1 << 5),     /* 8169 only. Reserved in the 8168. */
        PCI_Clock_66MHz = 0x01,
        PCI_Clock_33MHz = 0x00,
 
@@ -1180,11 +1180,13 @@ static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
        return value;
 }
 
-static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
+static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
 {
-       RTL_W16(IntrMask, 0x0000);
+       void __iomem *ioaddr = tp->mmio_addr;
 
-       RTL_W16(IntrStatus, 0xffff);
+       RTL_W16(IntrMask, 0x0000);
+       RTL_W16(IntrStatus, tp->intr_event);
+       RTL_R8(ChipCmd);
 }
 
 static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
@@ -3424,22 +3426,24 @@ static const struct rtl_cfg_info {
 };
 
 /* Cfg9346_Unlock assumed. */
-static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
+static unsigned rtl_try_msi(struct rtl8169_private *tp,
                            const struct rtl_cfg_info *cfg)
 {
+       void __iomem *ioaddr = tp->mmio_addr;
        unsigned msi = 0;
        u8 cfg2;
 
        cfg2 = RTL_R8(Config2) & ~MSIEnable;
        if (cfg->features & RTL_FEATURE_MSI) {
-               if (pci_enable_msi(pdev)) {
-                       dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
+               if (pci_enable_msi(tp->pci_dev)) {
+                       netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
                } else {
                        cfg2 |= MSIEnable;
                        msi = RTL_FEATURE_MSI;
                }
        }
-       RTL_W8(Config2, cfg2);
+       if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
+               RTL_W8(Config2, cfg2);
        return msi;
 }
 
@@ -3933,8 +3937,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
                        break;
                udelay(100);
        }
-
-       rtl8169_init_ring_indexes(tp);
 }
 
 static int __devinit
@@ -4077,7 +4079,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                tp->features |= RTL_FEATURE_WOL;
        if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
                tp->features |= RTL_FEATURE_WOL;
-       tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
+       tp->features |= rtl_try_msi(tp, cfg);
        RTL_W8(Cfg9346, Cfg9346_Lock);
 
        if (rtl_tbi_enabled(tp)) {
@@ -4339,7 +4341,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
        void __iomem *ioaddr = tp->mmio_addr;
 
        /* Disable interrupts */
-       rtl8169_irq_mask_and_ack(ioaddr);
+       rtl8169_irq_mask_and_ack(tp);
 
        rtl_rx_close(tp);
 
@@ -4885,8 +4887,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
        RTL_W16(IntrMitigate, 0x5151);
 
        /* Work around for RxFIFO overflow. */
-       if (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
-           tp->mac_version == RTL_GIGA_MAC_VER_22) {
+       if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
                tp->intr_event |= RxFIFOOver | PCSTimeout;
                tp->intr_event &= ~RxOverflow;
        }
@@ -5076,6 +5077,11 @@ static void rtl_hw_start_8101(struct net_device *dev)
        void __iomem *ioaddr = tp->mmio_addr;
        struct pci_dev *pdev = tp->pci_dev;
 
+       if (tp->mac_version >= RTL_GIGA_MAC_VER_30) {
+               tp->intr_event &= ~RxFIFOOver;
+               tp->napi_event &= ~RxFIFOOver;
+       }
+
        if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
            tp->mac_version == RTL_GIGA_MAC_VER_16) {
                int cap = pci_pcie_cap(pdev);
@@ -5342,7 +5348,7 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev)
        /* Wait for any pending NAPI task to complete */
        napi_disable(&tp->napi);
 
-       rtl8169_irq_mask_and_ack(ioaddr);
+       rtl8169_irq_mask_and_ack(tp);
 
        tp->intr_mask = 0xffff;
        RTL_W16(IntrMask, tp->intr_event);
@@ -5389,14 +5395,16 @@ static void rtl8169_reset_task(struct work_struct *work)
        if (!netif_running(dev))
                goto out_unlock;
 
+       rtl8169_hw_reset(tp);
+
        rtl8169_wait_for_quiescence(dev);
 
        for (i = 0; i < NUM_RX_DESC; i++)
                rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
 
        rtl8169_tx_clear(tp);
+       rtl8169_init_ring_indexes(tp);
 
-       rtl8169_hw_reset(tp);
        rtl_hw_start(dev);
        netif_wake_queue(dev);
        rtl8169_check_link_status(dev, tp, tp->mmio_addr);
@@ -5407,11 +5415,6 @@ out_unlock:
 
 static void rtl8169_tx_timeout(struct net_device *dev)
 {
-       struct rtl8169_private *tp = netdev_priv(dev);
-
-       rtl8169_hw_reset(tp);
-
-       /* Let's wait a bit while any (async) irq lands on */
        rtl8169_schedule_work(dev, rtl8169_reset_task);
 }
 
@@ -5804,6 +5807,10 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
         */
        status = RTL_R16(IntrStatus);
        while (status && status != 0xffff) {
+               status &= tp->intr_event;
+               if (!status)
+                       break;
+
                handled = 1;
 
                /* Handle all of the error cases first. These will reset
@@ -5818,27 +5825,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
                        switch (tp->mac_version) {
                        /* Work around for rx fifo overflow */
                        case RTL_GIGA_MAC_VER_11:
-                       case RTL_GIGA_MAC_VER_22:
-                       case RTL_GIGA_MAC_VER_26:
                                netif_stop_queue(dev);
                                rtl8169_tx_timeout(dev);
                                goto done;
-                       /* Testers needed. */
-                       case RTL_GIGA_MAC_VER_17:
-                       case RTL_GIGA_MAC_VER_19:
-                       case RTL_GIGA_MAC_VER_20:
-                       case RTL_GIGA_MAC_VER_21:
-                       case RTL_GIGA_MAC_VER_23:
-                       case RTL_GIGA_MAC_VER_24:
-                       case RTL_GIGA_MAC_VER_27:
-                       case RTL_GIGA_MAC_VER_28:
-                       case RTL_GIGA_MAC_VER_31:
-                       /* Experimental science. Pktgen proof. */
-                       case RTL_GIGA_MAC_VER_12:
-                       case RTL_GIGA_MAC_VER_25:
-                               if (status == RxFIFOOver)
-                                       goto done;
-                               break;
                        default:
                                break;
                        }
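
Masking status against tp->intr_event at the top of the loop means events a chip's setup path has already excluded (for example RxFIFOOver, cleared from intr_event for RTL_GIGA_MAC_VER_30 and later in rtl_hw_start_8101 above) never reach the per-MAC workaround switch, which is presumably why the long list of experimental cases could be dropped. The new early exit, in outline:

                status &= tp->intr_event;       /* drop bits this chip does not handle */
                if (!status)
                        break;                  /* nothing of interest left */
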
index 14e134d..e43702f 100644 (file)
@@ -1336,7 +1336,8 @@ static int efx_probe_nic(struct efx_nic *efx)
        if (efx->n_channels > 1)
                get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
        for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
-               efx->rx_indir_table[i] = i % efx->n_rx_channels;
+               efx->rx_indir_table[i] =
+                       ethtool_rxfh_indir_default(i, efx->n_rx_channels);
 
        efx_set_channels(efx);
        netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
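
ethtool_rxfh_indir_default() is the generic helper for the default RSS indirection spread; as far as I can tell it reduces to the same modulo the driver used to open-code. A paraphrased sketch, not the authoritative definition:

/* roughly what include/linux/ethtool.h provides */
static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
{
        return index % n_rx_rings;
}
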
@@ -2235,9 +2236,9 @@ static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
                    PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
         .driver_data = (unsigned long) &falcon_b0_nic_type},
-       {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, BETHPAGE_A_P_DEVID),
+       {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803),  /* SFC9020 */
         .driver_data = (unsigned long) &siena_a0_nic_type},
-       {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, SIENA_A_P_DEVID),
+       {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813),  /* SFL9021 */
         .driver_data = (unsigned long) &siena_a0_nic_type},
        {0}                     /* end of list */
 };
index 4764793..8a5336d 100644 (file)
 #include "net_driver.h"
 #include "filter.h"
 
-/* PCI IDs */
-#define BETHPAGE_A_P_DEVID      0x0803
-#define SIENA_A_P_DEVID         0x0813
-
 /* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
 #define EFX_MEM_BAR 2
 
index f3cd96d..1be51b2 100644 (file)
@@ -956,40 +956,28 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
        return rc < 0 ? rc : 0;
 }
 
-static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,
-                                     struct ethtool_rxfh_indir *indir)
+static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
-       size_t copy_size =
-               min_t(size_t, indir->size, ARRAY_SIZE(efx->rx_indir_table));
 
-       if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
-               return -EOPNOTSUPP;
+       return (efx_nic_rev(efx) < EFX_REV_FALCON_B0 ?
+               0 : ARRAY_SIZE(efx->rx_indir_table));
+}
+
+static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, u32 *indir)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
 
-       indir->size = ARRAY_SIZE(efx->rx_indir_table);
-       memcpy(indir->ring_index, efx->rx_indir_table,
-              copy_size * sizeof(indir->ring_index[0]));
+       memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table));
        return 0;
 }
 
 static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
-                                     const struct ethtool_rxfh_indir *indir)
+                                     const u32 *indir)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
-       size_t i;
-
-       if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
-               return -EOPNOTSUPP;
-
-       /* Validate size and indices */
-       if (indir->size != ARRAY_SIZE(efx->rx_indir_table))
-               return -EINVAL;
-       for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
-               if (indir->ring_index[i] >= efx->n_rx_channels)
-                       return -EINVAL;
 
-       memcpy(efx->rx_indir_table, indir->ring_index,
-              sizeof(efx->rx_indir_table));
+       memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table));
        efx_nic_push_rx_indir_table(efx);
        return 0;
 }
@@ -1020,6 +1008,7 @@ const struct ethtool_ops efx_ethtool_ops = {
        .reset                  = efx_ethtool_reset,
        .get_rxnfc              = efx_ethtool_get_rxnfc,
        .set_rx_ntuple          = efx_ethtool_set_rx_ntuple,
+       .get_rxfh_indir_size    = efx_ethtool_get_rxfh_indir_size,
        .get_rxfh_indir         = efx_ethtool_get_rxfh_indir,
        .set_rxfh_indir         = efx_ethtool_set_rxfh_indir,
 };
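
With this interface the RSS indirection table is exchanged as a flat u32 array whose length the core obtains from the new get_rxfh_indir_size() hook (0 meaning unsupported, as the pre-Falcon-B0 case above returns), so the size and index validation removed from the driver is presumably done centrally by the ethtool core. The driver-side contract, as used in this hunk:

        u32 (*get_rxfh_indir_size)(struct net_device *dev);    /* number of entries, 0 if unsupported */
        int (*get_rxfh_indir)(struct net_device *dev, u32 *indir);
        int (*set_rxfh_indir)(struct net_device *dev, const u32 *indir);
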
index 97b606b..8ae1ebd 100644 (file)
@@ -610,7 +610,7 @@ static void falcon_stats_complete(struct efx_nic *efx)
        if (!nic_data->stats_pending)
                return;
 
-       nic_data->stats_pending = 0;
+       nic_data->stats_pending = false;
        if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
                rmb(); /* read the done flag before the stats */
                efx->mac_op->update_stats(efx);
index b630448..bc9dcd6 100644 (file)
@@ -496,7 +496,7 @@ static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
                rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
                if (rc)
                        goto out;
-               part->mcdi.updating = 1;
+               part->mcdi.updating = true;
        }
 
        /* The MCDI interface can in fact do multiple erase blocks at once;
@@ -528,7 +528,7 @@ static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
                rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
                if (rc)
                        goto out;
-               part->mcdi.updating = 1;
+               part->mcdi.updating = true;
        }
 
        while (offset < end) {
@@ -553,7 +553,7 @@ static int siena_mtd_sync(struct mtd_info *mtd)
        int rc = 0;
 
        if (part->mcdi.updating) {
-               part->mcdi.updating = 0;
+               part->mcdi.updating = false;
                rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type);
        }
 
index 955b149..aca3498 100644 (file)
@@ -479,11 +479,8 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
                if (efx->net_dev->features & NETIF_F_RXHASH)
                        skb->rxhash = efx_rx_buf_hash(eh);
 
-               skb_frag_set_page(skb, 0, page);
-               skb_shinfo(skb)->frags[0].page_offset =
-                       efx_rx_buf_offset(efx, rx_buf);
-               skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx_buf->len);
-               skb_shinfo(skb)->nr_frags = 1;
+               skb_fill_page_desc(skb, 0, page,
+                                  efx_rx_buf_offset(efx, rx_buf), rx_buf->len);
 
                skb->len = rx_buf->len;
                skb->data_len = rx_buf->len;
index cc2549c..4d5d619 100644 (file)
@@ -232,7 +232,7 @@ static int siena_probe_nvconfig(struct efx_nic *efx)
 static int siena_probe_nic(struct efx_nic *efx)
 {
        struct siena_nic_data *nic_data;
-       bool already_attached = 0;
+       bool already_attached = false;
        efx_oword_t reg;
        int rc;
 
index 22745d7..0364283 100644 (file)
@@ -12,11 +12,36 @@ config STMMAC_ETH
 
 if STMMAC_ETH
 
+config STMMAC_PLATFORM
+       tristate "STMMAC platform bus support"
+       depends on STMMAC_ETH
+       default y
+       ---help---
+         This selects the platform specific bus support for
+         the stmmac device driver. This is the driver used
+         on many embedded STM platforms based on ARM and SuperH
+         processors.
+         If you have a controller with this interface, say Y or M here.
+
+         If unsure, say N.
+
+config STMMAC_PCI
+       tristate "STMMAC support on PCI bus (EXPERIMENTAL)"
+       depends on STMMAC_ETH && PCI && EXPERIMENTAL
+       ---help---
+         This selects the Synopsys DWMAC available on PCI devices.
+         If you have a controller with this interface, say Y or M here.
+
+         This PCI support is tested on XILINX XC2V3000 FF1152AMT0221
+         D1215994A VIRTEX FPGA board.
+
+         If unsure, say N.
+
 config STMMAC_DEBUG_FS
        bool "Enable monitoring via sysFS "
        default n
        depends on STMMAC_ETH && DEBUG_FS
-       -- help
+       ---help---
          The stmmac entry in /sys reports DMA TX/RX rings
          or (if supported) the HW cap register.
 
index d7c4516..bc965ac 100644 (file)
@@ -2,6 +2,8 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o
 stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
 stmmac-$(CONFIG_STMMAC_RING) += ring_mode.o
 stmmac-$(CONFIG_STMMAC_CHAINED) += chain_mode.o
+stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
+stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
 stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o     \
              dwmac_lib.o dwmac1000_core.o  dwmac1000_dma.o     \
              dwmac100_core.o dwmac100_dma.o enh_desc.o  norm_desc.o \
index 2cc1192..d0b814e 100644 (file)
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#include <linux/etherdevice.h>
 #include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/module.h>
+#include <linux/init.h>
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 #define STMMAC_VLAN_TAG_USED
 #include <linux/if_vlan.h>
@@ -315,5 +319,8 @@ extern void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
                                unsigned int high, unsigned int low);
 extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
                                unsigned int high, unsigned int low);
+
+extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
+
 extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
 extern const struct stmmac_ring_mode_ops ring_mode_ops;
index e250935..f20aa12 100644 (file)
@@ -238,6 +238,19 @@ void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
        writel(data, ioaddr + low);
 }
 
+/* Enable disable MAC RX/TX */
+void stmmac_set_mac(void __iomem *ioaddr, bool enable)
+{
+       u32 value = readl(ioaddr + MAC_CTRL_REG);
+
+       if (enable)
+               value |= MAC_RNABLE_RX | MAC_ENABLE_TX;
+       else
+               value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX);
+
+       writel(value, ioaddr + MAC_CTRL_REG);
+}
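
The single stmmac_set_mac() helper replaces the stmmac_enable_mac()/stmmac_disable_mac() pair removed from stmmac_main.c further down; callers just pass the desired state. Usage as seen in the rest of this series:

        stmmac_set_mac(priv->ioaddr, true);     /* open / resume: enable RX and TX */
        stmmac_set_mac(priv->ioaddr, false);    /* release / remove / suspend without WoL */
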
+
 void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
                         unsigned int high, unsigned int low)
 {
index a140a8f..1207400 100644 (file)
@@ -20,7 +20,8 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
-#define DRV_MODULE_VERSION     "Oct_2011"
+#define STMMAC_RESOURCE_NAME   "stmmaceth"
+#define DRV_MODULE_VERSION     "Dec_2011"
 #include <linux/stmmac.h>
 #include <linux/phy.h>
 #include "common.h"
@@ -82,8 +83,18 @@ struct stmmac_priv {
        int hw_cap_support;
 };
 
+extern int phyaddr;
+
 extern int stmmac_mdio_unregister(struct net_device *ndev);
 extern int stmmac_mdio_register(struct net_device *ndev);
 extern void stmmac_set_ethtool_ops(struct net_device *netdev);
 extern const struct stmmac_desc_ops enh_desc_ops;
 extern const struct stmmac_desc_ops ndesc_ops;
+
+int stmmac_freeze(struct net_device *ndev);
+int stmmac_restore(struct net_device *ndev);
+int stmmac_resume(struct net_device *ndev);
+int stmmac_suspend(struct net_device *ndev);
+int stmmac_dvr_remove(struct net_device *ndev);
+struct stmmac_priv *stmmac_dvr_probe(struct device *device,
+                               struct plat_stmmacenet_data *plat_dat);
index 24c2bf6..3738b47 100644 (file)
        https://bugzilla.stlinux.com/
 *******************************************************************************/
 
-#include <linux/module.h>
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
-#include <linux/etherdevice.h>
-#include <linux/platform_device.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <linux/skbuff.h>
@@ -52,8 +48,6 @@
 #endif
 #include "stmmac.h"
 
-#define STMMAC_RESOURCE_NAME   "stmmaceth"
-
 #undef STMMAC_DEBUG
 /*#define STMMAC_DEBUG*/
 #ifdef STMMAC_DEBUG
@@ -93,7 +87,7 @@ static int debug = -1;                /* -1: default, 0: no output, 16:  all */
 module_param(debug, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(debug, "Message Level (0: no output, 16: all)");
 
-static int phyaddr = -1;
+int phyaddr = -1;
 module_param(phyaddr, int, S_IRUGO);
 MODULE_PARM_DESC(phyaddr, "Physical device address");
 
@@ -141,6 +135,11 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
 
 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
 
+#ifdef CONFIG_STMMAC_DEBUG_FS
+static int stmmac_init_fs(struct net_device *dev);
+static void stmmac_exit_fs(void);
+#endif
+
 /**
  * stmmac_verify_args - verify the driver parameters.
  * Description: it verifies if some wrong parameter is passed to the driver.
@@ -345,22 +344,6 @@ static int stmmac_init_phy(struct net_device *dev)
        return 0;
 }
 
-static inline void stmmac_enable_mac(void __iomem *ioaddr)
-{
-       u32 value = readl(ioaddr + MAC_CTRL_REG);
-
-       value |= MAC_RNABLE_RX | MAC_ENABLE_TX;
-       writel(value, ioaddr + MAC_CTRL_REG);
-}
-
-static inline void stmmac_disable_mac(void __iomem *ioaddr)
-{
-       u32 value = readl(ioaddr + MAC_CTRL_REG);
-
-       value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX);
-       writel(value, ioaddr + MAC_CTRL_REG);
-}
-
 /**
  * display_ring
  * @p: pointer to the ring.
@@ -886,6 +869,53 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
        return hw_cap;
 }
 
+/**
+ * stmmac_mac_device_setup
+ * @dev : device pointer
+ * Description: this is to attach the GMAC or MAC 10/100
+ * main core structures that will be completed during the
+ * open step.
+ */
+static int stmmac_mac_device_setup(struct net_device *dev)
+{
+       struct stmmac_priv *priv = netdev_priv(dev);
+
+       struct mac_device_info *device;
+
+       if (priv->plat->has_gmac)
+               device = dwmac1000_setup(priv->ioaddr);
+       else
+               device = dwmac100_setup(priv->ioaddr);
+
+       if (!device)
+               return -ENOMEM;
+
+       priv->hw = device;
+       priv->hw->ring = &ring_mode_ops;
+
+       if (device_can_wakeup(priv->device)) {
+               priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
+               enable_irq_wake(priv->wol_irq);
+       }
+
+       return 0;
+}
+
+static void stmmac_check_ether_addr(struct stmmac_priv *priv)
+{
+       /* Verify that the MAC address is valid; if it is not, fall back
+        * to a randomly generated MAC address. */
+       if (!is_valid_ether_addr(priv->dev->dev_addr)) {
+               priv->hw->mac->get_umac_addr((void __iomem *)
+                                            priv->dev->base_addr,
+                                            priv->dev->dev_addr, 0);
+               if  (!is_valid_ether_addr(priv->dev->dev_addr))
+                       random_ether_addr(priv->dev->dev_addr);
+       }
+       pr_warning("%s: device MAC address %pM\n", priv->dev->name,
+                                                  priv->dev->dev_addr);
+}
+
 /**
  *  stmmac_open - open entry point of the driver
  *  @dev : pointer to the device structure.
@@ -900,18 +930,28 @@ static int stmmac_open(struct net_device *dev)
        struct stmmac_priv *priv = netdev_priv(dev);
        int ret;
 
-       /* Check that the MAC address is valid.  If its not, refuse
-        * to bring the device up. The user must specify an
-        * address using the following linux command:
-        *      ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx  */
-       if (!is_valid_ether_addr(dev->dev_addr)) {
-               random_ether_addr(dev->dev_addr);
-               pr_warning("%s: generated random MAC address %pM\n", dev->name,
-                       dev->dev_addr);
-       }
+       /* MAC HW device setup */
+       ret = stmmac_mac_device_setup(dev);
+       if (ret < 0)
+               return ret;
+
+       stmmac_check_ether_addr(priv);
 
        stmmac_verify_args();
 
+       /* Override with kernel parameters if supplied XXX CRS XXX
+        * this needs to have multiple instances */
+       if ((phyaddr >= 0) && (phyaddr <= 31))
+               priv->plat->phy_addr = phyaddr;
+
+       /* MDIO bus Registration */
+       ret = stmmac_mdio_register(dev);
+       if (ret < 0) {
+               pr_debug("%s: MDIO bus (id: %d) registration failed",
+                        __func__, priv->plat->bus_id);
+               return ret;
+       }
+
 #ifdef CONFIG_STMMAC_TIMER
        priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
        if (unlikely(priv->tm == NULL)) {
@@ -1008,7 +1048,7 @@ static int stmmac_open(struct net_device *dev)
        }
 
        /* Enable the MAC Rx/Tx */
-       stmmac_enable_mac(priv->ioaddr);
+       stmmac_set_mac(priv->ioaddr, true);
 
        /* Set the HW DMA mode and the COE */
        stmmac_dma_operation_mode(priv);
@@ -1019,6 +1059,11 @@ static int stmmac_open(struct net_device *dev)
 
        stmmac_mmc_setup(priv);
 
+#ifdef CONFIG_STMMAC_DEBUG_FS
+       ret = stmmac_init_fs(dev);
+       if (ret < 0)
+               pr_warning("\tFailed debugFS registration");
+#endif
        /* Start the ball rolling... */
        DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
        priv->hw->dma->start_tx(priv->ioaddr);
@@ -1091,10 +1136,15 @@ static int stmmac_release(struct net_device *dev)
        free_dma_desc_resources(priv);
 
        /* Disable the MAC Rx/Tx */
-       stmmac_disable_mac(priv->ioaddr);
+       stmmac_set_mac(priv->ioaddr, false);
 
        netif_carrier_off(dev);
 
+#ifdef CONFIG_STMMAC_DEBUG_FS
+       stmmac_exit_fs();
+#endif
+       stmmac_mdio_unregister(dev);
+
        return 0;
 }
 
@@ -1739,28 +1789,41 @@ static const struct net_device_ops stmmac_netdev_ops = {
 };
 
 /**
- * stmmac_probe - Initialization of the adapter .
- * @dev : device pointer
- * Description: The function initializes the network device structure for
- * the STMMAC driver. It also calls the low level routines
- * in order to init the HW (i.e. the DMA engine)
+ * stmmac_dvr_probe
+ * @device: device pointer
+ * Description: this is the main probe function used to
+ * call alloc_etherdev and allocate the private structure.
  */
-static int stmmac_probe(struct net_device *dev)
+struct stmmac_priv *stmmac_dvr_probe(struct device *device,
+                                       struct plat_stmmacenet_data *plat_dat)
 {
        int ret = 0;
-       struct stmmac_priv *priv = netdev_priv(dev);
+       struct net_device *ndev = NULL;
+       struct stmmac_priv *priv;
+
+       ndev = alloc_etherdev(sizeof(struct stmmac_priv));
+       if (!ndev) {
+               pr_err("%s: ERROR: allocating the device\n", __func__);
+               return NULL;
+       }
+
+       SET_NETDEV_DEV(ndev, device);
+
+       priv = netdev_priv(ndev);
+       priv->device = device;
+       priv->dev = ndev;
 
-       ether_setup(dev);
+       ether_setup(ndev);
 
-       dev->netdev_ops = &stmmac_netdev_ops;
-       stmmac_set_ethtool_ops(dev);
+       ndev->netdev_ops = &stmmac_netdev_ops;
+       stmmac_set_ethtool_ops(ndev);
 
-       dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-       dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
-       dev->watchdog_timeo = msecs_to_jiffies(watchdog);
+       ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+       ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
+       ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
 #ifdef STMMAC_VLAN_TAG_USED
        /* Both mac100 and gmac support receive VLAN tag detection */
-       dev->features |= NETIF_F_HW_VLAN_RX;
+       ndev->features |= NETIF_F_HW_VLAN_RX;
 #endif
        priv->msg_enable = netif_msg_init(debug, default_msg_level);
 
@@ -1768,248 +1831,60 @@ static int stmmac_probe(struct net_device *dev)
                priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
 
        priv->pause = pause;
-       netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
-
-       /* Get the MAC address */
-       priv->hw->mac->get_umac_addr((void __iomem *) dev->base_addr,
-                                    dev->dev_addr, 0);
-
-       if (!is_valid_ether_addr(dev->dev_addr))
-               pr_warning("\tno valid MAC address;"
-                       "please, use ifconfig or nwhwconfig!\n");
+       priv->plat = plat_dat;
+       netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
 
        spin_lock_init(&priv->lock);
        spin_lock_init(&priv->tx_lock);
 
-       ret = register_netdev(dev);
+       ret = register_netdev(ndev);
        if (ret) {
                pr_err("%s: ERROR %i registering the device\n",
                       __func__, ret);
-               return -ENODEV;
+               goto error;
        }
 
        DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n",
-           dev->name, (dev->features & NETIF_F_SG) ? "on" : "off",
-           (dev->features & NETIF_F_IP_CSUM) ? "on" : "off");
-
-       return ret;
-}
-
-/**
- * stmmac_mac_device_setup
- * @dev : device pointer
- * Description: select and initialise the mac device (mac100 or Gmac).
- */
-static int stmmac_mac_device_setup(struct net_device *dev)
-{
-       struct stmmac_priv *priv = netdev_priv(dev);
+           ndev->name, (ndev->features & NETIF_F_SG) ? "on" : "off",
+           (ndev->features & NETIF_F_IP_CSUM) ? "on" : "off");
 
-       struct mac_device_info *device;
-
-       if (priv->plat->has_gmac) {
-               dev->priv_flags |= IFF_UNICAST_FLT;
-               device = dwmac1000_setup(priv->ioaddr);
-       } else {
-               device = dwmac100_setup(priv->ioaddr);
-       }
-
-       if (!device)
-               return -ENOMEM;
-
-       priv->hw = device;
-       priv->hw->ring = &ring_mode_ops;
-
-       if (device_can_wakeup(priv->device)) {
-               priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
-               enable_irq_wake(priv->wol_irq);
-       }
-
-       return 0;
-}
-
-/**
- * stmmac_dvr_probe
- * @pdev: platform device pointer
- * Description: the driver is initialized through platform_device.
- */
-static int stmmac_dvr_probe(struct platform_device *pdev)
-{
-       int ret = 0;
-       struct resource *res;
-       void __iomem *addr = NULL;
-       struct net_device *ndev = NULL;
-       struct stmmac_priv *priv = NULL;
-       struct plat_stmmacenet_data *plat_dat;
-
-       pr_info("STMMAC driver:\n\tplatform registration... ");
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res)
-               return -ENODEV;
-       pr_info("\tdone!\n");
-
-       if (!request_mem_region(res->start, resource_size(res),
-                               pdev->name)) {
-               pr_err("%s: ERROR: memory allocation failed"
-                      "cannot get the I/O addr 0x%x\n",
-                      __func__, (unsigned int)res->start);
-               return -EBUSY;
-       }
-
-       addr = ioremap(res->start, resource_size(res));
-       if (!addr) {
-               pr_err("%s: ERROR: memory mapping failed\n", __func__);
-               ret = -ENOMEM;
-               goto out_release_region;
-       }
-
-       ndev = alloc_etherdev(sizeof(struct stmmac_priv));
-       if (!ndev) {
-               pr_err("%s: ERROR: allocating the device\n", __func__);
-               ret = -ENOMEM;
-               goto out_unmap;
-       }
-
-       SET_NETDEV_DEV(ndev, &pdev->dev);
-
-       /* Get the MAC information */
-       ndev->irq = platform_get_irq_byname(pdev, "macirq");
-       if (ndev->irq == -ENXIO) {
-               pr_err("%s: ERROR: MAC IRQ configuration "
-                      "information not found\n", __func__);
-               ret = -ENXIO;
-               goto out_free_ndev;
-       }
-
-       priv = netdev_priv(ndev);
-       priv->device = &(pdev->dev);
-       priv->dev = ndev;
-       plat_dat = pdev->dev.platform_data;
-
-       priv->plat = plat_dat;
+       return priv;
 
-       priv->ioaddr = addr;
+error:
+       netif_napi_del(&priv->napi);
 
-       /*
-        * On some platforms e.g. SPEAr the wake up irq differs from the mac irq
-        * The external wake up irq can be passed through the platform code
-        * named as "eth_wake_irq"
-        *
-        * In case the wake up interrupt is not passed from the platform
-        * so the driver will continue to use the mac irq (ndev->irq)
-        */
-       priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
-       if (priv->wol_irq == -ENXIO)
-               priv->wol_irq = ndev->irq;
-
-       platform_set_drvdata(pdev, ndev);
-
-       /* Set the I/O base addr */
-       ndev->base_addr = (unsigned long)addr;
-
-       /* Custom initialisation */
-       if (priv->plat->init) {
-               ret = priv->plat->init(pdev);
-               if (unlikely(ret))
-                       goto out_free_ndev;
-       }
-
-       /* MAC HW device detection */
-       ret = stmmac_mac_device_setup(ndev);
-       if (ret < 0)
-               goto out_plat_exit;
-
-       /* Network Device Registration */
-       ret = stmmac_probe(ndev);
-       if (ret < 0)
-               goto out_plat_exit;
-
-       /* Override with kernel parameters if supplied XXX CRS XXX
-        * this needs to have multiple instances */
-       if ((phyaddr >= 0) && (phyaddr <= 31))
-               priv->plat->phy_addr = phyaddr;
-
-       pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
-              "\tIO base addr: 0x%p)\n", ndev->name, pdev->name,
-              pdev->id, ndev->irq, addr);
-
-       /* MDIO bus Registration */
-       pr_debug("\tMDIO bus (id: %d)...", priv->plat->bus_id);
-       ret = stmmac_mdio_register(ndev);
-       if (ret < 0)
-               goto out_unregister;
-       pr_debug("registered!\n");
-
-#ifdef CONFIG_STMMAC_DEBUG_FS
-       ret = stmmac_init_fs(ndev);
-       if (ret < 0)
-               pr_warning("\tFailed debugFS registration");
-#endif
-
-       return 0;
-
-out_unregister:
        unregister_netdev(ndev);
-out_plat_exit:
-       if (priv->plat->exit)
-               priv->plat->exit(pdev);
-out_free_ndev:
        free_netdev(ndev);
-       platform_set_drvdata(pdev, NULL);
-out_unmap:
-       iounmap(addr);
-out_release_region:
-       release_mem_region(res->start, resource_size(res));
 
-       return ret;
+       return NULL;
 }
 
 /**
  * stmmac_dvr_remove
- * @pdev: platform device pointer
+ * @ndev: net device pointer
  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
- * changes the link status, releases the DMA descriptor rings,
- * unregisters the MDIO bus and unmaps the allocated memory.
+ * changes the link status, releases the DMA descriptor rings.
  */
-static int stmmac_dvr_remove(struct platform_device *pdev)
+int stmmac_dvr_remove(struct net_device *ndev)
 {
-       struct net_device *ndev = platform_get_drvdata(pdev);
        struct stmmac_priv *priv = netdev_priv(ndev);
-       struct resource *res;
 
        pr_info("%s:\n\tremoving driver", __func__);
 
        priv->hw->dma->stop_rx(priv->ioaddr);
        priv->hw->dma->stop_tx(priv->ioaddr);
 
-       stmmac_disable_mac(priv->ioaddr);
-
+       stmmac_set_mac(priv->ioaddr, false);
        netif_carrier_off(ndev);
-
-       stmmac_mdio_unregister(ndev);
-
-       if (priv->plat->exit)
-               priv->plat->exit(pdev);
-
-       platform_set_drvdata(pdev, NULL);
        unregister_netdev(ndev);
-
-       iounmap((void *)priv->ioaddr);
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       release_mem_region(res->start, resource_size(res));
-
-#ifdef CONFIG_STMMAC_DEBUG_FS
-       stmmac_exit_fs();
-#endif
-
        free_netdev(ndev);
 
        return 0;
 }
 
 #ifdef CONFIG_PM
-static int stmmac_suspend(struct device *dev)
+int stmmac_suspend(struct net_device *ndev)
 {
-       struct net_device *ndev = dev_get_drvdata(dev);
        struct stmmac_priv *priv = netdev_priv(ndev);
        int dis_ic = 0;
 
@@ -2043,15 +1918,14 @@ static int stmmac_suspend(struct device *dev)
        if (device_may_wakeup(priv->device))
                priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
        else
-               stmmac_disable_mac(priv->ioaddr);
+               stmmac_set_mac(priv->ioaddr, false);
 
        spin_unlock(&priv->lock);
        return 0;
 }
 
-static int stmmac_resume(struct device *dev)
+int stmmac_resume(struct net_device *ndev)
 {
-       struct net_device *ndev = dev_get_drvdata(dev);
        struct stmmac_priv *priv = netdev_priv(ndev);
 
        if (!netif_running(ndev))
@@ -2070,7 +1944,7 @@ static int stmmac_resume(struct device *dev)
        netif_device_attach(ndev);
 
        /* Enable the MAC and DMA */
-       stmmac_enable_mac(priv->ioaddr);
+       stmmac_set_mac(priv->ioaddr, true);
        priv->hw->dma->start_tx(priv->ioaddr);
        priv->hw->dma->start_rx(priv->ioaddr);
 
@@ -2090,47 +1964,23 @@ static int stmmac_resume(struct device *dev)
        return 0;
 }
 
-static int stmmac_freeze(struct device *dev)
+int stmmac_freeze(struct net_device *ndev)
 {
-       struct net_device *ndev = dev_get_drvdata(dev);
-
        if (!ndev || !netif_running(ndev))
                return 0;
 
        return stmmac_release(ndev);
 }
 
-static int stmmac_restore(struct device *dev)
+int stmmac_restore(struct net_device *ndev)
 {
-       struct net_device *ndev = dev_get_drvdata(dev);
-
        if (!ndev || !netif_running(ndev))
                return 0;
 
        return stmmac_open(ndev);
 }
-
-static const struct dev_pm_ops stmmac_pm_ops = {
-       .suspend = stmmac_suspend,
-       .resume = stmmac_resume,
-       .freeze = stmmac_freeze,
-       .thaw = stmmac_restore,
-       .restore = stmmac_restore,
-};
-#else
-static const struct dev_pm_ops stmmac_pm_ops;
 #endif /* CONFIG_PM */
 
-static struct platform_driver stmmac_driver = {
-       .probe = stmmac_dvr_probe,
-       .remove = stmmac_dvr_remove,
-       .driver = {
-               .name = STMMAC_RESOURCE_NAME,
-               .owner = THIS_MODULE,
-               .pm = &stmmac_pm_ops,
-       },
-};
-
 #ifndef MODULE
 static int __init stmmac_cmdline_opt(char *str)
 {
@@ -2190,8 +2040,6 @@ err:
 __setup("stmmaceth=", stmmac_cmdline_opt);
 #endif
 
-module_platform_driver(stmmac_driver);
-
-MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet driver");
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
 MODULE_LICENSE("GPL");
index 9c3b9d5..51f4412 100644 (file)
@@ -109,6 +109,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
  */
 static int stmmac_mdio_reset(struct mii_bus *bus)
 {
+#if defined(CONFIG_STMMAC_PLATFORM)
        struct net_device *ndev = bus->priv;
        struct stmmac_priv *priv = netdev_priv(ndev);
        unsigned int mii_address = priv->hw->mii.addr;
@@ -123,7 +124,7 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
         * on MDC, so perform a dummy mdio read.
         */
        writel(0, priv->ioaddr + mii_address);
-
+#endif
        return 0;
 }
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
new file mode 100644 (file)
index 0000000..54a819a
--- /dev/null
@@ -0,0 +1,221 @@
+/*******************************************************************************
+  This contains the functions to handle the pci driver.
+
+  Copyright (C) 2011-2012  Vayavya Labs Pvt Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include "stmmac.h"
+
+struct plat_stmmacenet_data plat_dat;
+struct stmmac_mdio_bus_data mdio_data;
+
+static void stmmac_default_data(void)
+{
+       memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data));
+       plat_dat.bus_id = 1;
+       plat_dat.phy_addr = 0;
+       plat_dat.interface = PHY_INTERFACE_MODE_GMII;
+       plat_dat.pbl = 32;
+       plat_dat.clk_csr = 2;   /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+       plat_dat.has_gmac = 1;
+       plat_dat.force_sf_dma_mode = 1;
+
+       mdio_data.bus_id = 1;
+       mdio_data.phy_reset = NULL;
+       mdio_data.phy_mask = 0;
+       plat_dat.mdio_bus_data = &mdio_data;
+}
+
+/**
+ * stmmac_pci_probe
+ *
+ * @pdev: pci device pointer
+ * @id: pointer to table of device id/id's.
+ *
+ * Description: This probing function gets called for all PCI devices which
+ * match the ID table and are not "owned" by other driver yet. This function
+ * gets passed a "struct pci_dev *" for each device whose entry in the ID table
+ * matches the device. The probe function returns zero when the driver chooses
+ * to take "ownership" of the device, or a negative error code otherwise.
+ */
+static int __devinit stmmac_pci_probe(struct pci_dev *pdev,
+                                     const struct pci_device_id *id)
+{
+       int ret = 0;
+       void __iomem *addr = NULL;
+       struct stmmac_priv *priv = NULL;
+       int i;
+
+       /* Enable pci device */
+       ret = pci_enable_device(pdev);
+       if (ret) {
+               pr_err("%s : ERROR: failed to enable %s device\n", __func__,
+                      pci_name(pdev));
+               return ret;
+       }
+       if (pci_request_regions(pdev, STMMAC_RESOURCE_NAME)) {
+               pr_err("%s: ERROR: failed to get PCI region\n", __func__);
+               ret = -ENODEV;
+               goto err_out_req_reg_failed;
+       }
+
+       /* Get the base address of device */
+       for (i = 0; i <= 5; i++) {
+               if (pci_resource_len(pdev, i) == 0)
+                       continue;
+               addr = pci_iomap(pdev, i, 0);
+               if (addr == NULL) {
+                       pr_err("%s: ERROR: cannot map register memory, aborting",
+                              __func__);
+                       ret = -EIO;
+                       goto err_out_map_failed;
+               }
+               break;
+       }
+       pci_set_master(pdev);
+
+       stmmac_default_data();
+
+       priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat);
+       if (!priv) {
+               pr_err("%s: main driver probe failed", __func__);
+               goto err_out;
+       }
+       priv->ioaddr = addr;
+       priv->dev->base_addr = (unsigned long)addr;
+       priv->dev->irq = pdev->irq;
+       priv->wol_irq = pdev->irq;
+
+       pci_set_drvdata(pdev, priv->dev);
+
+       pr_debug("STMMAC PCI driver registration completed");
+
+       return 0;
+
+err_out:
+       pci_clear_master(pdev);
+err_out_map_failed:
+       pci_release_regions(pdev);
+err_out_req_reg_failed:
+       pci_disable_device(pdev);
+
+       return ret;
+}
+
+/**
+ * stmmac_pci_remove
+ *
+ * @pdev: pci device pointer
+ * Description: this function calls the main driver to free the net resources
+ * and releases the PCI resources.
+ */
+static void __devexit stmmac_pci_remove(struct pci_dev *pdev)
+{
+       struct net_device *ndev = pci_get_drvdata(pdev);
+       struct stmmac_priv *priv = netdev_priv(ndev);
+
+       stmmac_dvr_remove(ndev);
+
+       pci_set_drvdata(pdev, NULL);
+       pci_iounmap(pdev, priv->ioaddr);
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+static int stmmac_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       struct net_device *ndev = pci_get_drvdata(pdev);
+       int ret;
+
+       ret = stmmac_suspend(ndev);
+       pci_save_state(pdev);
+       pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+       return ret;
+}
+
+static int stmmac_pci_resume(struct pci_dev *pdev)
+{
+       struct net_device *ndev = pci_get_drvdata(pdev);
+
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+
+       return stmmac_resume(ndev);
+}
+#endif
+
+#define STMMAC_VENDOR_ID 0x700
+#define STMMAC_DEVICE_ID 0x1108
+
+static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = {
+       {
+       PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)}, {
+       }
+};
+
+MODULE_DEVICE_TABLE(pci, stmmac_id_table);
+
+static struct pci_driver stmmac_driver = {
+       .name = STMMAC_RESOURCE_NAME,
+       .id_table = stmmac_id_table,
+       .probe = stmmac_pci_probe,
+       .remove = __devexit_p(stmmac_pci_remove),
+#ifdef CONFIG_PM
+       .suspend = stmmac_pci_suspend,
+       .resume = stmmac_pci_resume,
+#endif
+};
+
+/**
+ * stmmac_init_module - Entry point for the driver
+ * Description: This function is the entry point for the driver.
+ */
+static int __init stmmac_init_module(void)
+{
+       int ret;
+
+       ret = pci_register_driver(&stmmac_driver);
+       if (ret < 0)
+               pr_err("%s: ERROR: driver registration failed\n", __func__);
+
+       return ret;
+}
+
+/**
+ * stmmac_cleanup_module - Cleanup routine for the driver
+ * Description: This function is the cleanup routine for the driver.
+ */
+static void __exit stmmac_cleanup_module(void)
+{
+       pci_unregister_driver(&stmmac_driver);
+}
+
+module_init(stmmac_init_module);
+module_exit(stmmac_cleanup_module);
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver");
+MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
new file mode 100644 (file)
index 0000000..7b1594f
--- /dev/null
@@ -0,0 +1,198 @@
+/*******************************************************************************
+  This contains the functions to handle the platform driver.
+
+  Copyright (C) 2007-2011  STMicroelectronics Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include "stmmac.h"
+
+/**
+ * stmmac_pltfr_probe
+ * @pdev: platform device pointer
+ * Description: platform_device probe function. It allocates
+ * the necessary resources and invokes the main to init
+ * the net device, register the mdio bus etc.
+ */
+static int stmmac_pltfr_probe(struct platform_device *pdev)
+{
+       int ret = 0;
+       struct resource *res;
+       void __iomem *addr = NULL;
+       struct stmmac_priv *priv = NULL;
+       struct plat_stmmacenet_data *plat_dat;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res)
+               return -ENODEV;
+
+       if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+               pr_err("%s: ERROR: memory allocation failed"
+                      "cannot get the I/O addr 0x%x\n",
+                      __func__, (unsigned int)res->start);
+               return -EBUSY;
+       }
+
+       addr = ioremap(res->start, resource_size(res));
+       if (!addr) {
+               pr_err("%s: ERROR: memory mapping failed", __func__);
+               ret = -ENOMEM;
+               goto out_release_region;
+       }
+       plat_dat = pdev->dev.platform_data;
+       priv = stmmac_dvr_probe(&(pdev->dev), plat_dat);
+       if (!priv) {
+               pr_err("%s: main driver probe failed", __func__);
+               goto out_release_region;
+       }
+
+       priv->ioaddr = addr;
+       /* Set the I/O base addr */
+       priv->dev->base_addr = (unsigned long)addr;
+
+       /* Get the MAC information */
+       priv->dev->irq = platform_get_irq_byname(pdev, "macirq");
+       if (priv->dev->irq == -ENXIO) {
+               pr_err("%s: ERROR: MAC IRQ configuration "
+                      "information not found\n", __func__);
+               ret = -ENXIO;
+               goto out_unmap;
+       }
+
+       /*
+        * On some platforms, e.g. SPEAr, the wake-up irq differs from the mac
+        * irq. The external wake-up irq can be passed by the platform code
+        * under the name "eth_wake_irq".
+        *
+        * If the wake-up interrupt is not provided by the platform, the
+        * driver falls back to the mac irq (ndev->irq).
+        */
+       priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+       if (priv->wol_irq == -ENXIO)
+               priv->wol_irq = priv->dev->irq;
+
+       platform_set_drvdata(pdev, priv->dev);
+
+       /* Custom initialisation */
+       if (priv->plat->init) {
+               ret = priv->plat->init(pdev);
+               if (unlikely(ret))
+                       goto out_unmap;
+       }
+
+       pr_debug("STMMAC platform driver registration completed");
+
+       return 0;
+
+out_unmap:
+       iounmap(addr);
+       platform_set_drvdata(pdev, NULL);
+
+out_release_region:
+       release_mem_region(res->start, resource_size(res));
+
+       return ret;
+}
+
+/**
+ * stmmac_pltfr_remove
+ * @pdev: platform device pointer
+ * Description: this function calls the main driver to free the net resources,
+ * invokes the platform's exit hook and releases the resources (e.g. mem).
+ */
+static int stmmac_pltfr_remove(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct stmmac_priv *priv = netdev_priv(ndev);
+       struct resource *res;
+       int ret = stmmac_dvr_remove(ndev);
+
+       if (priv->plat->exit)
+               priv->plat->exit(pdev);
+
+       platform_set_drvdata(pdev, NULL);
+
+       iounmap((void *)priv->ioaddr);
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       release_mem_region(res->start, resource_size(res));
+
+       return ret;
+}
+
+#ifdef CONFIG_PM
+static int stmmac_pltfr_suspend(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+
+       return stmmac_suspend(ndev);
+}
+
+static int stmmac_pltfr_resume(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+
+       return stmmac_resume(ndev);
+}
+
+int stmmac_pltfr_freeze(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+
+       return stmmac_freeze(ndev);
+}
+
+int stmmac_pltfr_restore(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+
+       return stmmac_restore(ndev);
+}
+
+static const struct dev_pm_ops stmmac_pltfr_pm_ops = {
+       .suspend = stmmac_pltfr_suspend,
+       .resume = stmmac_pltfr_resume,
+       .freeze = stmmac_pltfr_freeze,
+       .thaw = stmmac_pltfr_restore,
+       .restore = stmmac_pltfr_restore,
+};
+#else
+static const struct dev_pm_ops stmmac_pltfr_pm_ops;
+#endif /* CONFIG_PM */
+
+static struct platform_driver stmmac_driver = {
+       .probe = stmmac_pltfr_probe,
+       .remove = stmmac_pltfr_remove,
+       .driver = {
+                  .name = STMMAC_RESOURCE_NAME,
+                  .owner = THIS_MODULE,
+                  .pm = &stmmac_pltfr_pm_ops,
+                  },
+};
+
+module_platform_driver(stmmac_driver);
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
index eebd52f..09c5186 100644 (file)
@@ -2850,7 +2850,7 @@ err_out:
 static int is_quattro_p(struct pci_dev *pdev)
 {
        struct pci_dev *busdev = pdev->bus->self;
-       struct list_head *tmp;
+       struct pci_dev *this_pdev;
        int n_hmes;
 
        if (busdev == NULL ||
@@ -2859,15 +2859,10 @@ static int is_quattro_p(struct pci_dev *pdev)
                return 0;
 
        n_hmes = 0;
-       tmp = pdev->bus->devices.next;
-       while (tmp != &pdev->bus->devices) {
-               struct pci_dev *this_pdev = pci_dev_b(tmp);
-
+       list_for_each_entry(this_pdev, &pdev->bus->devices, bus_list) {
                if (this_pdev->vendor == PCI_VENDOR_ID_SUN &&
                    this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL)
                        n_hmes++;
-
-               tmp = tmp->next;
        }
 
        if (n_hmes != 4)
index 3a90af6..4b19e9b 100644 (file)
@@ -727,9 +727,10 @@ static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
  * @ndev network device
  * @vid  VLAN vid to add
  */
-static void bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
+static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
 {
        __bdx_vlan_rx_vid(ndev, vid, 1);
+       return 0;
 }
 
 /*
@@ -737,9 +738,10 @@ static void bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
  * @ndev network device
  * @vid  VLAN vid to kill
  */
-static void bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
+static int bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
 {
        __bdx_vlan_rx_vid(ndev, vid, 0);
+       return 0;
 }
 
 /**
index dca9d33..c97d2f5 100644 (file)
@@ -836,11 +836,13 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
        chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
 
        /* handle completed packets */
+       spin_unlock_irqrestore(&chan->lock, flags);
        do {
                ret = __cpdma_chan_process(chan);
                if (ret < 0)
                        break;
        } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
+       spin_lock_irqsave(&chan->lock, flags);
 
        /* remaining packets haven't been tx/rx'ed, clean them up */
        while (chan->head) {
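
The teardown path now releases chan->lock around the completion loop, presumably because __cpdma_chan_process() takes the channel lock itself and would otherwise deadlock when called with it held. The pattern, in outline and not part of the patch beyond what the hunk shows:

        spin_unlock_irqrestore(&chan->lock, flags);     /* let the handler take its own locks */
        do {
                ret = __cpdma_chan_process(chan);
        } while (ret >= 0 && !(ret & CPDMA_DESC_TD_COMPLETE));
        spin_lock_irqsave(&chan->lock, flags);          /* re-acquire before walking chan->head */
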
index 10826d8..6b75063 100644 (file)
@@ -926,7 +926,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
                goto done;
 
        /* Re-enable the ingress interrupt. */
-       enable_percpu_irq(priv->intr_id);
+       enable_percpu_irq(priv->intr_id, 0);
 
        /* HACK: Avoid the "rotting packet" problem (see above). */
        if (qup->__packet_receive_read !=
@@ -1256,7 +1256,7 @@ static void tile_net_stop_aux(struct net_device *dev)
                          sizeof(dummy), NETIO_IPP_STOP_SHIM_OFF) < 0)
                panic("Failed to stop LIPP/LEPP!\n");
 
-       priv->partly_opened = 0;
+       priv->partly_opened = false;
 }
 
 
@@ -1296,7 +1296,7 @@ static void tile_net_open_enable(void *dev_ptr)
        info->napi_enabled = true;
 
        /* Enable the ingress interrupt. */
-       enable_percpu_irq(priv->intr_id);
+       enable_percpu_irq(priv->intr_id, 0);
 }
 
 
@@ -1507,7 +1507,7 @@ static int tile_net_open(struct net_device *dev)
                       priv->network_cpus_count, priv->network_cpus_credits);
 #endif
 
-               priv->partly_opened = 1;
+               priv->partly_opened = true;
 
        } else {
                /* FIXME: Is this possible? */
@@ -1697,7 +1697,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
        for (i = 0; i < sh->nr_frags; i++) {
 
                skb_frag_t *f = &sh->frags[i];
-               unsigned long pfn = page_to_pfn(f->page);
+               unsigned long pfn = page_to_pfn(skb_frag_page(f));
 
                /* FIXME: Compute "hash_for_home" properly. */
                /* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */
@@ -1706,7 +1706,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
                /* FIXME: Hmmm. */
                if (!hash_default) {
                        void *va = pfn_to_kaddr(pfn) + f->page_offset;
-                       BUG_ON(PageHighMem(f->page));
+                       BUG_ON(PageHighMem(skb_frag_page(f)));
                        finv_buffer_remote(va, f->size, 0);
                }
 
index 5587ecd..5c4983b 100644 (file)
@@ -35,6 +35,7 @@
 #define DRV_VERSION    "1.5.0"
 #define DRV_RELDATE    "2010-10-09"
 
+#include <linux/types.h>
 
 /* A few user-configurable values.
    These may be modified when a driver module is loaded. */
@@ -55,7 +56,7 @@ static int rx_copybreak;
 
 /* Work-around for broken BIOSes: they are unable to get the chip back out of
    power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
-static int avoid_D3;
+static bool avoid_D3;
 
 /*
  * In case you are looking for 'options[]' or 'full_duplex[]', they
@@ -488,8 +489,8 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static const struct ethtool_ops netdev_ethtool_ops;
 static int  rhine_close(struct net_device *dev);
 static void rhine_shutdown (struct pci_dev *pdev);
-static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
-static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
+static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
+static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
 static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr);
 static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr);
 static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask);
@@ -1261,7 +1262,7 @@ static void rhine_update_vcam(struct net_device *dev)
        rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
 }
 
-static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
        struct rhine_private *rp = netdev_priv(dev);
 
@@ -1269,9 +1270,10 @@ static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
        set_bit(vid, rp->active_vlans);
        rhine_update_vcam(dev);
        spin_unlock_irq(&rp->lock);
+       return 0;
 }
 
-static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
        struct rhine_private *rp = netdev_priv(dev);
 
@@ -1279,6 +1281,7 @@ static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
        clear_bit(vid, rp->active_vlans);
        rhine_update_vcam(dev);
        spin_unlock_irq(&rp->lock);
+       return 0;
 }
 
 static void init_registers(struct net_device *dev)
@@ -2320,7 +2323,7 @@ static int __init rhine_init(void)
 #endif
        if (dmi_check_system(rhine_dmi_table)) {
                /* these BIOSes fail at PXE boot if chip is in D3 */
-               avoid_D3 = 1;
+               avoid_D3 = true;
                pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
        }
        else if (avoid_D3)
index 59bb5fd..4128d6b 100644 (file)
@@ -522,7 +522,7 @@ static void velocity_init_cam_filter(struct velocity_info *vptr)
        mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
 }
 
-static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
        struct velocity_info *vptr = netdev_priv(dev);
 
@@ -530,9 +530,10 @@ static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
        set_bit(vid, vptr->active_vlans);
        velocity_init_cam_filter(vptr);
        spin_unlock_irq(&vptr->lock);
+       return 0;
 }
 
-static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
        struct velocity_info *vptr = netdev_priv(dev);
 
@@ -540,6 +541,7 @@ static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid
        clear_bit(vid, vptr->active_vlans);
        velocity_init_cam_filter(vptr);
        spin_unlock_irq(&vptr->lock);
+       return 0;
 }
 
 static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
index 903a77b..f21addb 100644 (file)
@@ -1091,7 +1091,7 @@ static int __devinit temac_of_probe(struct platform_device *op)
 
        of_node_put(np); /* Finished with the DMA node; drop the reference */
 
-       if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
+       if (!lp->rx_irq || !lp->tx_irq) {
                dev_err(&op->dev, "could not determine irqs\n");
                rc = -ENOMEM;
                goto err_iounmap_2;
index dca6541..79013e5 100644 (file)
@@ -662,7 +662,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
  */
 static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
 {
-       bool tx_complete = 0;
+       bool tx_complete = false;
        struct net_device *dev = dev_id;
        struct net_local *lp = netdev_priv(dev);
        void __iomem *base_addr = lp->base_addr;
@@ -683,7 +683,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
                tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
                out_be32(base_addr + XEL_TSR_OFFSET, tx_status);
 
-               tx_complete = 1;
+               tx_complete = true;
        }
 
        /* Check if the Transmission for the second buffer is completed */
@@ -695,7 +695,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
                out_be32(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET,
                         tx_status);
 
-               tx_complete = 1;
+               tx_complete = true;
        }
 
        /* If there was a Tx interrupt, call the Tx Handler */
@@ -1129,7 +1129,7 @@ static int __devinit xemaclite_of_probe(struct platform_device *ofdev)
 
        /* Get IRQ for the device */
        rc = of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq);
-       if (rc == NO_IRQ) {
+       if (!rc) {
                dev_err(dev, "no IRQ found\n");
                return rc;
        }
index b45b2cc..64f403d 100644 (file)
@@ -197,7 +197,7 @@ static char *driver_name = DRIVER_NAME;
 
 static int max_baud = 4000000;
 #ifdef USE_PROBE
-static int do_probe = 0;
+static bool do_probe = false;
 #endif
 
 
index 8b1c348..6c95d40 100644 (file)
@@ -79,7 +79,7 @@ MODULE_AUTHOR("Daniele Peri <peri@csai.unipa.it>");
 MODULE_DESCRIPTION("SMC IrCC SIR/FIR controller driver");
 MODULE_LICENSE("GPL");
 
-static int smsc_nopnp = 1;
+static bool smsc_nopnp = true;
 module_param_named(nopnp, smsc_nopnp, bool, 0);
 MODULE_PARM_DESC(nopnp, "Do not use PNP to detect controller settings, defaults to true");
 
index 7413497..f2f820c 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/if_arp.h>
+#include <linux/if_vlan.h>
 #include <linux/if_link.h>
 #include <linux/if_macvlan.h>
 #include <net/rtnetlink.h>
@@ -520,26 +521,23 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
        return stats;
 }
 
-static void macvlan_vlan_rx_add_vid(struct net_device *dev,
+static int macvlan_vlan_rx_add_vid(struct net_device *dev,
                                    unsigned short vid)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct net_device *lowerdev = vlan->lowerdev;
-       const struct net_device_ops *ops = lowerdev->netdev_ops;
 
-       if (ops->ndo_vlan_rx_add_vid)
-               ops->ndo_vlan_rx_add_vid(lowerdev, vid);
+       return vlan_vid_add(lowerdev, vid);
 }
 
-static void macvlan_vlan_rx_kill_vid(struct net_device *dev,
+static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
                                     unsigned short vid)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct net_device *lowerdev = vlan->lowerdev;
-       const struct net_device_ops *ops = lowerdev->netdev_ops;
 
-       if (ops->ndo_vlan_rx_kill_vid)
-               ops->ndo_vlan_rx_kill_vid(lowerdev, vid);
+       vlan_vid_del(lowerdev, vid);
+       return 0;
 }
 
 static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
index 7c88d13..58dc117 100644 (file)
@@ -175,6 +175,14 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
        if (!numvtaps)
                goto out;
 
+       /* Check if we can use flow to select a queue */
+       rxq = skb_get_rxhash(skb);
+       if (rxq) {
+               tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
+               if (tap)
+                       goto out;
+       }
+
        if (likely(skb_rx_queue_recorded(skb))) {
                rxq = skb_get_rx_queue(skb);
 
@@ -186,14 +194,6 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
                        goto out;
        }
 
-       /* Check if we can use flow to select a queue */
-       rxq = skb_get_rxhash(skb);
-       if (rxq) {
-               tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
-               if (tap)
-                       goto out;
-       }
-
        /* Everything failed - find first available queue */
        for (rxq = 0; rxq < MAX_MACVTAP_QUEUES; rxq++) {
                tap = rcu_dereference(vlan->taps[rxq]);
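
The macvtap reordering above makes the flow hash the first choice when picking a tap queue, ahead of the recorded RX queue. A rough, hedged sketch of that selection step (pick_queue is a hypothetical helper; the real code also falls back to the recorded queue and then to the first available tap):

#include <linux/types.h>

/* Hypothetical sketch of flow-hash based queue selection: a non-zero flow
 * hash keeps every packet of a flow on the same tap queue. */
static unsigned int pick_queue(u32 rxhash, unsigned int numvtaps)
{
	if (rxhash && numvtaps)
		return rxhash % numvtaps;
	return 0;	/* caller falls back to other selection methods */
}
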
index a702443..fbdcdf8 100644 (file)
@@ -131,3 +131,7 @@ config MDIO_OCTEON
          If in doubt, say Y.
 
 endif # PHYLIB
+
+config MICREL_KS8995MA
+       tristate "Micrel KS8995MA 5-port 10/100 managed Ethernet switch"
+       depends on SPI
index 2333215..e15c83f 100644 (file)
@@ -23,3 +23,4 @@ obj-$(CONFIG_DP83640_PHY)     += dp83640.o
 obj-$(CONFIG_STE10XP)          += ste10Xp.o
 obj-$(CONFIG_MICREL_PHY)       += micrel.o
 obj-$(CONFIG_MDIO_OCTEON)      += mdio-octeon.o
+obj-$(CONFIG_MICREL_KS8995MA)  += spi_ks8995.o
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
new file mode 100644 (file)
index 0000000..116a2dd
--- /dev/null
@@ -0,0 +1,375 @@
+/*
+ * SPI driver for Micrel/Kendin KS8995M ethernet switch
+ *
+ * Copyright (C) 2008 Gabor Juhos <juhosg at openwrt.org>
+ *
+ * This file was based on: drivers/spi/at25.c
+ *     Copyright (C) 2006 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+
+#include <linux/spi/spi.h>
+
+#define DRV_VERSION            "0.1.1"
+#define DRV_DESC               "Micrel KS8995 Ethernet switch SPI driver"
+
+/* ------------------------------------------------------------------------ */
+
+#define KS8995_REG_ID0         0x00    /* Chip ID0 */
+#define KS8995_REG_ID1         0x01    /* Chip ID1 */
+
+#define KS8995_REG_GC0         0x02    /* Global Control 0 */
+#define KS8995_REG_GC1         0x03    /* Global Control 1 */
+#define KS8995_REG_GC2         0x04    /* Global Control 2 */
+#define KS8995_REG_GC3         0x05    /* Global Control 3 */
+#define KS8995_REG_GC4         0x06    /* Global Control 4 */
+#define KS8995_REG_GC5         0x07    /* Global Control 5 */
+#define KS8995_REG_GC6         0x08    /* Global Control 6 */
+#define KS8995_REG_GC7         0x09    /* Global Control 7 */
+#define KS8995_REG_GC8         0x0a    /* Global Control 8 */
+#define KS8995_REG_GC9         0x0b    /* Global Control 9 */
+
+#define KS8995_REG_PC(p, r)    ((0x10 * (p)) + (r))            /* Port Control */
+#define KS8995_REG_PS(p, r)    ((0x10 * (p)) + (r) + 0xe)      /* Port Status */
+
+#define KS8995_REG_TPC0                0x60    /* TOS Priority Control 0 */
+#define KS8995_REG_TPC1                0x61    /* TOS Priority Control 1 */
+#define KS8995_REG_TPC2                0x62    /* TOS Priority Control 2 */
+#define KS8995_REG_TPC3                0x63    /* TOS Priority Control 3 */
+#define KS8995_REG_TPC4                0x64    /* TOS Priority Control 4 */
+#define KS8995_REG_TPC5                0x65    /* TOS Priority Control 5 */
+#define KS8995_REG_TPC6                0x66    /* TOS Priority Control 6 */
+#define KS8995_REG_TPC7                0x67    /* TOS Priority Control 7 */
+
+#define KS8995_REG_MAC0                0x68    /* MAC address 0 */
+#define KS8995_REG_MAC1                0x69    /* MAC address 1 */
+#define KS8995_REG_MAC2                0x6a    /* MAC address 2 */
+#define KS8995_REG_MAC3                0x6b    /* MAC address 3 */
+#define KS8995_REG_MAC4                0x6c    /* MAC address 4 */
+#define KS8995_REG_MAC5                0x6d    /* MAC address 5 */
+
+#define KS8995_REG_IAC0                0x6e    /* Indirect Access Control 0 */
+#define KS8995_REG_IAC1                0x6f    /* Indirect Access Control 1 */
+#define KS8995_REG_IAD7                0x70    /* Indirect Access Data 7 */
+#define KS8995_REG_IAD6                0x71    /* Indirect Access Data 6 */
+#define KS8995_REG_IAD5                0x72    /* Indirect Access Data 5 */
+#define KS8995_REG_IAD4                0x73    /* Indirect Access Data 4 */
+#define KS8995_REG_IAD3                0x74    /* Indirect Access Data 3 */
+#define KS8995_REG_IAD2                0x75    /* Indirect Access Data 2 */
+#define KS8995_REG_IAD1                0x76    /* Indirect Access Data 1 */
+#define KS8995_REG_IAD0                0x77    /* Indirect Access Data 0 */
+
+#define KS8995_REGS_SIZE       0x80
+
+#define ID1_CHIPID_M           0xf
+#define ID1_CHIPID_S           4
+#define ID1_REVISION_M         0x7
+#define ID1_REVISION_S         1
+#define ID1_START_SW           1       /* start the switch */
+
+#define FAMILY_KS8995          0x95
+#define CHIPID_M               0
+
+#define KS8995_CMD_WRITE       0x02U
+#define KS8995_CMD_READ                0x03U
+
+#define KS8995_RESET_DELAY     10 /* usec */
+
+struct ks8995_pdata {
+       /* not yet implemented */
+};
+
+struct ks8995_switch {
+       struct spi_device       *spi;
+       struct mutex            lock;
+       struct ks8995_pdata     *pdata;
+};
+
+static inline u8 get_chip_id(u8 val)
+{
+       return (val >> ID1_CHIPID_S) & ID1_CHIPID_M;
+}
+
+static inline u8 get_chip_rev(u8 val)
+{
+       return (val >> ID1_REVISION_S) & ID1_REVISION_M;
+}
+
+/* ------------------------------------------------------------------------ */
+static int ks8995_read(struct ks8995_switch *ks, char *buf,
+                unsigned offset, size_t count)
+{
+       u8 cmd[2];
+       struct spi_transfer t[2];
+       struct spi_message m;
+       int err;
+
+       spi_message_init(&m);
+
+       memset(&t, 0, sizeof(t));
+
+       t[0].tx_buf = cmd;
+       t[0].len = sizeof(cmd);
+       spi_message_add_tail(&t[0], &m);
+
+       t[1].rx_buf = buf;
+       t[1].len = count;
+       spi_message_add_tail(&t[1], &m);
+
+       cmd[0] = KS8995_CMD_READ;
+       cmd[1] = offset;
+
+       mutex_lock(&ks->lock);
+       err = spi_sync(ks->spi, &m);
+       mutex_unlock(&ks->lock);
+
+       return err ? err : count;
+}
+
+
+static int ks8995_write(struct ks8995_switch *ks, char *buf,
+                unsigned offset, size_t count)
+{
+       u8 cmd[2];
+       struct spi_transfer t[2];
+       struct spi_message m;
+       int err;
+
+       spi_message_init(&m);
+
+       memset(&t, 0, sizeof(t));
+
+       t[0].tx_buf = cmd;
+       t[0].len = sizeof(cmd);
+       spi_message_add_tail(&t[0], &m);
+
+       t[1].tx_buf = buf;
+       t[1].len = count;
+       spi_message_add_tail(&t[1], &m);
+
+       cmd[0] = KS8995_CMD_WRITE;
+       cmd[1] = offset;
+
+       mutex_lock(&ks->lock);
+       err = spi_sync(ks->spi, &m);
+       mutex_unlock(&ks->lock);
+
+       return err ? err : count;
+}
+
+static inline int ks8995_read_reg(struct ks8995_switch *ks, u8 addr, u8 *buf)
+{
+       return (ks8995_read(ks, buf, addr, 1) != 1);
+}
+
+static inline int ks8995_write_reg(struct ks8995_switch *ks, u8 addr, u8 val)
+{
+       char buf = val;
+
+       return (ks8995_write(ks, &buf, addr, 1) != 1);
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int ks8995_stop(struct ks8995_switch *ks)
+{
+       return ks8995_write_reg(ks, KS8995_REG_ID1, 0);
+}
+
+static int ks8995_start(struct ks8995_switch *ks)
+{
+       return ks8995_write_reg(ks, KS8995_REG_ID1, 1);
+}
+
+static int ks8995_reset(struct ks8995_switch *ks)
+{
+       int err;
+
+       err = ks8995_stop(ks);
+       if (err)
+               return err;
+
+       udelay(KS8995_RESET_DELAY);
+
+       return ks8995_start(ks);
+}
+
+/* ------------------------------------------------------------------------ */
+
+static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
+       struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
+{
+       struct device *dev;
+       struct ks8995_switch *ks8995;
+
+       dev = container_of(kobj, struct device, kobj);
+       ks8995 = dev_get_drvdata(dev);
+
+       if (unlikely(off > KS8995_REGS_SIZE))
+               return 0;
+
+       if ((off + count) > KS8995_REGS_SIZE)
+               count = KS8995_REGS_SIZE - off;
+
+       if (unlikely(!count))
+               return count;
+
+       return ks8995_read(ks8995, buf, off, count);
+}
+
+
+static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
+       struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
+{
+       struct device *dev;
+       struct ks8995_switch *ks8995;
+
+       dev = container_of(kobj, struct device, kobj);
+       ks8995 = dev_get_drvdata(dev);
+
+       if (unlikely(off >= KS8995_REGS_SIZE))
+               return -EFBIG;
+
+       if ((off + count) > KS8995_REGS_SIZE)
+               count = KS8995_REGS_SIZE - off;
+
+       if (unlikely(!count))
+               return count;
+
+       return ks8995_write(ks8995, buf, off, count);
+}
+
+
+static struct bin_attribute ks8995_registers_attr = {
+       .attr = {
+               .name   = "registers",
+               .mode   = S_IRUSR | S_IWUSR,
+       },
+       .size   = KS8995_REGS_SIZE,
+       .read   = ks8995_registers_read,
+       .write  = ks8995_registers_write,
+};
+
+/* ------------------------------------------------------------------------ */
+
+static int __devinit ks8995_probe(struct spi_device *spi)
+{
+       struct ks8995_switch    *ks;
+       struct ks8995_pdata     *pdata;
+       u8      ids[2];
+       int     err;
+
+       /* Chip description */
+       pdata = spi->dev.platform_data;
+
+       ks = kzalloc(sizeof(*ks), GFP_KERNEL);
+       if (!ks) {
+               dev_err(&spi->dev, "no memory for private data\n");
+               return -ENOMEM;
+       }
+
+       mutex_init(&ks->lock);
+       ks->pdata = pdata;
+       ks->spi = spi_dev_get(spi);
+       dev_set_drvdata(&spi->dev, ks);
+
+       spi->mode = SPI_MODE_0;
+       spi->bits_per_word = 8;
+       err = spi_setup(spi);
+       if (err) {
+               dev_err(&spi->dev, "spi_setup failed, err=%d\n", err);
+               goto err_drvdata;
+       }
+
+       err = ks8995_read(ks, ids, KS8995_REG_ID0, sizeof(ids));
+       if (err < 0) {
+               dev_err(&spi->dev, "unable to read id registers, err=%d\n",
+                               err);
+               goto err_drvdata;
+       }
+
+       switch (ids[0]) {
+       case FAMILY_KS8995:
+               break;
+       default:
+               dev_err(&spi->dev, "unknown family id:%02x\n", ids[0]);
+               err = -ENODEV;
+               goto err_drvdata;
+       }
+
+       err = ks8995_reset(ks);
+       if (err)
+               goto err_drvdata;
+
+       err = sysfs_create_bin_file(&spi->dev.kobj, &ks8995_registers_attr);
+       if (err) {
+               dev_err(&spi->dev, "unable to create sysfs file, err=%d\n",
+                                   err);
+               goto err_drvdata;
+       }
+
+       dev_info(&spi->dev, "KS89%02X device found, Chip ID:%01x, "
+                       "Revision:%01x\n", ids[0],
+                       get_chip_id(ids[1]), get_chip_rev(ids[1]));
+
+       return 0;
+
+err_drvdata:
+       dev_set_drvdata(&spi->dev, NULL);
+       kfree(ks);
+       return err;
+}
+
+static int __devexit ks8995_remove(struct spi_device *spi)
+{
+       struct ks8995_switch    *ks8995;
+
+       ks8995 = dev_get_drvdata(&spi->dev);
+       sysfs_remove_bin_file(&spi->dev.kobj, &ks8995_registers_attr);
+
+       dev_set_drvdata(&spi->dev, NULL);
+       kfree(ks8995);
+
+       return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static struct spi_driver ks8995_driver = {
+       .driver = {
+               .name   = "spi-ks8995",
+               .bus    = &spi_bus_type,
+               .owner  = THIS_MODULE,
+       },
+       .probe   = ks8995_probe,
+       .remove  = __devexit_p(ks8995_remove),
+};
+
+static int __init ks8995_init(void)
+{
+       printk(KERN_INFO DRV_DESC " version " DRV_VERSION "\n");
+
+       return spi_register_driver(&ks8995_driver);
+}
+module_init(ks8995_init);
+
+static void __exit ks8995_exit(void)
+{
+       spi_unregister_driver(&ks8995_driver);
+}
+module_exit(ks8995_exit);
+
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>");
+MODULE_LICENSE("GPL v2");
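
The new driver registers as "spi-ks8995", so it binds to any SPI device declared with that modalias. A hedged board-code sketch (bus number, chip select and clock rate are illustrative only, not taken from this merge):

#include <linux/init.h>
#include <linux/spi/spi.h>

/* Hypothetical board-support sketch: describe a KS8995 switch on SPI bus 0,
 * chip select 1, so the spi-ks8995 driver above can bind to it. Board init
 * code would call this before the SPI master is registered. */
static struct spi_board_info example_ks8995_info __initdata = {
	.modalias	= "spi-ks8995",
	.bus_num	= 0,			/* board specific */
	.chip_select	= 1,			/* board specific */
	.max_speed_hz	= 2 * 1000 * 1000,	/* illustrative */
};

static int __init example_ks8995_setup(void)
{
	return spi_register_board_info(&example_ks8995_info, 1);
}
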
index ede899c..c1c9293 100644 (file)
@@ -423,10 +423,8 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
        lock_sock(sk);
 
        opt->src_addr = sp->sa_addr.pptp;
-       if (add_chan(po)) {
-               release_sock(sk);
+       if (add_chan(po))
                error = -EBUSY;
-       }
 
        release_sock(sk);
        return error;
index 064155d..ed2a862 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/ctype.h>
 #include <linux/notifier.h>
 #include <linux/netdevice.h>
+#include <linux/if_vlan.h>
 #include <linux/if_arp.h>
 #include <linux/socket.h>
 #include <linux/etherdevice.h>
@@ -587,6 +588,13 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
                goto err_dev_open;
        }
 
+       err = vlan_vids_add_by_dev(port_dev, dev);
+       if (err) {
+               netdev_err(dev, "Failed to add vlan ids to device %s\n",
+                               portname);
+               goto err_vids_add;
+       }
+
        err = netdev_set_master(port_dev, dev);
        if (err) {
                netdev_err(dev, "Device %s failed to set master\n", portname);
@@ -614,6 +622,9 @@ err_handler_register:
        netdev_set_master(port_dev, NULL);
 
 err_set_master:
+       vlan_vids_del_by_dev(port_dev, dev);
+
+err_vids_add:
        dev_close(port_dev);
 
 err_dev_open:
@@ -647,6 +658,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
        team_adjust_ops(team);
        netdev_rx_handler_unregister(port_dev);
        netdev_set_master(port_dev, NULL);
+       vlan_vids_del_by_dev(port_dev, dev);
        dev_close(port_dev);
        team_port_leave(team, port);
        team_port_set_orig_mac(port);
@@ -902,34 +914,45 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
        return stats;
 }
 
-static void team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
+static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
 {
        struct team *team = netdev_priv(dev);
        struct team_port *port;
+       int err;
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(port, &team->port_list, list) {
-               const struct net_device_ops *ops = port->dev->netdev_ops;
-
-               if (ops->ndo_vlan_rx_add_vid)
-                       ops->ndo_vlan_rx_add_vid(port->dev, vid);
+       /*
+        * Although this is a reader, it's guarded by the team lock. It's not
+        * possible to traverse the list in reverse under rcu_read_lock.
+        */
+       mutex_lock(&team->lock);
+       list_for_each_entry(port, &team->port_list, list) {
+               err = vlan_vid_add(port->dev, vid);
+               if (err)
+                       goto unwind;
        }
-       rcu_read_unlock();
+       mutex_unlock(&team->lock);
+
+       return 0;
+
+unwind:
+       list_for_each_entry_continue_reverse(port, &team->port_list, list)
+               vlan_vid_del(port->dev, vid);
+       mutex_unlock(&team->lock);
+
+       return err;
 }
 
-static void team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
+static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
 {
        struct team *team = netdev_priv(dev);
        struct team_port *port;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(port, &team->port_list, list) {
-               const struct net_device_ops *ops = port->dev->netdev_ops;
-
-               if (ops->ndo_vlan_rx_kill_vid)
-                       ops->ndo_vlan_rx_kill_vid(port->dev, vid);
-       }
+       list_for_each_entry_rcu(port, &team->port_list, list)
+               vlan_vid_del(port->dev, vid);
        rcu_read_unlock();
+
+       return 0;
 }
 
 static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
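
team_vlan_rx_add_vid() above introduces an add-then-unwind pattern: if vlan_vid_add() fails on one port, the ports already handled are rolled back via list_for_each_entry_continue_reverse(). The same idiom in a generic, hedged form (example_item, example_add and example_del are hypothetical stand-ins):

#include <linux/list.h>

struct example_item {
	struct list_head list;
};

static int example_add(struct example_item *item) { return 0; }	/* stub */
static void example_del(struct example_item *item) { }			/* stub */

/* Apply an operation to every list entry; on failure, undo it on the
 * entries that were already processed, in reverse order. */
static int example_apply_all(struct list_head *head)
{
	struct example_item *item;
	int err;

	list_for_each_entry(item, head, list) {
		err = example_add(item);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	list_for_each_entry_continue_reverse(item, head, list)
		example_del(item);
	return err;
}
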
index e6fed4d..dbdca22 100644 (file)
@@ -36,7 +36,7 @@
 #include <linux/usb/usbnet.h>
 #include <linux/slab.h>
 
-#define DRIVER_VERSION "08-Nov-2011"
+#define DRIVER_VERSION "22-Dec-2011"
 #define DRIVER_NAME "asix"
 
 /* ASIX AX8817X based USB 2.0 Ethernet Devices */
@@ -689,6 +689,10 @@ asix_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
        }
        wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
        wolinfo->wolopts = 0;
+       if (opt & AX_MONITOR_LINK)
+               wolinfo->wolopts |= WAKE_PHY;
+       if (opt & AX_MONITOR_MAGIC)
+               wolinfo->wolopts |= WAKE_MAGIC;
 }
 
 static int
@@ -1655,6 +1659,10 @@ static const struct usb_device_id        products [] = {
        // ASIX 88772a
        USB_DEVICE(0x0db0, 0xa877),
        .driver_info = (unsigned long) &ax88772_info,
+}, {
+       // Asus USB Ethernet Adapter
+       USB_DEVICE(0x0b95, 0x7e2b),
+       .driver_info = (unsigned long) &ax88772_info,
 },
        { },            // END
 };
index 769f509..5d99b8c 100644 (file)
@@ -55,8 +55,8 @@ static const char driver_name[] = "pegasus";
 #define        BMSR_MEDIA      (BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | \
                        BMSR_100FULL | BMSR_ANEGCAPABLE)
 
-static int loopback;
-static int mii_mode;
+static bool loopback;
+static bool mii_mode;
 static char *devid;
 
 static struct usb_eth_dev usb_dev_id[] = {
@@ -517,7 +517,7 @@ static inline int reset_mac(pegasus_t *pegasus)
        for (i = 0; i < REG_TIMEOUT; i++) {
                get_registers(pegasus, EthCtrl1, 1, &data);
                if (~data & 0x08) {
-                       if (loopback & 1)
+                       if (loopback)
                                break;
                        if (mii_mode && (pegasus->features & HAS_HOME_PNA))
                                set_register(pegasus, Gpio1, 0x34);
@@ -561,7 +561,7 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
                data[1] |= 0x10;        /* set 100 Mbps */
        if (mii_mode)
                data[1] = 0;
-       data[2] = (loopback & 1) ? 0x09 : 0x01;
+       data[2] = loopback ? 0x09 : 0x01;
 
        memcpy(pegasus->eth_regs, data, sizeof(data));
        ret = set_registers(pegasus, EthCtrl0, 3, data);
index 7d62c39..0d5da82 100644 (file)
@@ -76,7 +76,7 @@ struct usb_context {
        struct usbnet *dev;
 };
 
-static int turbo_mode = true;
+static bool turbo_mode = true;
 module_param(turbo_mode, bool, 0644);
 MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
 
index 56f3894..db217ad 100644 (file)
@@ -59,7 +59,7 @@ struct usb_context {
        struct usbnet *dev;
 };
 
-static int turbo_mode = true;
+static bool turbo_mode = true;
 module_param(turbo_mode, bool, 0644);
 MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
 
index 5a96172..d1c3dce 100644 (file)
@@ -30,7 +30,7 @@
 static int napi_weight = 128;
 module_param(napi_weight, int, 0444);
 
-static int csum = 1, gso = 1;
+static bool csum = true, gso = true;
 module_param(csum, bool, 0444);
 module_param(gso, bool, 0444);
 
@@ -855,7 +855,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
        kfree(buf);
 }
 
-static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
+static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
 {
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;
@@ -865,9 +865,10 @@ static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
                dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
+       return 0;
 }
 
-static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
+static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
 {
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;
@@ -877,6 +878,7 @@ static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
                dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
+       return 0;
 }
 
 static void virtnet_get_ringparam(struct net_device *dev,
index d96bfb1..de7fc34 100644 (file)
@@ -1926,7 +1926,7 @@ vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
 }
 
 
-static void
+static int
 vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -1943,10 +1943,12 @@ vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        }
 
        set_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
 
-static void
+static int
 vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -1963,6 +1965,8 @@ vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        }
 
        clear_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
 
@@ -2163,7 +2167,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
                rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
                get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize);
                for (i = 0; i < rssConf->indTableSize; i++)
-                       rssConf->indTable[i] = i % adapter->num_rx_queues;
+                       rssConf->indTable[i] = ethtool_rxfh_indir_default(
+                               i, adapter->num_rx_queues);
 
                devRead->rssConfDesc.confVer = 1;
                devRead->rssConfDesc.confLen = sizeof(*rssConf);
index b492ee1..a3eb75a 100644 (file)
@@ -565,44 +565,38 @@ vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
 }
 
 #ifdef VMXNET3_RSS
+static u32
+vmxnet3_get_rss_indir_size(struct net_device *netdev)
+{
+       struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+       struct UPT1_RSSConf *rssConf = adapter->rss_conf;
+
+       return rssConf->indTableSize;
+}
+
 static int
-vmxnet3_get_rss_indir(struct net_device *netdev,
-                     struct ethtool_rxfh_indir *p)
+vmxnet3_get_rss_indir(struct net_device *netdev, u32 *p)
 {
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
        struct UPT1_RSSConf *rssConf = adapter->rss_conf;
-       unsigned int n = min_t(unsigned int, p->size, rssConf->indTableSize);
+       unsigned int n = rssConf->indTableSize;
 
-       p->size = rssConf->indTableSize;
        while (n--)
-               p->ring_index[n] = rssConf->indTable[n];
+               p[n] = rssConf->indTable[n];
        return 0;
 
 }
 
 static int
-vmxnet3_set_rss_indir(struct net_device *netdev,
-                     const struct ethtool_rxfh_indir *p)
+vmxnet3_set_rss_indir(struct net_device *netdev, const u32 *p)
 {
        unsigned int i;
        unsigned long flags;
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
        struct UPT1_RSSConf *rssConf = adapter->rss_conf;
 
-       if (p->size != rssConf->indTableSize)
-               return -EINVAL;
-       for (i = 0; i < rssConf->indTableSize; i++) {
-               /*
-                * Return with error code if any of the queue indices
-                * is out of range
-                */
-               if (p->ring_index[i] < 0 ||
-                   p->ring_index[i] >= adapter->num_rx_queues)
-                       return -EINVAL;
-       }
-
        for (i = 0; i < rssConf->indTableSize; i++)
-               rssConf->indTable[i] = p->ring_index[i];
+               rssConf->indTable[i] = p[i];
 
        spin_lock_irqsave(&adapter->cmd_lock, flags);
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
@@ -629,6 +623,7 @@ static struct ethtool_ops vmxnet3_ethtool_ops = {
        .set_ringparam     = vmxnet3_set_ringparam,
        .get_rxnfc         = vmxnet3_get_rxnfc,
 #ifdef VMXNET3_RSS
+       .get_rxfh_indir_size = vmxnet3_get_rss_indir_size,
        .get_rxfh_indir    = vmxnet3_get_rss_indir,
        .set_rxfh_indir    = vmxnet3_set_rss_indir,
 #endif
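
The vmxnet3 changes above report the indirection table size via get_rxfh_indir_size and seed the table with ethtool_rxfh_indir_default(). That default is just a round-robin spread of table entries across RX queues; a minimal equivalent sketch (example_rxfh_indir_default is illustrative, not the kernel helper itself):

#include <linux/types.h>

/* Entry i of the RSS indirection table points at RX queue (i % n_rx_rings). */
static inline u32 example_rxfh_indir_default(u32 index, u32 n_rx_rings)
{
	return index % n_rx_rings;
}
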
index 783168c..d43f4ef 100644 (file)
@@ -155,7 +155,7 @@ static int  emancipate( struct net_device * );
 static const char  version[] =
        "Granch SBNI12 driver ver 5.0.1  Jun 22 2001  Denis I.Timofeev.\n";
 
-static int  skip_pci_probe     __initdata = 0;
+static bool skip_pci_probe     __initdata = false;
 static int  scandone   __initdata = 0;
 static int  num                __initdata = 0;
 
index 0b4fd05..4f77484 100644 (file)
@@ -362,7 +362,7 @@ static int io=0x238;
 static int txdma=1;
 static int rxdma=3;
 static int irq=5;
-static int slow=0;
+static bool slow=false;
 
 module_param(io, int, 0);
 MODULE_PARM_DESC(io, "The I/O base of the Sealevel card");
index 4b9ecb2..f20886a 100644 (file)
@@ -562,7 +562,7 @@ void i2400m_tx_new(struct i2400m *i2400m)
 {
        struct device *dev = i2400m_dev(i2400m);
        struct i2400m_msg_hdr *tx_msg;
-       bool try_head = 0;
+       bool try_head = false;
        BUG_ON(i2400m->tx_msg != NULL);
        /*
         * In certain situations, TX queue might have enough space to
@@ -580,7 +580,7 @@ try_head:
        else if (tx_msg == TAIL_FULL) {
                i2400m_tx_skip_tail(i2400m);
                d_printf(2, dev, "new TX message: tail full, trying head\n");
-               try_head = 1;
+               try_head = true;
                goto try_head;
        }
        memset(tx_msg, 0, I2400M_TX_PLD_SIZE);
@@ -720,7 +720,7 @@ int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
        unsigned long flags;
        size_t padded_len;
        void *ptr;
-       bool try_head = 0;
+       bool try_head = false;
        unsigned is_singleton = pl_type == I2400M_PT_RESET_WARM
                || pl_type == I2400M_PT_RESET_COLD;
 
@@ -771,7 +771,7 @@ try_new:
                d_printf(2, dev, "pl append: tail full\n");
                i2400m_tx_close(i2400m);
                i2400m_tx_skip_tail(i2400m);
-               try_head = 1;
+               try_head = true;
                goto try_new;
        } else if (ptr == NULL) {       /* All full */
                result = -ENOSPC;
index ac357ac..99ef81b 100644 (file)
@@ -177,7 +177,6 @@ retry:
 static
 int i2400mu_txd(void *_i2400mu)
 {
-       int result = 0;
        struct i2400mu *i2400mu = _i2400mu;
        struct i2400m *i2400m = &i2400mu->i2400m;
        struct device *dev = &i2400mu->usb_iface->dev;
@@ -208,16 +207,14 @@ int i2400mu_txd(void *_i2400mu)
                /* Yeah, we ignore errors ... not much we can do */
                i2400mu_tx(i2400mu, tx_msg, tx_msg_size);
                i2400m_tx_msg_sent(i2400m);     /* ack it, advance the FIFO */
-               if (result < 0)
-                       break;
        }
 
        spin_lock_irqsave(&i2400m->tx_lock, flags);
        i2400mu->tx_kthread = NULL;
        spin_unlock_irqrestore(&i2400m->tx_lock, flags);
 
-       d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
-       return result;
+       d_fnend(4, dev, "(i2400mu %p)\n", i2400mu);
+       return 0;
 }
 
 
index 4596c33..c1d699f 100644 (file)
@@ -152,6 +152,7 @@ struct ath_common {
        struct ath_cycle_counters cc_survey;
 
        struct ath_regulatory regulatory;
+       struct ath_regulatory reg_world_copy;
        const struct ath_ops *ops;
        const struct ath_bus_ops *bus_ops;
 
@@ -214,6 +215,10 @@ do {                                                               \
  * @ATH_DBG_HWTIMER: hardware timer handling
  * @ATH_DBG_BTCOEX: bluetooth coexistence
  * @ATH_DBG_BSTUCK: stuck beacons
+ * @ATH_DBG_MCI: Message Coexistence Interface, a private protocol
+ *     used exclusively for WLAN-BT coexistence starting from
+ *     AR9462.
+ * @ATH_DBG_DFS: radar detection
  * @ATH_DBG_ANY: enable all debugging
  *
  * The debug level is used to control the amount and type of debugging output
@@ -240,6 +245,7 @@ enum ATH_DEBUG {
        ATH_DBG_WMI             = 0x00004000,
        ATH_DBG_BSTUCK          = 0x00008000,
        ATH_DBG_MCI             = 0x00010000,
+       ATH_DBG_DFS             = 0x00020000,
        ATH_DBG_ANY             = 0xffffffff
 };
 
index e5be7e7..ee7ea57 100644 (file)
@@ -166,7 +166,9 @@ static int ath_ahb_probe(struct platform_device *pdev)
                if (to_platform_device(ah->dev)->id == 0 &&
                    (bcfg->config->flags & (BD_WLAN0 | BD_WLAN1)) ==
                     (BD_WLAN1 | BD_WLAN0))
-                       __set_bit(ATH_STAT_2G_DISABLED, ah->status);
+                       ah->ah_capabilities.cap_needs_2GHz_ovr = true;
+               else
+                       ah->ah_capabilities.cap_needs_2GHz_ovr = false;
        }
 
        ret = ath5k_init_ah(ah, &ath_ahb_bus_ops);
index bea90e6..bf67416 100644 (file)
  * or reducing sensitivity as necessary.
  *
  * The parameters are:
+ *
  *   - "noise immunity"
+ *
  *   - "spur immunity"
+ *
  *   - "firstep level"
+ *
  *   - "OFDM weak signal detection"
+ *
  *   - "CCK weak signal detection"
  *
  * Basically we look at the amount of OFDM and CCK timing errors we get and then
  * raise or lower immunity accordingly by setting one or more of these
  * parameters.
+ *
  * Newer chipsets have PHY error counters in hardware which will generate a MIB
  * interrupt when they overflow. Older hardware has to enable PHY error frames
  * by setting a RX flag and then count every single PHY error. When a specified
  */
 
 
-/*** ANI parameter control ***/
+/***********************\
+* ANI parameter control *
+\***********************/
 
 /**
  * ath5k_ani_set_noise_immunity_level() - Set noise immunity level
- *
+ * @ah: The &struct ath5k_hw
  * @level: level between 0 and @ATH5K_ANI_MAX_NOISE_IMM_LVL
  */
 void
@@ -91,12 +99,11 @@ ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
        ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
 }
 
-
 /**
  * ath5k_ani_set_spur_immunity_level() - Set spur immunity level
- *
+ * @ah: The &struct ath5k_hw
  * @level: level between 0 and @max_spur_level (the maximum level is dependent
- *     on the chip revision).
+ * on the chip revision).
  */
 void
 ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
@@ -117,10 +124,9 @@ ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
        ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
 }
 
-
 /**
  * ath5k_ani_set_firstep_level() - Set "firstep" level
- *
+ * @ah: The &struct ath5k_hw
  * @level: level between 0 and @ATH5K_ANI_MAX_FIRSTEP_LVL
  */
 void
@@ -140,11 +146,9 @@ ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
        ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
 }
 
-
 /**
- * ath5k_ani_set_ofdm_weak_signal_detection() - Control OFDM weak signal
- *                                             detection
- *
+ * ath5k_ani_set_ofdm_weak_signal_detection() - Set OFDM weak signal detection
+ * @ah: The &struct ath5k_hw
  * @on: turn on or off
  */
 void
@@ -182,10 +186,9 @@ ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on)
                          on ? "on" : "off");
 }
 
-
 /**
- * ath5k_ani_set_cck_weak_signal_detection() - control CCK weak signal detection
- *
+ * ath5k_ani_set_cck_weak_signal_detection() - Set CCK weak signal detection
+ * @ah: The &struct ath5k_hw
  * @on: turn on or off
  */
 void
@@ -200,13 +203,16 @@ ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on)
 }
 
 
-/*** ANI algorithm ***/
+/***************\
+* ANI algorithm *
+\***************/
 
 /**
  * ath5k_ani_raise_immunity() - Increase noise immunity
- *
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
  * @ofdm_trigger: If this is true we are called because of too many OFDM errors,
- *     the algorithm will tune more parameters then.
+ * the algorithm will tune more parameters in that case.
  *
  * Try to raise noise immunity (=decrease sensitivity) in several steps
  * depending on the average RSSI of the beacons we received.
@@ -290,9 +296,10 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
        */
 }
 
-
 /**
  * ath5k_ani_lower_immunity() - Decrease noise immunity
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
  *
  * Try to lower noise immunity (=increase sensitivity) in several steps
  * depending on the average RSSI of the beacons we received.
@@ -352,9 +359,10 @@ ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
        }
 }
 
-
 /**
  * ath5k_hw_ani_get_listen_time() - Update counters and return listening time
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
  *
  * Return an approximation of the time spent "listening" in milliseconds (ms)
  * since the last call of this function.
@@ -379,9 +387,10 @@ ath5k_hw_ani_get_listen_time(struct ath5k_hw *ah, struct ath5k_ani_state *as)
        return listen;
 }
 
-
 /**
  * ath5k_ani_save_and_clear_phy_errors() - Clear and save PHY error counters
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
  *
  * Clear the PHY error counters as soon as possible, since this might be called
  * from a MIB interrupt and we want to make sure we don't get interrupted again.
@@ -429,14 +438,14 @@ ath5k_ani_save_and_clear_phy_errors(struct ath5k_hw *ah,
        return 1;
 }
 
-
 /**
  * ath5k_ani_period_restart() - Restart ANI period
+ * @as: The &struct ath5k_ani_state
  *
  * Just reset counters, so they are clear for the next "ani period".
  */
 static void
-ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as)
+ath5k_ani_period_restart(struct ath5k_ani_state *as)
 {
        /* keep last values for debugging */
        as->last_ofdm_errors = as->ofdm_errors;
@@ -448,9 +457,9 @@ ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as)
        as->listen_time = 0;
 }
 
-
 /**
  * ath5k_ani_calibration() - The main ANI calibration function
+ * @ah: The &struct ath5k_hw
  *
  * We count OFDM and CCK errors relative to the time where we did not send or
  * receive ("listen" time) and raise or lower immunity accordingly.
@@ -492,7 +501,7 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
                /* too many PHY errors - we have to raise immunity */
                bool ofdm_flag = as->ofdm_errors > ofdm_high ? true : false;
                ath5k_ani_raise_immunity(ah, as, ofdm_flag);
-               ath5k_ani_period_restart(ah, as);
+               ath5k_ani_period_restart(as);
 
        } else if (as->listen_time > 5 * ATH5K_ANI_LISTEN_PERIOD) {
                /* If more than 5 (TODO: why 5?) periods have passed and we got
@@ -504,15 +513,18 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
                if (as->ofdm_errors <= ofdm_low && as->cck_errors <= cck_low)
                        ath5k_ani_lower_immunity(ah, as);
 
-               ath5k_ani_period_restart(ah, as);
+               ath5k_ani_period_restart(as);
        }
 }
 
 
-/*** INTERRUPT HANDLER ***/
+/*******************\
+* Interrupt handler *
+\*******************/
 
 /**
  * ath5k_ani_mib_intr() - Interrupt handler for ANI MIB counters
+ * @ah: The &struct ath5k_hw
  *
  * Just read & reset the registers quickly, so they don't generate more
  * interrupts, save the counters and schedule the tasklet to decide whether
@@ -549,9 +561,11 @@ ath5k_ani_mib_intr(struct ath5k_hw *ah)
                tasklet_schedule(&ah->ani_tasklet);
 }
 
-
 /**
- * ath5k_ani_phy_error_report() - Used by older HW to report PHY errors
+ * ath5k_ani_phy_error_report - Used by older HW to report PHY errors
+ *
+ * @ah: The &struct ath5k_hw
+ * @phyerr: One of enum ath5k_phy_error_code
  *
  * This is used by hardware without PHY error counters to report PHY errors
  * on a frame-by-frame basis, instead of the interrupt.
@@ -574,10 +588,13 @@ ath5k_ani_phy_error_report(struct ath5k_hw *ah,
 }
 
 
-/*** INIT ***/
+/****************\
+* Initialization *
+\****************/
 
 /**
  * ath5k_enable_phy_err_counters() - Enable PHY error counters
+ * @ah: The &struct ath5k_hw
  *
  * Enable PHY error counters for OFDM and CCK timing errors.
  */
@@ -596,9 +613,9 @@ ath5k_enable_phy_err_counters(struct ath5k_hw *ah)
        ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
 }
 
-
 /**
  * ath5k_disable_phy_err_counters() - Disable PHY error counters
+ * @ah: The &struct ath5k_hw
  *
  * Disable PHY error counters for OFDM and CCK timing errors.
  */
@@ -615,10 +632,10 @@ ath5k_disable_phy_err_counters(struct ath5k_hw *ah)
        ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
 }
 
-
 /**
  * ath5k_ani_init() - Initialize ANI
- * @mode: Which mode to use (auto, manual high, manual low, off)
+ * @ah: The &struct ath5k_hw
+ * @mode: One of enum ath5k_ani_mode
  *
  * Initialize ANI according to mode.
  */
@@ -695,10 +712,18 @@ ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode)
 }
 
 
-/*** DEBUG ***/
+/**************\
+* Debug output *
+\**************/
 
 #ifdef CONFIG_ATH5K_DEBUG
 
+/**
+ * ath5k_ani_print_counters() - Print ANI counters
+ * @ah: The &struct ath5k_hw
+ *
+ * Used for debugging ANI
+ */
 void
 ath5k_ani_print_counters(struct ath5k_hw *ah)
 {
index 7358b6c..21aa355 100644 (file)
@@ -40,13 +40,13 @@ enum ath5k_phy_error_code;
  * enum ath5k_ani_mode - mode for ANI / noise sensitivity
  *
  * @ATH5K_ANI_MODE_OFF: Turn ANI off. This can be useful to just stop the ANI
- *     algorithm after it has been on auto mode.
- * ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low,
- *     maximizing sensitivity. ANI will not run.
- * ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high,
- *     minimizing sensitivity. ANI will not run.
- * ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the
- *     amount of OFDM and CCK frame errors (default).
+ *                     algorithm after it has been on auto mode.
+ * @ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low,
+ *                     maximizing sensitivity. ANI will not run.
+ * @ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high,
+ *                     minimizing sensitivity. ANI will not run.
+ * @ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the
+ *                     amount of OFDM and CCK frame errors (default).
  */
 enum ath5k_ani_mode {
        ATH5K_ANI_MODE_OFF              = 0,
@@ -58,8 +58,22 @@ enum ath5k_ani_mode {
 
 /**
  * struct ath5k_ani_state - ANI state and associated counters
- *
- * @max_spur_level: the maximum spur level is chip dependent
+ * @ani_mode: One of enum ath5k_ani_mode
+ * @noise_imm_level: Noise immunity level
+ * @spur_level: Spur immunity level
+ * @firstep_level: FIRstep level
+ * @ofdm_weak_sig: OFDM weak signal detection state (on/off)
+ * @cck_weak_sig: CCK weak signal detection state (on/off)
+ * @max_spur_level: Max spur immunity level (chip specific)
+ * @listen_time: Listen time
+ * @ofdm_errors: OFDM timing error count
+ * @cck_errors: CCK timing error count
+ * @last_cc: The &struct ath_cycle_counters (for stats)
+ * @last_listen: Listen time from previous run (for stats)
+ * @last_ofdm_errors: OFDM timing error count from previous run (for stats)
+ * @last_cck_errors: CCK timing error count from previous run (for stats)
+ * @sum_ofdm_errors: Sum of OFDM timing errors (for stats)
+ * @sum_cck_errors: Sum of all CCK timing errors (for stats)
  */
 struct ath5k_ani_state {
        enum ath5k_ani_mode     ani_mode;
index fecbcd9..c2b2518 100644 (file)
 #define AR5K_TUNE_MAX_TXPOWER                  63
 #define AR5K_TUNE_DEFAULT_TXPOWER              25
 #define AR5K_TUNE_TPC_TXPOWER                  false
-#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL    10000   /* 10 sec */
+#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL    60000   /* 60 sec */
+#define ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT   10000   /* 10 sec */
 #define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI    1000    /* 1 sec */
-#define ATH5K_TUNE_CALIBRATION_INTERVAL_NF     60000   /* 60 sec */
-
 #define ATH5K_TX_COMPLETE_POLL_INT             3000    /* 3 sec */
 
 #define AR5K_INIT_CARR_SENSE_EN                        1
 #define        AR5K_AGC_SETTLING_TURBO                 37
 
 
-/* GENERIC CHIPSET DEFINITIONS */
 
-/* MAC Chips */
+/*****************************\
+* GENERIC CHIPSET DEFINITIONS *
+\*****************************/
+
+/**
+ * enum ath5k_version - MAC Chips
+ * @AR5K_AR5210: AR5210 (Crete)
+ * @AR5K_AR5211: AR5211 (Oahu/Maui)
+ * @AR5K_AR5212: AR5212 (Venice) and newer
+ */
 enum ath5k_version {
        AR5K_AR5210     = 0,
        AR5K_AR5211     = 1,
        AR5K_AR5212     = 2,
 };
 
-/* PHY Chips */
+/**
+ * enum ath5k_radio - PHY Chips
+ * @AR5K_RF5110: RF5110 (Fez)
+ * @AR5K_RF5111: RF5111 (Sombrero)
+ * @AR5K_RF5112: RF2112/5112(A) (Derby/Derby2)
+ * @AR5K_RF2413: RF2413/2414 (Griffin/Griffin-Lite)
+ * @AR5K_RF5413: RF5413/5414/5424 (Eagle/Condor)
+ * @AR5K_RF2316: RF2315/2316 (Cobra SoC)
+ * @AR5K_RF2317: RF2317 (Spider SoC)
+ * @AR5K_RF2425: RF2425/2417 (Swan/Nalla)
+ */
 enum ath5k_radio {
        AR5K_RF5110     = 0,
        AR5K_RF5111     = 1,
@@ -303,11 +320,11 @@ enum ath5k_radio {
 #define AR5K_SREV_AR5213A      0x59 /* Hainan */
 #define AR5K_SREV_AR2413       0x78 /* Griffin lite */
 #define AR5K_SREV_AR2414       0x70 /* Griffin */
-#define AR5K_SREV_AR2315_R6 0x86 /* AP51-Light */
-#define AR5K_SREV_AR2315_R7 0x87 /* AP51-Full */
+#define AR5K_SREV_AR2315_R6    0x86 /* AP51-Light */
+#define AR5K_SREV_AR2315_R7    0x87 /* AP51-Full */
 #define AR5K_SREV_AR5424       0x90 /* Condor */
-#define AR5K_SREV_AR2317_R1 0x90 /* AP61-Light */
-#define AR5K_SREV_AR2317_R2 0x91 /* AP61-Full */
+#define AR5K_SREV_AR2317_R1    0x90 /* AP61-Light */
+#define AR5K_SREV_AR2317_R2    0x91 /* AP61-Full */
 #define AR5K_SREV_AR5413       0xa4 /* Eagle lite */
 #define AR5K_SREV_AR5414       0xa0 /* Eagle */
 #define AR5K_SREV_AR2415       0xb0 /* Talon */
@@ -344,32 +361,40 @@ enum ath5k_radio {
 
 /* TODO add support to mac80211 for vendor-specific rates and modes */
 
-/*
+/**
+ * DOC: Atheros XR
+ *
  * Some of this information is based on Documentation from:
  *
  * http://madwifi-project.org/wiki/ChipsetFeatures/SuperAG
  *
- * Modulation for Atheros' eXtended Range - range enhancing extension that is
- * supposed to double the distance an Atheros client device can keep a
- * connection with an Atheros access point. This is achieved by increasing
- * the receiver sensitivity up to, -105dBm, which is about 20dB above what
- * the 802.11 specifications demand. In addition, new (proprietary) data rates
- * are introduced: 3, 2, 1, 0.5 and 0.25 MBit/s.
+ * Atheros' eXtended Range - range enhancing extension is a modulation scheme
+ * that is supposed to double the link distance between an Atheros XR-enabled
+ * client device with an Atheros XR-enabled access point. This is achieved
+ * by increasing the receiver sensitivity up to -105dBm, which is about 20dB
+ * above what the 802.11 specifications demand. In addition, new (proprietary)
+ * data rates are introduced: 3, 2, 1, 0.5 and 0.25 MBit/s.
  *
  * Please note that you can either use XR or TURBO but you cannot use both,
  * they are exclusive.
  *
+ * Also note that we do not plan to support XR mode at least for now. You can
+ * get a mode similar to XR by using 5MHz bwmode.
  */
-#define MODULATION_XR          0x00000200
-/*
- * Modulation for Atheros' Turbo G and Turbo A, its supposed to provide a
- * throughput transmission speed up to 40Mbit/s-60Mbit/s at a 108Mbit/s
- * signaling rate achieved through the bonding of two 54Mbit/s 802.11g
- * channels. To use this feature your Access Point must also support it.
+
+
+/**
+ * DOC: Atheros SuperAG
+ *
+ * In addition to XR we have another modulation scheme called TURBO mode
+ * that is supposed to provide a throughput transmission speed up to 40Mbit/s
+ * -60Mbit/s at a 108Mbit/s signaling rate achieved through the bonding of two
+ * 54Mbit/s 802.11g channels. To use this feature both ends must support it.
  * There is also a distinction between "static" and "dynamic" turbo modes:
  *
  * - Static: is the dumb version: devices set to this mode stick to it until
  *     the mode is turned off.
+ *
  * - Dynamic: is the intelligent version, the network decides itself if it
  *     is ok to use turbo. As soon as traffic is detected on adjacent channels
  *     (which would get used in turbo mode), or when a non-turbo station joins
@@ -383,24 +408,39 @@ enum ath5k_radio {
  *
  * http://www.pcworld.com/article/id,113428-page,1/article.html
  *
- * The channel bonding seems to be driver specific though. In addition to
- * deciding what channels will be used, these "Turbo" modes are accomplished
- * by also enabling the following features:
+ * The channel bonding seems to be driver specific though.
+ *
+ * In addition to TURBO modes we also have the following features for even
+ * greater speed-up:
  *
  * - Bursting: allows multiple frames to be sent at once, rather than pausing
  *     after each frame. Bursting is a standards-compliant feature that can be
  *     used with any Access Point.
+ *
  * - Fast frames: increases the amount of information that can be sent per
  *     frame, also resulting in a reduction of transmission overhead. It is a
  *     proprietary feature that needs to be supported by the Access Point.
+ *
  * - Compression: data frames are compressed in real time using a Lempel Ziv
  *     algorithm. This is done transparently. Once this feature is enabled,
  *     compression and decompression takes place inside the chipset, without
  *     putting additional load on the host CPU.
  *
+ * As with XR we also don't plan to support SuperAG features for now. You can
+ * get a mode similar to TURBO by using 40MHz bwmode.
  */
-#define MODULATION_TURBO       0x00000080
 
+
+/**
+ * enum ath5k_driver_mode - PHY operation mode
+ * @AR5K_MODE_11A: 802.11a
+ * @AR5K_MODE_11B: 802.11b
+ * @AR5K_MODE_11G: 802.11g
+ * @AR5K_MODE_MAX: Used for boundary checks
+ *
+ * Do not change the order here, we use these as
+ * array indices and it also maps EEPROM structures.
+ */
 enum ath5k_driver_mode {
        AR5K_MODE_11A           =       0,
        AR5K_MODE_11B           =       1,
@@ -408,30 +448,64 @@ enum ath5k_driver_mode {
        AR5K_MODE_MAX           =       3
 };
 
+/**
+ * enum ath5k_ant_mode - Antenna operation mode
+ * @AR5K_ANTMODE_DEFAULT: Default antenna setup
+ * @AR5K_ANTMODE_FIXED_A: Only antenna A is present
+ * @AR5K_ANTMODE_FIXED_B: Only antenna B is present
+ * @AR5K_ANTMODE_SINGLE_AP: STA locked on a single ap
+ * @AR5K_ANTMODE_SECTOR_AP: AP with tx antenna set on tx desc
+ * @AR5K_ANTMODE_SECTOR_STA: STA with tx antenna set on tx desc
+ * @AR5K_ANTMODE_DEBUG: Debug mode -A -> Rx, B-> Tx-
+ * @AR5K_ANTMODE_MAX: Used for boundary checks
+ *
+ * For more info on antenna control, check out phy.c
+ */
 enum ath5k_ant_mode {
-       AR5K_ANTMODE_DEFAULT    = 0,    /* default antenna setup */
-       AR5K_ANTMODE_FIXED_A    = 1,    /* only antenna A is present */
-       AR5K_ANTMODE_FIXED_B    = 2,    /* only antenna B is present */
-       AR5K_ANTMODE_SINGLE_AP  = 3,    /* sta locked on a single ap */
-       AR5K_ANTMODE_SECTOR_AP  = 4,    /* AP with tx antenna set on tx desc */
-       AR5K_ANTMODE_SECTOR_STA = 5,    /* STA with tx antenna set on tx desc */
-       AR5K_ANTMODE_DEBUG      = 6,    /* Debug mode -A -> Rx, B-> Tx- */
+       AR5K_ANTMODE_DEFAULT    = 0,
+       AR5K_ANTMODE_FIXED_A    = 1,
+       AR5K_ANTMODE_FIXED_B    = 2,
+       AR5K_ANTMODE_SINGLE_AP  = 3,
+       AR5K_ANTMODE_SECTOR_AP  = 4,
+       AR5K_ANTMODE_SECTOR_STA = 5,
+       AR5K_ANTMODE_DEBUG      = 6,
        AR5K_ANTMODE_MAX,
 };
 
+/**
+ * enum ath5k_bw_mode - Bandwidth operation mode
+ * @AR5K_BWMODE_DEFAULT: 20MHz, default operation
+ * @AR5K_BWMODE_5MHZ: Quarter rate
+ * @AR5K_BWMODE_10MHZ: Half rate
+ * @AR5K_BWMODE_40MHZ: Turbo
+ */
 enum ath5k_bw_mode {
-       AR5K_BWMODE_DEFAULT     = 0,    /* 20MHz, default operation */
-       AR5K_BWMODE_5MHZ        = 1,    /* Quarter rate */
-       AR5K_BWMODE_10MHZ       = 2,    /* Half rate */
-       AR5K_BWMODE_40MHZ       = 3     /* Turbo */
+       AR5K_BWMODE_DEFAULT     = 0,
+       AR5K_BWMODE_5MHZ        = 1,
+       AR5K_BWMODE_10MHZ       = 2,
+       AR5K_BWMODE_40MHZ       = 3
 };
 
+
+
 /****************\
   TX DEFINITIONS
 \****************/
 
-/*
- * TX Status descriptor
+/**
+ * struct ath5k_tx_status - TX Status descriptor
+ * @ts_seqnum: Sequence number
+ * @ts_tstamp: Timestamp
+ * @ts_status: Status code
+ * @ts_final_idx: Final transmission series index
+ * @ts_final_retry: Final retry count
+ * @ts_rssi: RSSI for received ACK
+ * @ts_shortretry: Short retry count
+ * @ts_virtcol: Virtual collision count
+ * @ts_antenna: Antenna used
+ *
+ * TX status descriptor gets filled by the hw
+ * on each transmission attempt.
  */
 struct ath5k_tx_status {
        u16     ts_seqnum;
@@ -454,7 +528,6 @@ struct ath5k_tx_status {
  * enum ath5k_tx_queue - Queue types used to classify tx queues.
  * @AR5K_TX_QUEUE_INACTIVE: q is unused -- see ath5k_hw_release_tx_queue
  * @AR5K_TX_QUEUE_DATA: A normal data queue
- * @AR5K_TX_QUEUE_XR_DATA: An XR-data queue
  * @AR5K_TX_QUEUE_BEACON: The beacon queue
  * @AR5K_TX_QUEUE_CAB: The after-beacon queue
  * @AR5K_TX_QUEUE_UAPSD: Unscheduled Automatic Power Save Delivery queue
@@ -462,7 +535,6 @@ struct ath5k_tx_status {
 enum ath5k_tx_queue {
        AR5K_TX_QUEUE_INACTIVE = 0,
        AR5K_TX_QUEUE_DATA,
-       AR5K_TX_QUEUE_XR_DATA,
        AR5K_TX_QUEUE_BEACON,
        AR5K_TX_QUEUE_CAB,
        AR5K_TX_QUEUE_UAPSD,
@@ -471,36 +543,46 @@ enum ath5k_tx_queue {
 #define        AR5K_NUM_TX_QUEUES              10
 #define        AR5K_NUM_TX_QUEUES_NOQCU        2
 
-/*
- * Queue syb-types to classify normal data queues.
+/**
+ * enum ath5k_tx_queue_subtype - Queue sub-types to classify normal data queues
+ * @AR5K_WME_AC_BK: Background traffic
+ * @AR5K_WME_AC_BE: Best-effort (normal) traffic
+ * @AR5K_WME_AC_VI: Video traffic
+ * @AR5K_WME_AC_VO: Voice traffic
+ *
  * These are the 4 Access Categories as defined in
  * WME spec. 0 is the lowest priority and 4 is the
  * highest. Normal data that hasn't been classified
  * goes to the Best Effort AC.
  */
 enum ath5k_tx_queue_subtype {
-       AR5K_WME_AC_BK = 0,     /*Background traffic*/
-       AR5K_WME_AC_BE,         /*Best-effort (normal) traffic*/
-       AR5K_WME_AC_VI,         /*Video traffic*/
-       AR5K_WME_AC_VO,         /*Voice traffic*/
+       AR5K_WME_AC_BK = 0,
+       AR5K_WME_AC_BE,
+       AR5K_WME_AC_VI,
+       AR5K_WME_AC_VO,
 };
 
-/*
- * Queue ID numbers as returned by the hw functions, each number
- * represents a hw queue. If hw does not support hw queues
- * (eg 5210) all data goes in one queue. These match
- * d80211 definitions (net80211/MadWiFi don't use them).
+/**
+ * enum ath5k_tx_queue_id - Queue ID numbers as returned by the hw functions
+ * @AR5K_TX_QUEUE_ID_NOQCU_DATA: Data queue on AR5210 (no QCU available)
+ * @AR5K_TX_QUEUE_ID_NOQCU_BEACON: Beacon queue on AR5210 (no QCU available)
+ * @AR5K_TX_QUEUE_ID_DATA_MIN: Data queue min index
+ * @AR5K_TX_QUEUE_ID_DATA_MAX: Data queue max index
+ * @AR5K_TX_QUEUE_ID_CAB: Content after beacon queue
+ * @AR5K_TX_QUEUE_ID_BEACON: Beacon queue
+ * @AR5K_TX_QUEUE_ID_UAPSD: Unscheduled Automatic Power Save Delivery
+ *
+ * Each number represents a hw queue. If hw does not support hw queues
+ * (eg 5210) all data goes in one queue.
  */
 enum ath5k_tx_queue_id {
        AR5K_TX_QUEUE_ID_NOQCU_DATA     = 0,
        AR5K_TX_QUEUE_ID_NOQCU_BEACON   = 1,
-       AR5K_TX_QUEUE_ID_DATA_MIN       = 0, /*IEEE80211_TX_QUEUE_DATA0*/
-       AR5K_TX_QUEUE_ID_DATA_MAX       = 3, /*IEEE80211_TX_QUEUE_DATA3*/
-       AR5K_TX_QUEUE_ID_DATA_SVP       = 5, /*IEEE80211_TX_QUEUE_SVP - Spectralink Voice Protocol*/
-       AR5K_TX_QUEUE_ID_CAB            = 6, /*IEEE80211_TX_QUEUE_AFTER_BEACON*/
-       AR5K_TX_QUEUE_ID_BEACON         = 7, /*IEEE80211_TX_QUEUE_BEACON*/
-       AR5K_TX_QUEUE_ID_UAPSD          = 8,
-       AR5K_TX_QUEUE_ID_XR_DATA        = 9,
+       AR5K_TX_QUEUE_ID_DATA_MIN       = 0,
+       AR5K_TX_QUEUE_ID_DATA_MAX       = 3,
+       AR5K_TX_QUEUE_ID_UAPSD          = 7,
+       AR5K_TX_QUEUE_ID_CAB            = 8,
+       AR5K_TX_QUEUE_ID_BEACON         = 9,
 };
 
 /*
@@ -521,46 +603,70 @@ enum ath5k_tx_queue_id {
 #define AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS                0x1000  /* Disable backoff while bursting */
 #define AR5K_TXQ_FLAG_COMPRESSION_ENABLE       0x2000  /* Enable hw compression -not implemented-*/
 
-/*
- * Data transmit queue state.  One of these exists for each
- * hardware transmit queue.  Packets sent to us from above
- * are assigned to queues based on their priority.  Not all
- * devices support a complete set of hardware transmit queues.
- * For those devices the array sc_ac2q will map multiple
- * priorities to fewer hardware queues (typically all to one
- * hardware queue).
+/**
+ * struct ath5k_txq - Transmit queue state
+ * @qnum: Hardware q number
+ * @link: Link ptr in last TX desc
+ * @q: Transmit queue (&struct list_head)
+ * @lock: Lock on q and link
+ * @setup: Is the queue configured
+ * @txq_len: Number of queued buffers
+ * @txq_max: Max allowed num of queued buffers
+ * @txq_poll_mark: Used to check if queue got stuck
+ * @txq_stuck: Queue stuck counter
+ *
+ * One of these exists for each hardware transmit queue.
+ * Packets sent to us from above are assigned to queues based
+ * on their priority.  Not all devices support a complete set
+ * of hardware transmit queues. For those devices the array
+ * sc_ac2q will map multiple priorities to fewer hardware queues
+ * (typically all to one hardware queue).
  */
 struct ath5k_txq {
-       unsigned int            qnum;   /* hardware q number */
-       u32                     *link;  /* link ptr in last TX desc */
-       struct list_head        q;      /* transmit queue */
-       spinlock_t              lock;   /* lock on q and link */
+       unsigned int            qnum;
+       u32                     *link;
+       struct list_head        q;
+       spinlock_t              lock;
        bool                    setup;
-       int                     txq_len; /* number of queued buffers */
-       int                     txq_max; /* max allowed num of queued buffers */
+       int                     txq_len;
+       int                     txq_max;
        bool                    txq_poll_mark;
-       unsigned int            txq_stuck;      /* informational counter */
+       unsigned int            txq_stuck;
 };
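
A minimal standalone sketch of the priority-to-queue collapse described above
(plain C, compilable on its own; the names setup_ac2q and ac2q are illustrative
assumptions, not driver symbols):

#include <stdio.h>

#define NUM_AC 4        /* BK, BE, VI, VO */

/* Map each WME access category to a hardware queue.  Devices with a
 * full set of hw queues get one queue per AC; devices with fewer
 * queues collapse every AC onto queue 0. */
static void setup_ac2q(int ac2q[NUM_AC], int num_hw_queues)
{
        int ac;

        for (ac = 0; ac < NUM_AC; ac++)
                ac2q[ac] = (num_hw_queues >= NUM_AC) ? ac : 0;
}

int main(void)
{
        int ac2q[NUM_AC];
        int ac;

        setup_ac2q(ac2q, 1);    /* e.g. a device with a single hw queue */
        for (ac = 0; ac < NUM_AC; ac++)
                printf("AC %d -> hw queue %d\n", ac, ac2q[ac]);
        return 0;
}
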
 
-/*
- * A struct to hold tx queue's parameters
+/**
+ * struct ath5k_txq_info - A struct to hold TX queue's parameters
+ * @tqi_type: One of enum ath5k_tx_queue
+ * @tqi_subtype: One of enum ath5k_tx_queue_subtype
+ * @tqi_flags: TX queue flags (see above)
+ * @tqi_aifs: Arbitrated Inter-frame Space
+ * @tqi_cw_min: Minimum Contention Window
+ * @tqi_cw_max: Maximum Contention Window
+ * @tqi_cbr_period: Constant bit rate period
+ * @tqi_ready_time: Time queue waits after an event when RDYTIME is enabled
  */
 struct ath5k_txq_info {
        enum ath5k_tx_queue tqi_type;
        enum ath5k_tx_queue_subtype tqi_subtype;
-       u16     tqi_flags;      /* Tx queue flags (see above) */
-       u8      tqi_aifs;       /* Arbitrated Interframe Space */
-       u16     tqi_cw_min;     /* Minimum Contention Window */
-       u16     tqi_cw_max;     /* Maximum Contention Window */
-       u32     tqi_cbr_period; /* Constant bit rate period */
+       u16     tqi_flags;
+       u8      tqi_aifs;
+       u16     tqi_cw_min;
+       u16     tqi_cw_max;
+       u32     tqi_cbr_period;
        u32     tqi_cbr_overflow_limit;
        u32     tqi_burst_time;
-       u32     tqi_ready_time; /* Time queue waits after an event */
+       u32     tqi_ready_time;
 };
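
For orientation, a small self-contained example of the kind of EDCA parameters
such a structure carries.  The struct below is a trimmed stand-in for the
fields documented above, and the values are the usual 802.11 defaults for a
best-effort queue, not necessarily what the driver programs:

#include <stdint.h>
#include <stdio.h>

/* Trimmed stand-in mirroring tqi_aifs/tqi_cw_min/tqi_cw_max */
struct txq_params {
        uint8_t  aifs;          /* Arbitrated Inter-frame Space (slots) */
        uint16_t cw_min;        /* minimum contention window */
        uint16_t cw_max;        /* maximum contention window */
};

int main(void)
{
        /* Typical 802.11 EDCA values for a best-effort queue */
        struct txq_params be = { .aifs = 3, .cw_min = 15, .cw_max = 1023 };

        printf("BE: AIFS=%d CWmin=%d CWmax=%d\n", be.aifs, be.cw_min, be.cw_max);
        return 0;
}
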
 
-/*
- * Transmit packet types.
- * used on tx control descriptor
+/**
+ * enum ath5k_pkt_type - Transmit packet types
+ * @AR5K_PKT_TYPE_NORMAL: Normal data
+ * @AR5K_PKT_TYPE_ATIM: ATIM
+ * @AR5K_PKT_TYPE_PSPOLL: PS-Poll
+ * @AR5K_PKT_TYPE_BEACON: Beacon
+ * @AR5K_PKT_TYPE_PROBE_RESP: Probe response
+ * @AR5K_PKT_TYPE_PIFS: PIFS
+ * Used on tx control descriptor
  */
 enum ath5k_pkt_type {
        AR5K_PKT_TYPE_NORMAL            = 0,
@@ -583,27 +689,23 @@ enum ath5k_pkt_type {
        (ah->ah_txpower.txp_rates_power_table[(_r)] & 0x3f) << (_v)     \
 )
 
-/*
- * DMA size definitions (2^(n+2))
- */
-enum ath5k_dmasize {
-       AR5K_DMASIZE_4B = 0,
-       AR5K_DMASIZE_8B,
-       AR5K_DMASIZE_16B,
-       AR5K_DMASIZE_32B,
-       AR5K_DMASIZE_64B,
-       AR5K_DMASIZE_128B,
-       AR5K_DMASIZE_256B,
-       AR5K_DMASIZE_512B
-};
 
 
 /****************\
   RX DEFINITIONS
 \****************/
 
-/*
- * RX Status descriptor
+/**
+ * struct ath5k_rx_status - RX Status descriptor
+ * @rs_datalen: Data length
+ * @rs_tstamp: Timestamp
+ * @rs_status: Status code
+ * @rs_phyerr: PHY error mask
+ * @rs_rssi: RSSI in 0.5dbm units
+ * @rs_keyix: Index to the key used for decrypting
+ * @rs_rate: Rate used to decode the frame
+ * @rs_antenna: Antenna used to receive the frame
+ * @rs_more: Indicates this is a frame fragment (Fast frames)
  */
 struct ath5k_rx_status {
        u16     rs_datalen;
@@ -645,10 +747,18 @@ struct ath5k_rx_status {
 #define TSF_TO_TU(_tsf) (u32)((_tsf) >> 10)
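
A quick standalone check of the conversion above: one Time Unit (TU) is
1024 us, so shifting the microsecond TSF right by 10 bits divides by 1024
(sketch only, not driver code):

#include <stdint.h>
#include <stdio.h>

#define TSF_TO_TU(_tsf) ((uint32_t)((_tsf) >> 10))

int main(void)
{
        uint64_t tsf = 102400;                          /* 102400 us */
        printf("%u TU\n", (unsigned)TSF_TO_TU(tsf));    /* prints "100 TU" */
        return 0;
}
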
 
 
+
 /*******************************\
   GAIN OPTIMIZATION DEFINITIONS
 \*******************************/
 
+/**
+ * enum ath5k_rfgain - RF Gain optimization engine state
+ * @AR5K_RFGAIN_INACTIVE: Engine disabled
+ * @AR5K_RFGAIN_ACTIVE: Probe active
+ * @AR5K_RFGAIN_READ_REQUESTED: Probe requested
+ * @AR5K_RFGAIN_NEED_CHANGE: Gain_F needs change
+ */
 enum ath5k_rfgain {
        AR5K_RFGAIN_INACTIVE = 0,
        AR5K_RFGAIN_ACTIVE,
@@ -656,6 +766,16 @@ enum ath5k_rfgain {
        AR5K_RFGAIN_NEED_CHANGE,
 };
 
+/**
+ * struct ath5k_gain - RF Gain optimization engine state data
+ * @g_step_idx: Current step index
+ * @g_current: Current gain
+ * @g_target: Target gain
+ * @g_low: Low gain boundary
+ * @g_high: High gain boundary
+ * @g_f_corr: Gain_F correction
+ * @g_state: One of enum ath5k_rfgain
+ */
 struct ath5k_gain {
        u8                      g_step_idx;
        u8                      g_current;
@@ -666,6 +786,8 @@ struct ath5k_gain {
        u8                      g_state;
 };
 
+
+
 /********************\
   COMMON DEFINITIONS
 \********************/
@@ -674,9 +796,14 @@ struct ath5k_gain {
 #define AR5K_SLOT_TIME_20      880
 #define AR5K_SLOT_TIME_MAX     0xffff
 
-/*
- * The following structure is used to map 2GHz channels to
- * 5GHz Atheros channels.
+/**
+ * struct ath5k_athchan_2ghz - 2GHz to 5GHZ map for RF5111
+ * @a2_flags: Channel flags (internal)
+ * @a2_athchan: HW channel number (internal)
+ *
+ * This structure is used to map 2GHz channels to
+ * 5GHz Atheros channels on the 2111 frequency converter
+ * that comes together with RF5111.
  * TODO: Clean up
  */
 struct ath5k_athchan_2ghz {
@@ -684,36 +811,80 @@ struct ath5k_athchan_2ghz {
        u16     a2_athchan;
 };
 
+/**
+ * enum ath5k_dmasize -  DMA size definitions (2^(n+2))
+ * @AR5K_DMASIZE_4B: 4Bytes
+ * @AR5K_DMASIZE_8B: 8Bytes
+ * @AR5K_DMASIZE_16B: 16Bytes
+ * @AR5K_DMASIZE_32B: 32Bytes
+ * @AR5K_DMASIZE_64B: 64Bytes (Default)
+ * @AR5K_DMASIZE_128B: 128Bytes
+ * @AR5K_DMASIZE_256B: 256Bytes
+ * @AR5K_DMASIZE_512B: 512Bytes
+ *
+ * These are used to set DMA burst size on hw
+ *
+ * Note: Some platforms can't handle more than 4Bytes,
+ * so be careful on embedded boards.
+ */
+enum ath5k_dmasize {
+       AR5K_DMASIZE_4B = 0,
+       AR5K_DMASIZE_8B,
+       AR5K_DMASIZE_16B,
+       AR5K_DMASIZE_32B,
+       AR5K_DMASIZE_64B,
+       AR5K_DMASIZE_128B,
+       AR5K_DMASIZE_256B,
+       AR5K_DMASIZE_512B
+};
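
The codes follow size = 2^(n+2) bytes, i.e. 4 << n.  A standalone check
(not driver code) of that relation:

#include <stdio.h>

int main(void)
{
        int n;

        for (n = 0; n <= 7; n++)                /* AR5K_DMASIZE_4B .. _512B */
                printf("code %d -> %d bytes\n", n, 4 << n);
        return 0;
}
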
+
+
 
 /******************\
   RATE DEFINITIONS
 \******************/
 
 /**
+ * DOC: Rate codes
+ *
  * Seems the ar5xxx hardware supports up to 32 rates, indexed by 1-32.
  *
  * The rate code is used to get the RX rate or set the TX rate on the
  * hardware descriptors. It is also used for internal modulation control
  * and settings.
  *
- * This is the hardware rate map we are aware of:
- *
- * rate_code   0x01    0x02    0x03    0x04    0x05    0x06    0x07    0x08
- * rate_kbps   3000    1000    ?       ?       ?       2000    500     48000
- *
- * rate_code   0x09    0x0A    0x0B    0x0C    0x0D    0x0E    0x0F    0x10
- * rate_kbps   24000   12000   6000    54000   36000   18000   9000    ?
+ * This is the hardware rate map we are aware of (html unfriendly):
  *
- * rate_code   17      18      19      20      21      22      23      24
- * rate_kbps   ?       ?       ?       ?       ?       ?       ?       11000
+ * Rate code   Rate (Kbps)
+ * ---------   -----------
+ * 0x01                 3000 (XR)
+ * 0x02                 1000 (XR)
+ * 0x03                  250 (XR)
+ * 0x04 - 05   -Reserved-
+ * 0x06                 2000 (XR)
+ * 0x07                  500 (XR)
+ * 0x08                48000 (OFDM)
+ * 0x09                24000 (OFDM)
+ * 0x0A                12000 (OFDM)
+ * 0x0B                 6000 (OFDM)
+ * 0x0C                54000 (OFDM)
+ * 0x0D                36000 (OFDM)
+ * 0x0E                18000 (OFDM)
+ * 0x0F                 9000 (OFDM)
+ * 0x10 - 17   -Reserved-
+ * 0x18                11000L (CCK)
+ * 0x19                 5500L (CCK)
+ * 0x1A                 2000L (CCK)
+ * 0x1B                 1000L (CCK)
+ * 0x1C                11000S (CCK)
+ * 0x1D                 5500S (CCK)
+ * 0x1E                 2000S (CCK)
+ * 0x1F                -Reserved-
  *
- * rate_code   25      26      27      28      29      30      31      32
- * rate_kbps   5500    2000    1000    11000S  5500S   2000S   ?       ?
- *
- * "S" indicates CCK rates with short preamble.
+ * "S" indicates CCK rates with short preamble and "L" with long preamble.
  *
  * AR5211 has different rate codes for CCK (802.11B) rates. It only uses the
- * lowest 4 bits, so they are the same as below with a 0xF mask.
+ * lowest 4 bits, so they are the same as above with a 0xF mask.
  * (0xB, 0xA, 0x9 and 0x8 for 1M, 2M, 5.5M and 11M).
  * We handle this in ath5k_setup_bands().
  */
@@ -733,13 +904,9 @@ struct ath5k_athchan_2ghz {
 #define ATH5K_RATE_CODE_36M    0x0D
 #define ATH5K_RATE_CODE_48M    0x08
 #define ATH5K_RATE_CODE_54M    0x0C
-/* XR */
-#define ATH5K_RATE_CODE_XR_500K        0x07
-#define ATH5K_RATE_CODE_XR_1M  0x02
-#define ATH5K_RATE_CODE_XR_2M  0x06
-#define ATH5K_RATE_CODE_XR_3M  0x01
 
-/* adding this flag to rate_code enables short preamble */
+/* Adding this flag to rate_code on B rates
+ * enables short preamble */
 #define AR5K_SET_SHORT_PREAMBLE 0x04
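
The 0xF mask and the short-preamble flag can be checked against the values in
the rate table above; a standalone sketch, where only the numeric codes are
taken from that table and everything else is illustrative:

#include <stdio.h>

#define RATE_CODE_11M           0x18    /* 11000L from the table above */
#define SET_SHORT_PREAMBLE      0x04

int main(void)
{
        /* AR5211 uses only the low 4 bits of the CCK codes */
        printf("AR5211 code for 11M: 0x%X\n", RATE_CODE_11M & 0xF);    /* 0x8 */

        /* OR-ing the flag onto a B-rate code selects short preamble */
        printf("11M short preamble: 0x%X\n",
               RATE_CODE_11M | SET_SHORT_PREAMBLE);                    /* 0x1C */
        return 0;
}
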
 
 /*
@@ -747,7 +914,7 @@ struct ath5k_athchan_2ghz {
  */
 
 #define AR5K_KEYCACHE_SIZE     8
-extern int ath5k_modparam_nohwcrypt;
+extern bool ath5k_modparam_nohwcrypt;
 
 /***********************\
  HW RELATED DEFINITIONS
@@ -769,49 +936,65 @@ extern int ath5k_modparam_nohwcrypt;
 
 /**
  * enum ath5k_int - Hardware interrupt masks helpers
+ * @AR5K_INT_RXOK: Frame successfully received
+ * @AR5K_INT_RXDESC: Request RX descriptor/Read RX descriptor
+ * @AR5K_INT_RXERR: Frame reception failed
+ * @AR5K_INT_RXNOFRM: No frame received within a specified time period
+ * @AR5K_INT_RXEOL: Reached "End Of List", means we need more RX descriptors
+ * @AR5K_INT_RXORN: Indicates we got RX FIFO overrun. Note that Rx overrun is
+ *             not always fatal; on some chips we can continue operation
+ *             without resetting the card, which is why %AR5K_INT_FATAL is not
+ *             common for all chips.
+ * @AR5K_INT_RX_ALL: Mask to identify all RX related interrupts
+ *
+ * @AR5K_INT_TXOK: Frame transmission success
+ * @AR5K_INT_TXDESC: Request TX descriptor/Read TX status descriptor
+ * @AR5K_INT_TXERR: Frame transmission failure
+ * @AR5K_INT_TXEOL: Received End Of List for VEOL (Virtual End Of List). The
+ *             Queue Control Unit (QCU) signals an EOL interrupt only if a
+ *             descriptor's LinkPtr is NULL. For more details, refer to:
+ *             "http://www.freepatentsonline.com/20030225739.html"
+ * @AR5K_INT_TXNOFRM: No frame was transmitted within a specified time period
+ * @AR5K_INT_TXURN: Indicates we got TX FIFO underrun. In such case we should
+ *             increase the TX trigger threshold.
+ * @AR5K_INT_TX_ALL: Mask to identify all TX related interrupts
  *
- * @AR5K_INT_RX: mask to identify received frame interrupts, of type
- *     AR5K_ISR_RXOK or AR5K_ISR_RXERR
- * @AR5K_INT_RXDESC: Request RX descriptor/Read RX descriptor (?)
- * @AR5K_INT_RXNOFRM: No frame received (?)
- * @AR5K_INT_RXEOL: received End Of List for VEOL (Virtual End Of List). The
- *     Queue Control Unit (QCU) signals an EOL interrupt only if a descriptor's
- *     LinkPtr is NULL. For more details, refer to:
- *     http://www.freepatentsonline.com/20030225739.html
- * @AR5K_INT_RXORN: Indicates we got RX overrun (eg. no more descriptors).
- *     Note that Rx overrun is not always fatal, on some chips we can continue
- *     operation without resetting the card, that's why int_fatal is not
- *     common for all chips.
- * @AR5K_INT_TX: mask to identify received frame interrupts, of type
- *     AR5K_ISR_TXOK or AR5K_ISR_TXERR
- * @AR5K_INT_TXDESC: Request TX descriptor/Read TX status descriptor (?)
- * @AR5K_INT_TXURN: received when we should increase the TX trigger threshold
- *     We currently do increments on interrupt by
- *     (AR5K_TUNE_MAX_TX_FIFO_THRES - current_trigger_level) / 2
 * @AR5K_INT_MIB: Indicates either the Management Information Base counters or
- *     one of the PHY error counters reached the maximum value and should be
- *     read and cleared.
+ *             one of the PHY error counters reached the maximum value and
+ *             should be read and cleared.
+ * @AR5K_INT_SWI: Software triggered interrupt.
  * @AR5K_INT_RXPHY: RX PHY Error
  * @AR5K_INT_RXKCM: RX Key cache miss
  * @AR5K_INT_SWBA: SoftWare Beacon Alert - indicates its time to send a
- *     beacon that must be handled in software. The alternative is if you
- *     have VEOL support, in that case you let the hardware deal with things.
+ *             beacon that must be handled in software. The alternative is if
+ *             you have VEOL support, in that case you let the hardware deal
+ *             with things.
+ * @AR5K_INT_BRSSI: Beacon received with an RSSI value below our threshold
  * @AR5K_INT_BMISS: If in STA mode this indicates we have stopped seeing
- *     beacons from the AP have associated with, we should probably try to
- *     reassociate. When in IBSS mode this might mean we have not received
- *     any beacons from any local stations. Note that every station in an
- *     IBSS schedules to send beacons at the Target Beacon Transmission Time
- *     (TBTT) with a random backoff.
- * @AR5K_INT_BNR: Beacon Not Ready interrupt - ??
- * @AR5K_INT_GPIO: GPIO interrupt is used for RF Kill, disabled for now
- *     until properly handled
- * @AR5K_INT_FATAL: Fatal errors were encountered, typically caused by DMA
- *     errors. These types of errors we can enable seem to be of type
- *     AR5K_SIMR2_MCABT, AR5K_SIMR2_SSERR and AR5K_SIMR2_DPERR.
+ *             beacons from the AP we have associated with, we should probably
+ *             try to reassociate. When in IBSS mode this might mean we have
+ *             not received any beacons from any local stations. Note that
+ *             every station in an IBSS schedules to send beacons at the
+ *             Target Beacon Transmission Time (TBTT) with a random backoff.
+ * @AR5K_INT_BNR: Beacon queue got triggered (DMA beacon alert) while empty.
+ * @AR5K_INT_TIM: Beacon with local station's TIM bit set
+ * @AR5K_INT_DTIM: Beacon with DTIM bit and zero DTIM count received
+ * @AR5K_INT_DTIM_SYNC: DTIM sync lost
+ * @AR5K_INT_GPIO: GPIO interrupt is used for RF Kill switches connected to
+ *             our GPIO pins.
+ * @AR5K_INT_BCN_TIMEOUT: Beacon timeout, we waited after TBTT but got nothing
+ * @AR5K_INT_CAB_TIMEOUT: We waited for CAB traffic after the beacon but got
+ *             nothing or an incomplete CAB frame sequence.
+ * @AR5K_INT_QCBRORN: A queue got its CBR counter expired
+ * @AR5K_INT_QCBRURN: A queue got triggered while empty
+ * @AR5K_INT_QTRIG: A queue got triggered
+ *
+ * @AR5K_INT_FATAL: Fatal errors were encountered, typically caused by bus/DMA
+ *             errors. Indicates we need to reset the card.
  * @AR5K_INT_GLOBAL: Used to clear and set the IER
- * @AR5K_INT_NOCARD: signals the card has been removed
- * @AR5K_INT_COMMON: common interrupts shared among MACs with the same
- *     bit value
+ * @AR5K_INT_NOCARD: Signals the card has been removed
+ * @AR5K_INT_COMMON: Common interrupts shared among MACs with the same
+ *             bit value
  *
  * These are mapped to take advantage of some common bits
  * between the MACs, to be able to set intr properties
@@ -847,15 +1030,15 @@ enum ath5k_int {
        AR5K_INT_GPIO   =       0x01000000,
        AR5K_INT_BCN_TIMEOUT =  0x02000000, /* Non common */
        AR5K_INT_CAB_TIMEOUT =  0x04000000, /* Non common */
-       AR5K_INT_RX_DOPPLER =   0x08000000, /* Non common */
-       AR5K_INT_QCBRORN =      0x10000000, /* Non common */
-       AR5K_INT_QCBRURN =      0x20000000, /* Non common */
-       AR5K_INT_QTRIG  =       0x40000000, /* Non common */
+       AR5K_INT_QCBRORN =      0x08000000, /* Non common */
+       AR5K_INT_QCBRURN =      0x10000000, /* Non common */
+       AR5K_INT_QTRIG  =       0x20000000, /* Non common */
        AR5K_INT_GLOBAL =       0x80000000,
 
        AR5K_INT_TX_ALL = AR5K_INT_TXOK
                | AR5K_INT_TXDESC
                | AR5K_INT_TXERR
+               | AR5K_INT_TXNOFRM
                | AR5K_INT_TXEOL
                | AR5K_INT_TXURN,
 
@@ -891,15 +1074,32 @@ enum ath5k_int {
        AR5K_INT_NOCARD = 0xffffffff
 };
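
The masks above are meant to be OR-ed into a single word and tested against
the status the ISR reads back.  A minimal standalone sketch of that pattern;
the bit values here are placeholders, not the real AR5K_INT_* values:

#include <stdio.h>

#define INT_RXOK        0x00000001      /* placeholder bits */
#define INT_RXERR       0x00000002
#define INT_TXOK        0x00000040
#define INT_MIB         0x00001000

int main(void)
{
        unsigned int imask  = INT_RXOK | INT_RXERR | INT_TXOK;
        unsigned int status = INT_RXERR | INT_MIB;      /* pretend ISR result */

        if (status & (INT_RXOK | INT_RXERR))
                printf("schedule RX processing\n");      /* taken */
        if (status & INT_TXOK)
                printf("schedule TX processing\n");      /* not taken */
        if (status & INT_MIB & imask)
                printf("handle MIB counters\n");         /* masked out, not taken */
        return 0;
}
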
 
-/* mask which calibration is active at the moment */
+/**
+ * enum ath5k_calibration_mask - Mask which calibration is active at the moment
+ * @AR5K_CALIBRATION_FULL: Full calibration (AGC + SHORT)
+ * @AR5K_CALIBRATION_SHORT: Short calibration (NF + I/Q)
+ * @AR5K_CALIBRATION_NF: Noise Floor calibration
+ * @AR5K_CALIBRATION_ANI: Adaptive Noise Immunity
+ */
 enum ath5k_calibration_mask {
        AR5K_CALIBRATION_FULL = 0x01,
        AR5K_CALIBRATION_SHORT = 0x02,
-       AR5K_CALIBRATION_ANI = 0x04,
+       AR5K_CALIBRATION_NF = 0x04,
+       AR5K_CALIBRATION_ANI = 0x08,
 };
 
-/*
- * Power management
+/**
+ * enum ath5k_power_mode - Power management modes
+ * @AR5K_PM_UNDEFINED: Undefined
+ * @AR5K_PM_AUTO: Allow card to sleep if possible
+ * @AR5K_PM_AWAKE: Force card to wake up
+ * @AR5K_PM_FULL_SLEEP: Force card to full sleep (DANGEROUS)
+ * @AR5K_PM_NETWORK_SLEEP: Allow the card to sleep for a specified duration
+ *
+ * Currently only PM_AWAKE is used; FULL_SLEEP and NETWORK_SLEEP/AUTO
+ * are known to have problems on some cards. This is not a big
+ * problem though because we can get almost the same effect as
+ * FULL_SLEEP by putting the card on warm reset (it's almost powered down).
  */
 enum ath5k_power_mode {
        AR5K_PM_UNDEFINED = 0,
@@ -957,6 +1157,8 @@ struct ath5k_capabilities {
        } cap_queues;
 
        bool cap_has_phyerr_counters;
+       bool cap_has_mrr_support;
+       bool cap_needs_2GHz_ovr;
 };
 
 /* size of noise floor history (keep it a power of two) */
@@ -1072,13 +1274,11 @@ struct ath5k_hw {
        dma_addr_t              desc_daddr;     /* DMA (physical) address */
        size_t                  desc_len;       /* size of TX/RX descriptors */
 
-       DECLARE_BITMAP(status, 6);
+       DECLARE_BITMAP(status, 4);
 #define ATH_STAT_INVALID       0               /* disable hardware accesses */
-#define ATH_STAT_MRRETRY       1               /* multi-rate retry support */
-#define ATH_STAT_PROMISC       2
-#define ATH_STAT_LEDSOFT       3               /* enable LED gpio status */
-#define ATH_STAT_STARTED       4               /* opened & irqs enabled */
-#define ATH_STAT_2G_DISABLED   5               /* multiband radio without 2G */
+#define ATH_STAT_PROMISC       1
+#define ATH_STAT_LEDSOFT       2               /* enable LED gpio status */
+#define ATH_STAT_STARTED       3               /* opened & irqs enabled */
 
        unsigned int            filter_flags;   /* HW flags, AR5K_RX_FILTER_* */
        struct ieee80211_channel *curchan;      /* current h/w channel */
@@ -1097,6 +1297,7 @@ struct ath5k_hw {
                                led_on;         /* pin setting for LED on */
 
        struct work_struct      reset_work;     /* deferred chip reset */
+       struct work_struct      calib_work;     /* deferred phy calibration */
 
        struct list_head        rxbuf;          /* receive buffer */
        spinlock_t              rxbuflock;
@@ -1113,8 +1314,6 @@ struct ath5k_hw {
 
        struct ath5k_rfkill     rf_kill;
 
-       struct tasklet_struct   calib;          /* calibration tasklet */
-
        spinlock_t              block;          /* protects beacon */
        struct tasklet_struct   beacontq;       /* beacon intr tasklet */
        struct list_head        bcbuf;          /* beacon buffer */
@@ -1144,7 +1343,7 @@ struct ath5k_hw {
        enum ath5k_int          ah_imr;
 
        struct ieee80211_channel *ah_current_channel;
-       bool                    ah_calibration;
+       bool                    ah_iq_cal_needed;
        bool                    ah_single_chip;
 
        enum ath5k_version      ah_version;
@@ -1187,7 +1386,13 @@ struct ath5k_hw {
        u32                     ah_txq_imr_cbrurn;
        u32                     ah_txq_imr_qtrig;
        u32                     ah_txq_imr_nofrm;
-       u32                     ah_txq_isr;
+
+       u32                     ah_txq_isr_txok_all;
+       u32                     ah_txq_isr_txurn;
+       u32                     ah_txq_isr_qcborn;
+       u32                     ah_txq_isr_qcburn;
+       u32                     ah_txq_isr_qtrig;
+
        u32                     *ah_rf_banks;
        size_t                  ah_rf_banks_size;
        size_t                  ah_rf_regs_count;
@@ -1228,8 +1433,8 @@ struct ath5k_hw {
 
        /* Calibration timestamp */
        unsigned long           ah_cal_next_full;
+       unsigned long           ah_cal_next_short;
        unsigned long           ah_cal_next_ani;
-       unsigned long           ah_cal_next_nf;
 
        /* Calibration mask */
        u8                      ah_cal_mask;
@@ -1338,11 +1543,11 @@ void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
 u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah);
 void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64);
 void ath5k_hw_reset_tsf(struct ath5k_hw *ah);
-void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval);
+void ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon,
+                                                       u32 interval);
 bool ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval);
 /* Init function */
-void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
-                                                               u8 mode);
+void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode);
 
 /* Queue Control Unit, DFS Control Unit Functions */
 int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
index 91627dd..d7114c7 100644 (file)
@@ -27,8 +27,7 @@
 #include "debug.h"
 
 /**
- * ath5k_hw_post - Power On Self Test helper function
- *
+ * ath5k_hw_post() - Power On Self Test helper function
  * @ah: The &struct ath5k_hw
  */
 static int ath5k_hw_post(struct ath5k_hw *ah)
@@ -92,8 +91,7 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
 }
 
 /**
- * ath5k_hw_init - Check if hw is supported and init the needed structs
- *
+ * ath5k_hw_init() - Check if hw is supported and init the needed structs
  * @ah: The &struct ath5k_hw associated with the device
  *
  * Check if the device is supported, perform a POST and initialize the needed
@@ -298,7 +296,7 @@ int ath5k_hw_init(struct ath5k_hw *ah)
 
                /* Reset SERDES to load new settings */
                ath5k_hw_reg_write(ah, 0x00000000, AR5K_PCIE_SERDES_RESET);
-               mdelay(1);
+               usleep_range(1000, 1500);
        }
 
        /* Get misc capabilities */
@@ -308,11 +306,6 @@ int ath5k_hw_init(struct ath5k_hw *ah)
                goto err;
        }
 
-       if (test_bit(ATH_STAT_2G_DISABLED, ah->status)) {
-               __clear_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode);
-               __clear_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode);
-       }
-
        /* Crypto settings */
        common->keymax = (ah->ah_version == AR5K_AR5210 ?
                          AR5K_KEYTABLE_SIZE_5210 : AR5K_KEYTABLE_SIZE_5211);
@@ -349,8 +342,7 @@ err:
 }
 
 /**
- * ath5k_hw_deinit - Free the ath5k_hw struct
- *
+ * ath5k_hw_deinit() - Free the &struct ath5k_hw
  * @ah: The &struct ath5k_hw
  */
 void ath5k_hw_deinit(struct ath5k_hw *ah)
index b346d04..d366dad 100644 (file)
 #define CREATE_TRACE_POINTS
 #include "trace.h"
 
-int ath5k_modparam_nohwcrypt;
+bool ath5k_modparam_nohwcrypt;
 module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
-static int modparam_all_channels;
+static bool modparam_all_channels;
 module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
 MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");
 
-static int modparam_fastchanswitch;
+static bool modparam_fastchanswitch;
 module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
 MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");
 
+static bool ath5k_modparam_no_hw_rfkill_switch;
+module_param_named(no_hw_rfkill_switch, ath5k_modparam_no_hw_rfkill_switch,
+                                                               bool, S_IRUGO);
+MODULE_PARM_DESC(no_hw_rfkill_switch, "Ignore the GPIO RFKill switch state");
+
 
 /* Module info */
 MODULE_AUTHOR("Jiri Slaby");
@@ -183,7 +188,6 @@ static const struct ieee80211_rate ath5k_rates[] = {
        { .bitrate = 540,
          .hw_value = ATH5K_RATE_CODE_54M,
          .flags = 0 },
-       /* XR missing */
 };
 
 static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
@@ -721,21 +725,24 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
        if (ret)
                goto err_unmap;
 
-       memset(mrr_rate, 0, sizeof(mrr_rate));
-       memset(mrr_tries, 0, sizeof(mrr_tries));
-       for (i = 0; i < 3; i++) {
-               rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
-               if (!rate)
-                       break;
+       /* Set up MRR descriptor */
+       if (ah->ah_capabilities.cap_has_mrr_support) {
+               memset(mrr_rate, 0, sizeof(mrr_rate));
+               memset(mrr_tries, 0, sizeof(mrr_tries));
+               for (i = 0; i < 3; i++) {
+                       rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
+                       if (!rate)
+                               break;
 
-               mrr_rate[i] = rate->hw_value;
-               mrr_tries[i] = info->control.rates[i + 1].count;
-       }
+                       mrr_rate[i] = rate->hw_value;
+                       mrr_tries[i] = info->control.rates[i + 1].count;
+               }
 
-       ath5k_hw_setup_mrr_tx_desc(ah, ds,
-               mrr_rate[0], mrr_tries[0],
-               mrr_rate[1], mrr_tries[1],
-               mrr_rate[2], mrr_tries[2]);
+               ath5k_hw_setup_mrr_tx_desc(ah, ds,
+                       mrr_rate[0], mrr_tries[0],
+                       mrr_rate[1], mrr_tries[1],
+                       mrr_rate[2], mrr_tries[2]);
+       }
 
        ds->ds_link = 0;
        ds->ds_data = bf->skbaddr;
@@ -1689,7 +1696,7 @@ ath5k_tasklet_tx(unsigned long data)
        struct ath5k_hw *ah = (void *)data;
 
        for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
-               if (ah->txqs[i].setup && (ah->ah_txq_isr & BIT(i)))
+               if (ah->txqs[i].setup && (ah->ah_txq_isr_txok_all & BIT(i)))
                        ath5k_tx_processq(ah, &ah->txqs[i]);
 
        ah->tx_pending = false;
@@ -2005,7 +2012,7 @@ ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf)
        ah->nexttbtt = nexttbtt;
 
        intval |= AR5K_BEACON_ENA;
-       ath5k_hw_init_beacon(ah, nexttbtt, intval);
+       ath5k_hw_init_beacon_timers(ah, nexttbtt, intval);
 
        /*
         * debugging output last in order to preserve the time critical aspect
@@ -2112,16 +2119,29 @@ static void
 ath5k_intr_calibration_poll(struct ath5k_hw *ah)
 {
        if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
-           !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) {
-               /* run ANI only when full calibration is not active */
+          !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
+          !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {
+
+               /* Run ANI only when calibration is not active */
+
                ah->ah_cal_next_ani = jiffies +
                        msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
                tasklet_schedule(&ah->ani_tasklet);
 
-       } else if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
-               ah->ah_cal_next_full = jiffies +
-                       msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
-               tasklet_schedule(&ah->calib);
+       } else if (time_is_before_eq_jiffies(ah->ah_cal_next_short) &&
+               !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
+               !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {
+
+               /* Run calibration only when another calibration
+                * is not running.
+                *
+                * Note: This is for both full/short calibration,
+                * if it's time for a full one, ath5k_calibrate_work will deal
+                * with it. */
+
+               ah->ah_cal_next_short = jiffies +
+                       msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
+               ieee80211_queue_work(ah->hw, &ah->calib_work);
        }
        /* we could use SWI to generate enough interrupts to meet our
         * calibration interval requirements, if necessary:
@@ -2149,69 +2169,110 @@ ath5k_intr(int irq, void *dev_id)
        enum ath5k_int status;
        unsigned int counter = 1000;
 
+
+       /*
+        * If hw is not ready (or detached) and we get an
+        * interrupt, or if we have no interrupts pending
+        * (that means it's not for us) skip it.
+        *
+        * NOTE: Group 0/1 PCI interface registers are not
+        * supported on WiSOCs, so we can't check for pending
+        * interrupts (ISR belongs to another register group
+        * so we are ok).
+        */
        if (unlikely(test_bit(ATH_STAT_INVALID, ah->status) ||
-               ((ath5k_get_bus_type(ah) != ATH_AHB) &&
-                               !ath5k_hw_is_intr_pending(ah))))
+                       ((ath5k_get_bus_type(ah) != ATH_AHB) &&
+                       !ath5k_hw_is_intr_pending(ah))))
                return IRQ_NONE;
 
+       /** Main loop **/
        do {
-               ath5k_hw_get_isr(ah, &status);          /* NB: clears IRQ too */
+               ath5k_hw_get_isr(ah, &status);  /* NB: clears IRQ too */
+
                ATH5K_DBG(ah, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
                                status, ah->imask);
+
+               /*
+                * Fatal hw error -> Log and reset
+                *
+                * Fatal errors are unrecoverable so we have to
+                * reset the card. These errors include bus and
+                * dma errors.
+                */
                if (unlikely(status & AR5K_INT_FATAL)) {
-                       /*
-                        * Fatal errors are unrecoverable.
-                        * Typically these are caused by DMA errors.
-                        */
+
                        ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
                                  "fatal int, resetting\n");
                        ieee80211_queue_work(ah->hw, &ah->reset_work);
+
+               /*
+                * RX Overrun -> Count and reset if needed
+                *
+                * Receive buffers are full. Either the bus is busy or
+                * the CPU is not fast enough to process all received
+                * frames.
+                */
                } else if (unlikely(status & AR5K_INT_RXORN)) {
+
                        /*
-                        * Receive buffers are full. Either the bus is busy or
-                        * the CPU is not fast enough to process all received
-                        * frames.
                         * Older chipsets need a reset to come out of this
                         * condition, but we treat it as RX for newer chips.
-                        * We don't know exactly which versions need a reset -
+                        * We don't know exactly which versions need a reset;
                         * this guess is copied from the HAL.
                         */
                        ah->stats.rxorn_intr++;
+
                        if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
                                ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
                                          "rx overrun, resetting\n");
                                ieee80211_queue_work(ah->hw, &ah->reset_work);
                        } else
                                ath5k_schedule_rx(ah);
+
                } else {
+
+                       /* Software Beacon Alert -> Schedule beacon tasklet */
                        if (status & AR5K_INT_SWBA)
                                tasklet_hi_schedule(&ah->beacontq);
 
-                       if (status & AR5K_INT_RXEOL) {
-                               /*
-                               * NB: the hardware should re-read the link when
-                               *     RXE bit is written, but it doesn't work at
-                               *     least on older hardware revs.
-                               */
+                       /*
+                        * No more RX descriptors -> Just count
+                        *
+                        * NB: the hardware should re-read the link when
+                        *     RXE bit is written, but it doesn't work at
+                        *     least on older hardware revs.
+                        */
+                       if (status & AR5K_INT_RXEOL)
                                ah->stats.rxeol_intr++;
-                       }
-                       if (status & AR5K_INT_TXURN) {
-                               /* bump tx trigger level */
+
+
+                       /* TX Underrun -> Bump tx trigger level */
+                       if (status & AR5K_INT_TXURN)
                                ath5k_hw_update_tx_triglevel(ah, true);
-                       }
+
+                       /* RX -> Schedule rx tasklet */
                        if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
                                ath5k_schedule_rx(ah);
-                       if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC
-                                       | AR5K_INT_TXERR | AR5K_INT_TXEOL))
+
+                       /* TX -> Schedule tx tasklet */
+                       if (status & (AR5K_INT_TXOK
+                                       | AR5K_INT_TXDESC
+                                       | AR5K_INT_TXERR
+                                       | AR5K_INT_TXEOL))
                                ath5k_schedule_tx(ah);
-                       if (status & AR5K_INT_BMISS) {
-                               /* TODO */
-                       }
+
+                       /* Missed beacon -> TODO
+                       if (status & AR5K_INT_BMISS)
+                       */
+
+                       /* MIB event -> Update counters and notify ANI */
                        if (status & AR5K_INT_MIB) {
                                ah->stats.mib_intr++;
                                ath5k_hw_update_mib_counters(ah);
                                ath5k_ani_mib_intr(ah);
                        }
+
+                       /* GPIO -> Notify RFKill layer */
                        if (status & AR5K_INT_GPIO)
                                tasklet_schedule(&ah->rf_kill.toggleq);
 
@@ -2222,12 +2283,19 @@ ath5k_intr(int irq, void *dev_id)
 
        } while (ath5k_hw_is_intr_pending(ah) && --counter > 0);
 
+       /*
+        * Until we handle rx/tx interrupts mask them on IMR
+        *
+        * NOTE: ah->(rx/tx)_pending are set when scheduling the tasklets
+        * and unset after we've handled the interrupts.
+        */
        if (ah->rx_pending || ah->tx_pending)
                ath5k_set_current_imask(ah);
 
        if (unlikely(!counter))
                ATH5K_WARN(ah, "too many interrupts, giving up for now\n");
 
+       /* Fire up calibration poll */
        ath5k_intr_calibration_poll(ah);
 
        return IRQ_HANDLED;
@@ -2238,41 +2306,58 @@ ath5k_intr(int irq, void *dev_id)
  * for temperature/environment changes.
  */
 static void
-ath5k_tasklet_calibrate(unsigned long data)
+ath5k_calibrate_work(struct work_struct *work)
 {
-       struct ath5k_hw *ah = (void *)data;
+       struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
+               calib_work);
+
+       /* Should we run a full calibration ? */
+       if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
+
+               ah->ah_cal_next_full = jiffies +
+                       msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
+               ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
+
+               ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
+                               "running full calibration\n");
+
+               if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
+                       /*
+                        * Rfgain is out of bounds, reset the chip
+                        * to load new gain values.
+                        */
+                       ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
+                                       "got new rfgain, resetting\n");
+                       ieee80211_queue_work(ah->hw, &ah->reset_work);
+               }
+
+               /* TODO: On full calibration we should stop TX here,
+                * so that it doesn't interfere (mostly due to gain_f
+                * calibration that messes with tx packets -see phy.c).
+                *
+                * NOTE: Stopping the queues from above is not enough
+                * to stop TX but saves us from disconnecting (at least
+                * we don't lose packets). */
+               ieee80211_stop_queues(ah->hw);
+       } else
+               ah->ah_cal_mask |= AR5K_CALIBRATION_SHORT;
 
-       /* Only full calibration for now */
-       ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
 
        ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
                ieee80211_frequency_to_channel(ah->curchan->center_freq),
                ah->curchan->hw_value);
 
-       if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
-               /*
-                * Rfgain is out of bounds, reset the chip
-                * to load new gain values.
-                */
-               ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "calibration, resetting\n");
-               ieee80211_queue_work(ah->hw, &ah->reset_work);
-       }
        if (ath5k_hw_phy_calibrate(ah, ah->curchan))
                ATH5K_ERR(ah, "calibration of channel %u failed\n",
                        ieee80211_frequency_to_channel(
                                ah->curchan->center_freq));
 
-       /* Noise floor calibration interrupts rx/tx path while I/Q calibration
-        * doesn't.
-        * TODO: We should stop TX here, so that it doesn't interfere.
-        * Note that stopping the queues is not enough to stop TX! */
-       if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) {
-               ah->ah_cal_next_nf = jiffies +
-                       msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF);
-               ath5k_hw_update_noise_floor(ah);
-       }
-
-       ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
+       /* Clear calibration flags */
+       if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL) {
+               ieee80211_wake_queues(ah->hw);
+               ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
+       } else if (ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)
+               ah->ah_cal_mask &= ~AR5K_CALIBRATION_SHORT;
 }
 
 
@@ -2407,8 +2492,8 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
        if (ret)
                goto err_irq;
 
-       /* set up multi-rate retry capabilities */
-       if (ah->ah_version == AR5K_AR5212) {
+       /* Set up multi-rate retry capabilities */
+       if (ah->ah_capabilities.cap_has_mrr_support) {
                hw->max_rates = 4;
                hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
                                         AR5K_INIT_RETRY_LONG);
@@ -2544,15 +2629,22 @@ int ath5k_start(struct ieee80211_hw *hw)
         * and then setup of the interrupt mask.
         */
        ah->curchan = ah->hw->conf.channel;
-       ah->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
-               AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
-               AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
+       ah->imask = AR5K_INT_RXOK
+               | AR5K_INT_RXERR
+               | AR5K_INT_RXEOL
+               | AR5K_INT_RXORN
+               | AR5K_INT_TXDESC
+               | AR5K_INT_TXEOL
+               | AR5K_INT_FATAL
+               | AR5K_INT_GLOBAL
+               | AR5K_INT_MIB;
 
        ret = ath5k_reset(ah, NULL, false);
        if (ret)
                goto done;
 
-       ath5k_rfkill_hw_start(ah);
+       if (!ath5k_modparam_no_hw_rfkill_switch)
+               ath5k_rfkill_hw_start(ah);
 
        /*
         * Reset the key cache since some parts do not reset the
@@ -2585,7 +2677,6 @@ static void ath5k_stop_tasklets(struct ath5k_hw *ah)
        ah->tx_pending = false;
        tasklet_kill(&ah->rxtq);
        tasklet_kill(&ah->txtq);
-       tasklet_kill(&ah->calib);
        tasklet_kill(&ah->beacontq);
        tasklet_kill(&ah->ani_tasklet);
 }
@@ -2637,7 +2728,8 @@ void ath5k_stop(struct ieee80211_hw *hw)
 
        cancel_delayed_work_sync(&ah->tx_complete_work);
 
-       ath5k_rfkill_hw_stop(ah);
+       if (!ath5k_modparam_no_hw_rfkill_switch)
+               ath5k_rfkill_hw_stop(ah);
 }
 
 /*
@@ -2689,9 +2781,24 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
 
        ath5k_ani_init(ah, ani_mode);
 
-       ah->ah_cal_next_full = jiffies + msecs_to_jiffies(100);
-       ah->ah_cal_next_ani = jiffies;
-       ah->ah_cal_next_nf = jiffies;
+       /*
+        * Set calibration intervals
+        *
+        * Note: We don't need to run calibration immediately
+        * since some initial calibration is done on reset
+        * even for fast channel switching. Also on scanning
+        * this will get set again and again and it won't get
+        * executed unless we connect somewhere and spend some
+        * time on the channel (that's what calibration needs
+        * anyway to be accurate).
+        */
+       ah->ah_cal_next_full = jiffies +
+               msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
+       ah->ah_cal_next_ani = jiffies +
+               msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
+       ah->ah_cal_next_short = jiffies +
+               msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
+
        ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);
 
        /* clear survey data and cycle counters */
@@ -2744,20 +2851,6 @@ ath5k_init(struct ieee80211_hw *hw)
        int ret;
 
 
-       /*
-        * Check if the MAC has multi-rate retry support.
-        * We do this by trying to setup a fake extended
-        * descriptor.  MACs that don't have support will
-        * return false w/o doing anything.  MACs that do
-        * support it will return true w/o doing anything.
-        */
-       ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
-
-       if (ret < 0)
-               goto err;
-       if (ret > 0)
-               __set_bit(ATH_STAT_MRRETRY, ah->status);
-
        /*
         * Collect the channel list.  The 802.11 layer
         * is responsible for filtering this list based
@@ -2841,11 +2934,11 @@ ath5k_init(struct ieee80211_hw *hw)
 
        tasklet_init(&ah->rxtq, ath5k_tasklet_rx, (unsigned long)ah);
        tasklet_init(&ah->txtq, ath5k_tasklet_tx, (unsigned long)ah);
-       tasklet_init(&ah->calib, ath5k_tasklet_calibrate, (unsigned long)ah);
        tasklet_init(&ah->beacontq, ath5k_tasklet_beacon, (unsigned long)ah);
        tasklet_init(&ah->ani_tasklet, ath5k_tasklet_ani, (unsigned long)ah);
 
        INIT_WORK(&ah->reset_work, ath5k_reset_work);
+       INIT_WORK(&ah->calib_work, ath5k_calibrate_work);
        INIT_DELAYED_WORK(&ah->tx_complete_work, ath5k_tx_complete_poll_work);
 
        ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac);
index 810fba9..994169a 100644 (file)
@@ -85,12 +85,19 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
                        caps->cap_range.range_2ghz_min = 2412;
                        caps->cap_range.range_2ghz_max = 2732;
 
-                       if (AR5K_EEPROM_HDR_11B(ee_header))
-                               __set_bit(AR5K_MODE_11B, caps->cap_mode);
-
-                       if (AR5K_EEPROM_HDR_11G(ee_header) &&
-                           ah->ah_version != AR5K_AR5211)
-                               __set_bit(AR5K_MODE_11G, caps->cap_mode);
+                       /* Override 2GHz modes on SoCs that need it
+                        * NOTE: cap_needs_2GHz_ovr gets set from
+                        * ath_ahb_probe */
+                       if (!caps->cap_needs_2GHz_ovr) {
+                               if (AR5K_EEPROM_HDR_11B(ee_header))
+                                       __set_bit(AR5K_MODE_11B,
+                                                       caps->cap_mode);
+
+                               if (AR5K_EEPROM_HDR_11G(ee_header) &&
+                               ah->ah_version != AR5K_AR5211)
+                                       __set_bit(AR5K_MODE_11G,
+                                                       caps->cap_mode);
+                       }
                }
        }
 
@@ -103,12 +110,18 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
        else
                caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
 
-       /* newer hardware has PHY error counters */
+       /* Newer hardware has PHY error counters */
        if (ah->ah_mac_srev >= AR5K_SREV_AR5213A)
                caps->cap_has_phyerr_counters = true;
        else
                caps->cap_has_phyerr_counters = false;
 
+       /* MACs since AR5212 have MRR support */
+       if (ah->ah_version == AR5K_AR5212)
+               caps->cap_has_mrr_support = true;
+       else
+               caps->cap_has_mrr_support = false;
+
        return 0;
 }
 
index 7e88dda..f8bfa3a 100644 (file)
 #include "debug.h"
 
 
+/**
+ * DOC: Hardware descriptor functions
+ *
+ * Here we handle the processing of the low-level hw descriptors
+ * that hw reads and writes via DMA for each TX and RX attempt (that means
+ * we can also have descriptors for failed TX/RX tries). We have two kinds of
+ * descriptors for RX and TX: control descriptors that tell the hw how to send
+ * or receive a packet, where to read/write it from/to etc., and status
+ * descriptors that contain information about how the packet was sent or
+ * received (errors included).
+ *
+ * The descriptor format is not exactly the same for each MAC chip version, so
+ * we have function pointers on &struct ath5k_hw that we initialize at runtime
+ * based on the chip used.
+ */
+
+
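
A standalone sketch of the runtime-dispatch pattern described above: select
the descriptor handlers once, based on the chip generation, and call through
function pointers afterwards.  All names and the version check here are
illustrative, only the idea mirrors the driver:

#include <stdio.h>

struct fake_hw {
        int mac_version;
        int (*proc_tx_status)(struct fake_hw *hw);
};

static int proc_2word(struct fake_hw *hw) { (void)hw; puts("2-word status"); return 0; }
static int proc_4word(struct fake_hw *hw) { (void)hw; puts("4-word status"); return 0; }

/* Pick the descriptor handlers once, based on the MAC generation */
static void init_desc_functions(struct fake_hw *hw)
{
        hw->proc_tx_status = (hw->mac_version >= 5212) ? proc_4word : proc_2word;
}

int main(void)
{
        struct fake_hw hw = { .mac_version = 5212 };

        init_desc_functions(&hw);
        hw.proc_tx_status(&hw);         /* prints "4-word status" */
        return 0;
}
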
 /************************\
 * TX Control descriptors *
 \************************/
 
-/*
- * Initialize the 2-word tx control descriptor on 5210/5211
+/**
+ * ath5k_hw_setup_2word_tx_desc() - Initialize a 2-word tx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @pkt_len: Frame length in bytes
+ * @hdr_len: Header length in bytes (only used on AR5210)
+ * @padsize: Any padding we've added to the frame length
+ * @type: One of enum ath5k_pkt_type
+ * @tx_power: Tx power in 0.5dB steps
+ * @tx_rate0: HW idx for transmission rate
+ * @tx_tries0: Max number of retransmissions
+ * @key_index: Index on key table to use for encryption
+ * @antenna_mode: Which antenna to use (0 for auto)
+ * @flags: One of AR5K_TXDESC_* flags (desc.h)
+ * @rtscts_rate: HW idx for RTS/CTS transmission rate
+ * @rtscts_duration: What to put on duration field on the header of RTS/CTS
+ *
+ * Internal function to initialize a 2-Word TX control descriptor
+ * found on AR5210 and AR5211 MAC chips.
+ *
+ * Returns 0 on success or -EINVAL on invalid input
  */
 static int
-ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
-       unsigned int pkt_len, unsigned int hdr_len, int padsize,
-       enum ath5k_pkt_type type,
-       unsigned int tx_power, unsigned int tx_rate0, unsigned int tx_tries0,
-       unsigned int key_index, unsigned int antenna_mode, unsigned int flags,
-       unsigned int rtscts_rate, unsigned int rtscts_duration)
+ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah,
+                       struct ath5k_desc *desc,
+                       unsigned int pkt_len, unsigned int hdr_len,
+                       int padsize,
+                       enum ath5k_pkt_type type,
+                       unsigned int tx_power,
+                       unsigned int tx_rate0, unsigned int tx_tries0,
+                       unsigned int key_index,
+                       unsigned int antenna_mode,
+                       unsigned int flags,
+                       unsigned int rtscts_rate, unsigned int rtscts_duration)
 {
        u32 frame_type;
        struct ath5k_hw_2w_tx_ctl *tx_ctl;
@@ -172,17 +213,40 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
        return 0;
 }
 
-/*
- * Initialize the 4-word tx control descriptor on 5212
+/**
+ * ath5k_hw_setup_4word_tx_desc() - Initialize a 4-word tx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @pkt_len: Frame length in bytes
+ * @hdr_len: Header length in bytes (only used on AR5210)
+ * @padsize: Any padding we've added to the frame length
+ * @type: One of enum ath5k_pkt_type
+ * @tx_power: Tx power in 0.5dB steps
+ * @tx_rate0: HW idx for transmission rate
+ * @tx_tries0: Max number of retransmissions
+ * @key_index: Index on key table to use for encryption
+ * @antenna_mode: Which antenna to use (0 for auto)
+ * @flags: One of AR5K_TXDESC_* flags (desc.h)
+ * @rtscts_rate: HW idx for RTS/CTS transmission rate
+ * @rtscts_duration: What to put on duration field on the header of RTS/CTS
+ *
+ * Internal function to initialize a 4-Word TX control descriptor
+ * found on AR5212 and later MAC chips.
+ *
+ * Returns 0 on success or -EINVAL on invalid input
  */
-static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
-       struct ath5k_desc *desc, unsigned int pkt_len, unsigned int hdr_len,
-       int padsize,
-       enum ath5k_pkt_type type, unsigned int tx_power, unsigned int tx_rate0,
-       unsigned int tx_tries0, unsigned int key_index,
-       unsigned int antenna_mode, unsigned int flags,
-       unsigned int rtscts_rate,
-       unsigned int rtscts_duration)
+static int
+ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
+                       struct ath5k_desc *desc,
+                       unsigned int pkt_len, unsigned int hdr_len,
+                       int padsize,
+                       enum ath5k_pkt_type type,
+                       unsigned int tx_power,
+                       unsigned int tx_rate0, unsigned int tx_tries0,
+                       unsigned int key_index,
+                       unsigned int antenna_mode,
+                       unsigned int flags,
+                       unsigned int rtscts_rate, unsigned int rtscts_duration)
 {
        struct ath5k_hw_4w_tx_ctl *tx_ctl;
        unsigned int frame_len;
@@ -292,13 +356,29 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
        return 0;
 }
 
-/*
- * Initialize a 4-word multi rate retry tx control descriptor on 5212
+/**
+ * ath5k_hw_setup_mrr_tx_desc() - Initialize an MRR tx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @tx_rate1: HW idx for rate used on transmission series 1
+ * @tx_tries1: Max number of retransmissions for transmission series 1
+ * @tx_rate2: HW idx for rate used on transmission series 2
+ * @tx_tries2: Max number of retransmissions for transmission series 2
+ * @tx_rate3: HW idx for rate used on transmission series 3
+ * @tx_tries3: Max number of retransmissions for transmission series 3
+ *
+ * Multi rate retry (MRR) tx control descriptors are available only on AR5212
+ * MACs; they are part of the normal 4-word tx control descriptor (see above)
+ * but we handle them through a separate function for better abstraction.
+ *
+ * Returns 0 on success or -EINVAL on invalid input
  */
 int
-ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
-       unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2,
-       u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3)
+ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah,
+                       struct ath5k_desc *desc,
+                       u_int tx_rate1, u_int tx_tries1,
+                       u_int tx_rate2, u_int tx_tries2,
+                       u_int tx_rate3, u_int tx_tries3)
 {
        struct ath5k_hw_4w_tx_ctl *tx_ctl;
 
@@ -350,11 +430,16 @@ ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
 * TX Status descriptors *
 \***********************/
 
-/*
- * Process the tx status descriptor on 5210/5211
+/**
+ * ath5k_hw_proc_2word_tx_status() - Process a tx status descriptor on 5210/1
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @ts: The &struct ath5k_tx_status
  */
-static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
-               struct ath5k_desc *desc, struct ath5k_tx_status *ts)
+static int
+ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
+                               struct ath5k_desc *desc,
+                               struct ath5k_tx_status *ts)
 {
        struct ath5k_hw_2w_tx_ctl *tx_ctl;
        struct ath5k_hw_tx_status *tx_status;
@@ -399,11 +484,16 @@ static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
        return 0;
 }
 
-/*
- * Process a tx status descriptor on 5212
+/**
+ * ath5k_hw_proc_4word_tx_status() - Process a tx status descriptor on 5212
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @ts: The &struct ath5k_tx_status
  */
-static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
-               struct ath5k_desc *desc, struct ath5k_tx_status *ts)
+static int
+ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
+                               struct ath5k_desc *desc,
+                               struct ath5k_tx_status *ts)
 {
        struct ath5k_hw_4w_tx_ctl *tx_ctl;
        struct ath5k_hw_tx_status *tx_status;
@@ -460,11 +550,17 @@ static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
 * RX Descriptors *
 \****************/
 
-/*
- * Initialize an rx control descriptor
+/**
+ * ath5k_hw_setup_rx_desc() - Initialize an rx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @size: RX buffer length in bytes
+ * @flags: One of AR5K_RXDESC_* flags
  */
-int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
-                          u32 size, unsigned int flags)
+int
+ath5k_hw_setup_rx_desc(struct ath5k_hw *ah,
+                       struct ath5k_desc *desc,
+                       u32 size, unsigned int flags)
 {
        struct ath5k_hw_rx_ctl *rx_ctl;
 
@@ -491,11 +587,22 @@ int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
        return 0;
 }
 
-/*
- * Process the rx status descriptor on 5210/5211
+/**
+ * ath5k_hw_proc_5210_rx_status() - Process the rx status descriptor on 5210/1
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @rs: The &struct ath5k_rx_status
+ *
+ * Internal function used to process an RX status descriptor
+ * on AR5210/5211 MAC.
+ *
+ * Returns 0 on success or -EINPROGRESS in case we haven't received the whole
+ * frame yet.
  */
-static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
-               struct ath5k_desc *desc, struct ath5k_rx_status *rs)
+static int
+ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
+                               struct ath5k_desc *desc,
+                               struct ath5k_rx_status *rs)
 {
        struct ath5k_hw_rx_status *rx_status;
 
@@ -574,12 +681,22 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
        return 0;
 }
 
-/*
- * Process the rx status descriptor on 5212
+/**
+ * ath5k_hw_proc_5212_rx_status() - Process the rx status descriptor on 5212
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @rs: The &struct ath5k_rx_status
+ *
+ * Internal function used to process an RX status descriptor
+ * on AR5212 and later MAC.
+ *
+ * Returns 0 on success or -EINPROGRESS in case we haven't received the whole
+ * frame yet.
  */
-static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
-                                       struct ath5k_desc *desc,
-                                       struct ath5k_rx_status *rs)
+static int
+ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
+                               struct ath5k_desc *desc,
+                               struct ath5k_rx_status *rs)
 {
        struct ath5k_hw_rx_status *rx_status;
        u32 rxstat0, rxstat1;
@@ -646,10 +763,16 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
 * Attach *
 \********/
 
-/*
- * Init function pointers inside ath5k_hw struct
+/**
+ * ath5k_hw_init_desc_functions() - Init function pointers inside ah
+ * @ah: The &struct ath5k_hw
+ *
+ * Maps the internal descriptor functions to the function pointers in ah, used
+ * by the driver above. This is used as an abstraction layer to handle the
+ * various chips the same way.
  */
-int ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
+int
+ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
 {
        if (ah->ah_version == AR5K_AR5212) {
                ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
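
A hedged sketch of how code above this abstraction uses the hooks;
ah_proc_rx_desc is assumed to be the rx-status hook set up alongside
ah_setup_tx_desc:

	/* the caller never checks ah->ah_version again, it just goes
	 * through the hook; -EINPROGRESS means hw still owns the desc */
	ret = ah->ah_proc_rx_desc(ah, desc, &rs);
	if (ret == -EINPROGRESS)
		return;
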
index cfd529b..8d6c01a 100644 (file)
  * RX/TX descriptor structures
  */
 
-/*
- * Common hardware RX control descriptor
+/**
+ * struct ath5k_hw_rx_ctl - Common hardware RX control descriptor
+ * @rx_control_0: RX control word 0
+ * @rx_control_1: RX control word 1
  */
 struct ath5k_hw_rx_ctl {
-       u32     rx_control_0; /* RX control word 0 */
-       u32     rx_control_1; /* RX control word 1 */
+       u32     rx_control_0;
+       u32     rx_control_1;
 } __packed __aligned(4);
 
 /* RX control word 1 fields/flags */
 #define AR5K_DESC_RX_CTL1_BUF_LEN              0x00000fff /* data buffer length */
 #define AR5K_DESC_RX_CTL1_INTREQ               0x00002000 /* RX interrupt request */
 
-/*
- * Common hardware RX status descriptor
+/**
+ * struct ath5k_hw_rx_status - Common hardware RX status descriptor
+ * @rx_status_0: RX status word 0
+ * @rx_status_1: RX status word 1
+ *
  * 5210, 5211 and 5212 differ only in the fields and flags defined below
  */
 struct ath5k_hw_rx_status {
-       u32     rx_status_0; /* RX status word 0 */
-       u32     rx_status_1; /* RX status word 1 */
+       u32     rx_status_0;
+       u32     rx_status_1;
 } __packed __aligned(4);
 
 /* 5210/5211 */
@@ -98,17 +103,36 @@ struct ath5k_hw_rx_status {
 
 /**
  * enum ath5k_phy_error_code - PHY Error codes
+ * @AR5K_RX_PHY_ERROR_UNDERRUN: Transmit underrun, [5210] No error
+ * @AR5K_RX_PHY_ERROR_TIMING: Timing error
+ * @AR5K_RX_PHY_ERROR_PARITY: Illegal parity
+ * @AR5K_RX_PHY_ERROR_RATE: Illegal rate
+ * @AR5K_RX_PHY_ERROR_LENGTH: Illegal length
+ * @AR5K_RX_PHY_ERROR_RADAR: Radar detect, [5210] 64 QAM rate
+ * @AR5K_RX_PHY_ERROR_SERVICE: Illegal service
+ * @AR5K_RX_PHY_ERROR_TOR: Transmit override receive
+ * @AR5K_RX_PHY_ERROR_OFDM_TIMING: OFDM Timing error [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY: OFDM Signal parity error [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL: OFDM Illegal rate [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_LENGTH_ILLEGAL: OFDM Illegal length [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_POWER_DROP: OFDM Power drop [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_SERVICE: OFDM Service (?) [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_RESTART: OFDM Restart (?) [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_TIMING: CCK Timing error [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_HEADER_CRC: Header CRC error [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_RATE_ILLEGAL: Illegal rate [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_SERVICE: CCK Service (?) [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_RESTART: CCK Restart (?) [5212+]
  */
 enum ath5k_phy_error_code {
-       AR5K_RX_PHY_ERROR_UNDERRUN              = 0,    /* Transmit underrun, [5210] No error */
-       AR5K_RX_PHY_ERROR_TIMING                = 1,    /* Timing error */
-       AR5K_RX_PHY_ERROR_PARITY                = 2,    /* Illegal parity */
-       AR5K_RX_PHY_ERROR_RATE                  = 3,    /* Illegal rate */
-       AR5K_RX_PHY_ERROR_LENGTH                = 4,    /* Illegal length */
-       AR5K_RX_PHY_ERROR_RADAR                 = 5,    /* Radar detect, [5210] 64 QAM rate */
-       AR5K_RX_PHY_ERROR_SERVICE               = 6,    /* Illegal service */
-       AR5K_RX_PHY_ERROR_TOR                   = 7,    /* Transmit override receive */
-       /* these are specific to the 5212 */
+       AR5K_RX_PHY_ERROR_UNDERRUN              = 0,
+       AR5K_RX_PHY_ERROR_TIMING                = 1,
+       AR5K_RX_PHY_ERROR_PARITY                = 2,
+       AR5K_RX_PHY_ERROR_RATE                  = 3,
+       AR5K_RX_PHY_ERROR_LENGTH                = 4,
+       AR5K_RX_PHY_ERROR_RADAR                 = 5,
+       AR5K_RX_PHY_ERROR_SERVICE               = 6,
+       AR5K_RX_PHY_ERROR_TOR                   = 7,
        AR5K_RX_PHY_ERROR_OFDM_TIMING           = 17,
        AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY    = 18,
        AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL     = 19,
@@ -123,12 +147,14 @@ enum ath5k_phy_error_code {
        AR5K_RX_PHY_ERROR_CCK_RESTART           = 31,
 };
 
-/*
- * 5210/5211 hardware 2-word TX control descriptor
+/**
+ * struct ath5k_hw_2w_tx_ctl  - 5210/5211 hardware 2-word TX control descriptor
+ * @tx_control_0: TX control word 0
+ * @tx_control_1: TX control word 1
  */
 struct ath5k_hw_2w_tx_ctl {
-       u32     tx_control_0; /* TX control word 0 */
-       u32     tx_control_1; /* TX control word 1 */
+       u32     tx_control_0;
+       u32     tx_control_1;
 } __packed __aligned(4);
 
 /* TX control word 0 fields/flags */
@@ -177,14 +203,18 @@ struct ath5k_hw_2w_tx_ctl {
 #define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS    4
 #define AR5K_AR5211_TX_DESC_FRAME_TYPE_PRESP   4
 
-/*
- * 5212 hardware 4-word TX control descriptor
+/**
+ * struct ath5k_hw_4w_tx_ctl - 5212 hardware 4-word TX control descriptor
+ * @tx_control_0: TX control word 0
+ * @tx_control_1: TX control word 1
+ * @tx_control_2: TX control word 2
+ * @tx_control_3: TX control word 3
  */
 struct ath5k_hw_4w_tx_ctl {
-       u32     tx_control_0; /* TX control word 0 */
-       u32     tx_control_1; /* TX control word 1 */
-       u32     tx_control_2; /* TX control word 2 */
-       u32     tx_control_3; /* TX control word 3 */
+       u32     tx_control_0;
+       u32     tx_control_1;
+       u32     tx_control_2;
+       u32     tx_control_3;
 } __packed __aligned(4);
 
 /* TX control word 0 fields/flags */
@@ -238,12 +268,14 @@ struct ath5k_hw_4w_tx_ctl {
 #define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE      0x01f00000 /* RTS or CTS rate */
 #define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE_S    20
 
-/*
- * Common TX status descriptor
+/**
+ * struct ath5k_hw_tx_status - Common TX status descriptor
+ * @tx_status_0: TX status word 0
+ * @tx_status_1: TX status word 1
  */
 struct ath5k_hw_tx_status {
-       u32     tx_status_0; /* TX status word 0 */
-       u32     tx_status_1; /* TX status word 1 */
+       u32     tx_status_0;
+       u32     tx_status_1;
 } __packed __aligned(4);
 
 /* TX status word 0 fields/flags */
@@ -276,37 +308,47 @@ struct ath5k_hw_tx_status {
 #define AR5K_DESC_TX_STATUS1_COMP_SUCCESS_5212 0x00800000 /* [5212] compression status */
 #define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212 0x01000000 /* [5212] transmit antenna */
 
-/*
- * 5210/5211 hardware TX descriptor
+/**
+ * struct ath5k_hw_5210_tx_desc - 5210/5211 hardware TX descriptor
+ * @tx_ctl: The &struct ath5k_hw_2w_tx_ctl
+ * @tx_stat: The &struct ath5k_hw_tx_status
  */
 struct ath5k_hw_5210_tx_desc {
        struct ath5k_hw_2w_tx_ctl       tx_ctl;
        struct ath5k_hw_tx_status       tx_stat;
 } __packed __aligned(4);
 
-/*
- * 5212 hardware TX descriptor
+/**
+ * struct ath5k_hw_5212_tx_desc - 5212 hardware TX descriptor
+ * @tx_ctl: The &struct ath5k_hw_4w_tx_ctl
+ * @tx_stat: The &struct ath5k_hw_tx_status
  */
 struct ath5k_hw_5212_tx_desc {
        struct ath5k_hw_4w_tx_ctl       tx_ctl;
        struct ath5k_hw_tx_status       tx_stat;
 } __packed __aligned(4);
 
-/*
- * Common hardware RX descriptor
+/**
+ * struct ath5k_hw_all_rx_desc - Common hardware RX descriptor
+ * @rx_ctl: The &struct ath5k_hw_rx_ctl
+ * @rx_stat: The &struct ath5k_hw_rx_status
  */
 struct ath5k_hw_all_rx_desc {
        struct ath5k_hw_rx_ctl          rx_ctl;
        struct ath5k_hw_rx_status       rx_stat;
 } __packed __aligned(4);
 
-/*
- * Atheros hardware DMA descriptor
+/**
+ * struct ath5k_desc - Atheros hardware DMA descriptor
+ * @ds_link: Physical address of the next descriptor
+ * @ds_data: Physical address of data buffer (skb)
+ * @ud: Union containing hw_5xxx_tx_desc structs and hw_all_rx_desc
+ *
  * This is read and written to by the hardware
  */
 struct ath5k_desc {
-       u32     ds_link;        /* physical address of the next descriptor */
-       u32     ds_data;        /* physical address of data buffer (skb) */
+       u32     ds_link;
+       u32     ds_data;
 
        union {
                struct ath5k_hw_5210_tx_desc    ds_tx5210;
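
For illustration, a sketch of how ds_link chains descriptors into a ring;
descs and descs_paddr are placeholders for a coherent DMA allocation done
elsewhere:

	/* point each descriptor to the next one and wrap the last around */
	for (i = 0; i < n; i++)
		descs[i].ds_link = descs_paddr +
			((i + 1) % n) * sizeof(struct ath5k_desc);
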
index 2481f9c..5cc9aa8 100644 (file)
 * DMA and interrupt masking functions *
 \*************************************/
 
-/*
- * dma.c - DMA and interrupt masking functions
+/**
+ * DOC: DMA and interrupt masking functions
  *
  * Here we setup descriptor pointers (rxdp/txdp) start/stop dma engine and
  * handle queue setup for 5210 chipset (rest are handled on qcu.c).
  * Also we setup interrupt mask register (IMR) and read the various interrupt
  * status registers (ISR).
- *
- * TODO: Handle SISR on 5211+ and introduce a function to return the queue
- * number that resulted the interrupt.
  */
 
 #include "ath5k.h"
 \*********/
 
 /**
- * ath5k_hw_start_rx_dma - Start DMA receive
- *
+ * ath5k_hw_start_rx_dma() - Start DMA receive
  * @ah:        The &struct ath5k_hw
  */
-void ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
+void
+ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
 {
        ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
        ath5k_hw_reg_read(ah, AR5K_CR);
 }
 
 /**
- * ath5k_hw_stop_rx_dma - Stop DMA receive
- *
+ * ath5k_hw_stop_rx_dma() - Stop DMA receive
  * @ah:        The &struct ath5k_hw
  */
-static int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
+static int
+ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
 {
        unsigned int i;
 
@@ -79,24 +76,24 @@ static int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
 }
 
 /**
- * ath5k_hw_get_rxdp - Get RX Descriptor's address
- *
+ * ath5k_hw_get_rxdp() - Get RX Descriptor's address
  * @ah: The &struct ath5k_hw
  */
-u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah)
+u32
+ath5k_hw_get_rxdp(struct ath5k_hw *ah)
 {
        return ath5k_hw_reg_read(ah, AR5K_RXDP);
 }
 
 /**
- * ath5k_hw_set_rxdp - Set RX Descriptor's address
- *
+ * ath5k_hw_set_rxdp() - Set RX Descriptor's address
  * @ah: The &struct ath5k_hw
  * @phys_addr: RX descriptor address
  *
  * Returns -EIO if rx is active
  */
-int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
+int
+ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
 {
        if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) {
                ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
@@ -114,8 +111,7 @@ int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
 \**********/
 
 /**
- * ath5k_hw_start_tx_dma - Start DMA transmit for a specific queue
- *
+ * ath5k_hw_start_tx_dma() - Start DMA transmit for a specific queue
  * @ah: The &struct ath5k_hw
  * @queue: The hw queue number
  *
@@ -128,7 +124,8 @@ int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
  * NOTE: Must be called after setting up tx control descriptor for that
  * queue (see below).
  */
-int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
+int
+ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
 {
        u32 tx_queue;
 
@@ -177,17 +174,16 @@ int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
 }
 
 /**
- * ath5k_hw_stop_tx_dma - Stop DMA transmit on a specific queue
- *
+ * ath5k_hw_stop_tx_dma() - Stop DMA transmit on a specific queue
  * @ah: The &struct ath5k_hw
  * @queue: The hw queue number
  *
  * Stop DMA transmit on a specific hw queue and drain queue so we don't
  * have any pending frames. Returns -EBUSY if we still have pending frames,
  * -EINVAL if queue number is out of range or inactive.
- *
  */
-static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
+static int
+ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
 {
        unsigned int i = 40;
        u32 tx_queue, pending;
@@ -320,14 +316,14 @@ static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
 }
 
 /**
- * ath5k_hw_stop_beacon_queue - Stop beacon queue
- *
- * @ah The &struct ath5k_hw
- * @queue The queue number
+ * ath5k_hw_stop_beacon_queue() - Stop beacon queue
+ * @ah: The &struct ath5k_hw
+ * @queue: The queue number
  *
  * Returns -EIO if queue didn't stop
  */
-int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
+int
+ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
 {
        int ret;
        ret = ath5k_hw_stop_tx_dma(ah, queue);
@@ -340,8 +336,7 @@ int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
 }
 
 /**
- * ath5k_hw_get_txdp - Get TX Descriptor's address for a specific queue
- *
+ * ath5k_hw_get_txdp() - Get TX Descriptor's address for a specific queue
  * @ah: The &struct ath5k_hw
  * @queue: The hw queue number
  *
@@ -352,7 +347,8 @@ int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
  *
  * XXX: Is TXDP read and clear ?
  */
-u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
+u32
+ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
 {
        u16 tx_reg;
 
@@ -382,10 +378,10 @@ u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
 }
 
 /**
- * ath5k_hw_set_txdp - Set TX Descriptor's address for a specific queue
- *
+ * ath5k_hw_set_txdp() - Set TX Descriptor's address for a specific queue
  * @ah: The &struct ath5k_hw
  * @queue: The hw queue number
+ * @phys_addr: The physical address
  *
  * Set TX descriptor's address for a specific queue. For 5210 we ignore
  * the queue number and we use tx queue type since we only have 2 queues
@@ -394,7 +390,8 @@ u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
  * Returns -EINVAL if queue type is invalid for 5210 and -EIO if queue is still
  * active.
  */
-int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
+int
+ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
 {
        u16 tx_reg;
 
@@ -435,8 +432,7 @@ int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
 }
 
 /**
- * ath5k_hw_update_tx_triglevel - Update tx trigger level
- *
+ * ath5k_hw_update_tx_triglevel() - Update tx trigger level
  * @ah: The &struct ath5k_hw
  * @increase: Flag to force increase of trigger level
  *
@@ -444,15 +440,15 @@ int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
  * buffer (aka FIFO threshold) that is used to indicate when PCU flushes
  * the buffer and transmits its data. Lowering this results in sending small
  * frames more quickly but can lead to tx underruns, raising it a lot can
- * result other problems (i think bmiss is related). Right now we start with
- * the lowest possible (64Bytes) and if we get tx underrun we increase it using
- * the increase flag. Returns -EIO if we have reached maximum/minimum.
+ * result in other problems. Right now we start with the lowest possible
+ * (64Bytes) and if we get tx underrun we increase it using the increase
+ * flag. Returns -EIO if we have reached maximum/minimum.
  *
  * XXX: Link this with tx DMA size ?
- * XXX: Use it to save interrupts ?
- * TODO: Needs testing, i think it's related to bmiss...
+ * XXX2: Use it to save interrupts ?
  */
-int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
+int
+ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
 {
        u32 trigger_level, imr;
        int ret = -EIO;
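
A hedged sketch of the intended call site, assuming AR5K_INT_TXURN is the
abstracted underrun bit reported by ath5k_hw_get_isr():

	/* raise the FIFO threshold after a tx underrun; -EIO means
	 * we already sit at the maximum trigger level */
	if (status & AR5K_INT_TXURN)
		if (ath5k_hw_update_tx_triglevel(ah, true))
			ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
				  "couldn't raise tx trigger level\n");
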
@@ -498,21 +494,20 @@ done:
 \*******************/
 
 /**
- * ath5k_hw_is_intr_pending - Check if we have pending interrupts
- *
+ * ath5k_hw_is_intr_pending() - Check if we have pending interrupts
  * @ah: The &struct ath5k_hw
  *
  * Check if we have pending interrupts to process. Returns 1 if we
  * have pending interrupts and 0 if we haven't.
  */
-bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
+bool
+ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
 {
        return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
 }
 
 /**
- * ath5k_hw_get_isr - Get interrupt status
- *
+ * ath5k_hw_get_isr() - Get interrupt status
  * @ah: The @struct ath5k_hw
  * @interrupt_mask: Driver's interrupt mask used to filter out
  * interrupts in sw.
@@ -523,62 +518,162 @@ bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
  * being mapped on some standard non hw-specific positions
  * (check out &ath5k_int).
  *
- * NOTE: We use read-and-clear register, so after this function is called ISR
- * is zeroed.
+ * NOTE: We do write-to-clear, so the active PISR/SISR bits at the time this
+ * function gets called are cleared on return.
  */
-int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
+int
+ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
 {
-       u32 data;
+       u32 data = 0;
 
        /*
-        * Read interrupt status from the Interrupt Status register
-        * on 5210
+        * Read interrupt status from Primary Interrupt
+        * Register.
+        *
+        * Note: PISR/SISR Not available on 5210
         */
        if (ah->ah_version == AR5K_AR5210) {
-               data = ath5k_hw_reg_read(ah, AR5K_ISR);
-               if (unlikely(data == AR5K_INT_NOCARD)) {
-                       *interrupt_mask = data;
+               u32 isr = 0;
+               isr = ath5k_hw_reg_read(ah, AR5K_ISR);
+               if (unlikely(isr == AR5K_INT_NOCARD)) {
+                       *interrupt_mask = isr;
                        return -ENODEV;
                }
-       } else {
+
                /*
-                * Read interrupt status from Interrupt
-                * Status Register shadow copy (Read And Clear)
-                *
-                * Note: PISR/SISR Not available on 5210
+                * Filter out the non-common bits from the interrupt
+                * status.
                 */
-               data = ath5k_hw_reg_read(ah, AR5K_RAC_PISR);
-               if (unlikely(data == AR5K_INT_NOCARD)) {
-                       *interrupt_mask = data;
+               *interrupt_mask = (isr & AR5K_INT_COMMON) & ah->ah_imr;
+
+               /* Handle INT_FATAL */
+               if (unlikely(isr & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
+                                               | AR5K_ISR_DPERR)))
+                       *interrupt_mask |= AR5K_INT_FATAL;
+
+               /*
+                * XXX: BMISS interrupts may occur after association.
+                * I found this on 5210 code but it needs testing. If this is
+                * true we should disable them before assoc and re-enable them
+                * after a successful assoc + some jiffies.
+                       interrupt_mask &= ~AR5K_INT_BMISS;
+                */
+
+               data = isr;
+       } else {
+               u32 pisr = 0;
+               u32 pisr_clear = 0;
+               u32 sisr0 = 0;
+               u32 sisr1 = 0;
+               u32 sisr2 = 0;
+               u32 sisr3 = 0;
+               u32 sisr4 = 0;
+
+               /* Read PISR and SISRs... */
+               pisr = ath5k_hw_reg_read(ah, AR5K_PISR);
+               if (unlikely(pisr == AR5K_INT_NOCARD)) {
+                       *interrupt_mask = pisr;
                        return -ENODEV;
                }
-       }
 
-       /*
-        * Get abstract interrupt mask (driver-compatible)
-        */
-       *interrupt_mask = (data & AR5K_INT_COMMON) & ah->ah_imr;
+               sisr0 = ath5k_hw_reg_read(ah, AR5K_SISR0);
+               sisr1 = ath5k_hw_reg_read(ah, AR5K_SISR1);
+               sisr2 = ath5k_hw_reg_read(ah, AR5K_SISR2);
+               sisr3 = ath5k_hw_reg_read(ah, AR5K_SISR3);
+               sisr4 = ath5k_hw_reg_read(ah, AR5K_SISR4);
 
-       if (ah->ah_version != AR5K_AR5210) {
-               u32 sisr2 = ath5k_hw_reg_read(ah, AR5K_RAC_SISR2);
+               /*
+                * PISR holds the logical OR of interrupt bits
+                * from SISR registers:
+                *
+                * TXOK and TXDESC  -> Logical OR of TXOK and TXDESC
+                *                      per-queue bits on SISR0
+                *
+                * TXERR and TXEOL -> Logical OR of TXERR and TXEOL
+                *                      per-queue bits on SISR1
+                *
+                * TXURN -> Logical OR of TXURN per-queue bits on SISR2
+                *
+                * HIUERR -> Logical OR of MCABT, SSERR and DPER bits on SISR2
+                *
+                * BCNMISC -> Logical OR of TIM, CAB_END, DTIM_SYNC
+                *              BCN_TIMEOUT, CAB_TIMEOUT and DTIM
+                *              (and TSFOOR ?) bits on SISR2
+                *
+                * QCBRORN and QCBRURN -> Logical OR of QCBRORN and
+                *                      QCBRURN per-queue bits on SISR3
+                * QTRIG -> Logical OR of QTRIG per-queue bits on SISR4
+                *
+                * If we clear these bits on PISR we'll also clear all
+                * related bits from SISRs, e.g. if we write the TXOK bit on
+                * PISR we'll clear all TXOK bits from SISR0, so if a new TXOK
+                * interrupt fires for another queue while we are reading
+                * the interrupt registers and we write back the TXOK bit on
+                * PISR we'll lose it. So make sure that we don't write back
+                * on PISR any bits that come from SISRs. Clearing them from
+                * SISRs will also clear PISR, so there is no need to worry here.
+                */
 
-               /*HIU = Host Interface Unit (PCI etc)*/
-               if (unlikely(data & (AR5K_ISR_HIUERR)))
-                       *interrupt_mask |= AR5K_INT_FATAL;
+               pisr_clear = pisr & ~AR5K_ISR_BITS_FROM_SISRS;
 
-               /*Beacon Not Ready*/
-               if (unlikely(data & (AR5K_ISR_BNR)))
-                       *interrupt_mask |= AR5K_INT_BNR;
+               /*
+                * Write to clear them...
+                * Note: This means that each bit we write back
+                * to the registers will get cleared, leaving the
+                * rest unaffected. So this won't affect new interrupts
+                * we didn't catch while reading/processing; we'll get
+                * them the next time get_isr gets called.
+                */
+               ath5k_hw_reg_write(ah, sisr0, AR5K_SISR0);
+               ath5k_hw_reg_write(ah, sisr1, AR5K_SISR1);
+               ath5k_hw_reg_write(ah, sisr2, AR5K_SISR2);
+               ath5k_hw_reg_write(ah, sisr3, AR5K_SISR3);
+               ath5k_hw_reg_write(ah, sisr4, AR5K_SISR4);
+               ath5k_hw_reg_write(ah, pisr_clear, AR5K_PISR);
+               /* Flush previous write */
+               ath5k_hw_reg_read(ah, AR5K_PISR);
 
-               if (unlikely(sisr2 & (AR5K_SISR2_SSERR |
-                                       AR5K_SISR2_DPERR |
-                                       AR5K_SISR2_MCABT)))
-                       *interrupt_mask |= AR5K_INT_FATAL;
+               /*
+                * Filter out the non-common bits from the interrupt
+                * status.
+                */
+               *interrupt_mask = (pisr & AR5K_INT_COMMON) & ah->ah_imr;
+
+
+               /* We treat TXOK,TXDESC, TXERR and TXEOL
+                * the same way (schedule the tx tasklet)
+                * so we track them all together per queue */
+               if (pisr & AR5K_ISR_TXOK)
+                       ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
+                                               AR5K_SISR0_QCU_TXOK);
 
-               if (data & AR5K_ISR_TIM)
+               if (pisr & AR5K_ISR_TXDESC)
+                       ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
+                                               AR5K_SISR0_QCU_TXDESC);
+
+               if (pisr & AR5K_ISR_TXERR)
+                       ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
+                                               AR5K_SISR1_QCU_TXERR);
+
+               if (pisr & AR5K_ISR_TXEOL)
+                       ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
+                                               AR5K_SISR1_QCU_TXEOL);
+
+               /* Currently this is not very useful since we treat
+                * all queues the same way if we get a TXURN (update
+                * tx trigger level) but we might need it later on */
+               if (pisr & AR5K_ISR_TXURN)
+                       ah->ah_txq_isr_txurn |= AR5K_REG_MS(sisr2,
+                                               AR5K_SISR2_QCU_TXURN);
+
+               /* Misc Beacon related interrupts */
+
+               /* For AR5211 */
+               if (pisr & AR5K_ISR_TIM)
                        *interrupt_mask |= AR5K_INT_TIM;
 
-               if (data & AR5K_ISR_BCNMISC) {
+               /* For AR5212+ */
+               if (pisr & AR5K_ISR_BCNMISC) {
                        if (sisr2 & AR5K_SISR2_TIM)
                                *interrupt_mask |= AR5K_INT_TIM;
                        if (sisr2 & AR5K_SISR2_DTIM)
@@ -591,63 +686,39 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
                                *interrupt_mask |= AR5K_INT_CAB_TIMEOUT;
                }
 
-               if (data & AR5K_ISR_RXDOPPLER)
-                       *interrupt_mask |= AR5K_INT_RX_DOPPLER;
-               if (data & AR5K_ISR_QCBRORN) {
+               /* The interrupts below are unlikely to happen */
+
+               /* HIU = Host Interface Unit (PCI etc)
+                * Can be one of MCABT, SSERR, DPERR from SISR2 */
+               if (unlikely(pisr & (AR5K_ISR_HIUERR)))
+                       *interrupt_mask |= AR5K_INT_FATAL;
+
+               /*Beacon Not Ready*/
+               if (unlikely(pisr & (AR5K_ISR_BNR)))
+                       *interrupt_mask |= AR5K_INT_BNR;
+
+               /* A queue got CBR overrun */
+               if (unlikely(pisr & (AR5K_ISR_QCBRORN))) {
                        *interrupt_mask |= AR5K_INT_QCBRORN;
-                       ah->ah_txq_isr |= AR5K_REG_MS(
-                                       ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
-                                       AR5K_SISR3_QCBRORN);
+                       ah->ah_txq_isr_qcborn |= AR5K_REG_MS(sisr3,
+                                               AR5K_SISR3_QCBRORN);
                }
-               if (data & AR5K_ISR_QCBRURN) {
+
+               /* A queue got CBR underrun */
+               if (unlikely(pisr & (AR5K_ISR_QCBRURN))) {
                        *interrupt_mask |= AR5K_INT_QCBRURN;
-                       ah->ah_txq_isr |= AR5K_REG_MS(
-                                       ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
-                                       AR5K_SISR3_QCBRURN);
+                       ah->ah_txq_isr_qcburn |= AR5K_REG_MS(sisr3,
+                                               AR5K_SISR3_QCBRURN);
                }
-               if (data & AR5K_ISR_QTRIG) {
+
+               /* A queue got triggered */
+               if (unlikely(pisr & (AR5K_ISR_QTRIG))) {
                        *interrupt_mask |= AR5K_INT_QTRIG;
-                       ah->ah_txq_isr |= AR5K_REG_MS(
-                                       ath5k_hw_reg_read(ah, AR5K_RAC_SISR4),
-                                       AR5K_SISR4_QTRIG);
+                       ah->ah_txq_isr_qtrig |= AR5K_REG_MS(sisr4,
+                                               AR5K_SISR4_QTRIG);
                }
 
-               if (data & AR5K_ISR_TXOK)
-                       ah->ah_txq_isr |= AR5K_REG_MS(
-                                       ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
-                                       AR5K_SISR0_QCU_TXOK);
-
-               if (data & AR5K_ISR_TXDESC)
-                       ah->ah_txq_isr |= AR5K_REG_MS(
-                                       ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
-                                       AR5K_SISR0_QCU_TXDESC);
-
-               if (data & AR5K_ISR_TXERR)
-                       ah->ah_txq_isr |= AR5K_REG_MS(
-                                       ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
-                                       AR5K_SISR1_QCU_TXERR);
-
-               if (data & AR5K_ISR_TXEOL)
-                       ah->ah_txq_isr |= AR5K_REG_MS(
-                                       ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
-                                       AR5K_SISR1_QCU_TXEOL);
-
-               if (data & AR5K_ISR_TXURN)
-                       ah->ah_txq_isr |= AR5K_REG_MS(
-                                       ath5k_hw_reg_read(ah, AR5K_RAC_SISR2),
-                                       AR5K_SISR2_QCU_TXURN);
-       } else {
-               if (unlikely(data & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
-                               | AR5K_ISR_HIUERR | AR5K_ISR_DPERR)))
-                       *interrupt_mask |= AR5K_INT_FATAL;
-
-               /*
-                * XXX: BMISS interrupts may occur after association.
-                * I found this on 5210 code but it needs testing. If this is
-                * true we should disable them before assoc and re-enable them
-                * after a successful assoc + some jiffies.
-                       interrupt_mask &= ~AR5K_INT_BMISS;
-                */
+               data = pisr;
        }
 
        /*
@@ -661,8 +732,7 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
 }
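
A hedged sketch of the top-half loop these two helpers are meant for; the
dispatch below is illustrative only, the real handler lives in the driver core:

	enum ath5k_int status;

	while (ath5k_hw_is_intr_pending(ah)) {
		if (ath5k_hw_get_isr(ah, &status))
			break;		/* -ENODEV: card is gone */
		if (unlikely(status & AR5K_INT_FATAL)) {
			/* HIU (bus) error -> schedule a chip reset */
		}
		/* remaining bits are already filtered against ah->ah_imr;
		 * hand them to the rx/tx handling paths as appropriate */
	}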
 
 /**
- * ath5k_hw_set_imr - Set interrupt mask
- *
+ * ath5k_hw_set_imr() - Set interrupt mask
  * @ah: The &struct ath5k_hw
  * @new_mask: The new interrupt mask to be set
  *
@@ -670,7 +740,8 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
  * ath5k_int bits to hw-specific bits to remove abstraction and writing
  * Interrupt Mask Register.
  */
-enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
+enum ath5k_int
+ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
 {
        enum ath5k_int old_mask, int_mask;
 
@@ -697,16 +768,14 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
                u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2)
                                & AR5K_SIMR2_QCU_TXURN;
 
+               /* Fatal interrupt abstraction for 5211+ */
                if (new_mask & AR5K_INT_FATAL) {
                        int_mask |= AR5K_IMR_HIUERR;
                        simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR
                                | AR5K_SIMR2_DPERR);
                }
 
-               /*Beacon Not Ready*/
-               if (new_mask & AR5K_INT_BNR)
-                       int_mask |= AR5K_INT_BNR;
-
+               /* Misc beacon related interrupts */
                if (new_mask & AR5K_INT_TIM)
                        int_mask |= AR5K_IMR_TIM;
 
@@ -721,8 +790,9 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
                if (new_mask & AR5K_INT_CAB_TIMEOUT)
                        simr2 |= AR5K_SISR2_CAB_TIMEOUT;
 
-               if (new_mask & AR5K_INT_RX_DOPPLER)
-                       int_mask |= AR5K_IMR_RXDOPPLER;
+               /*Beacon Not Ready*/
+               if (new_mask & AR5K_INT_BNR)
+                       int_mask |= AR5K_INT_BNR;
 
                /* Note: Per queue interrupt masks
                 * are set via ath5k_hw_reset_tx_queue() (qcu.c) */
@@ -730,10 +800,12 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
                ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);
 
        } else {
+               /* Fatal interrupt abstraction for 5210 */
                if (new_mask & AR5K_INT_FATAL)
                        int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT
                                | AR5K_IMR_HIUERR | AR5K_IMR_DPERR);
 
+               /* Only common interrupts left for 5210 (no SIMRs) */
                ath5k_hw_reg_write(ah, int_mask, AR5K_IMR);
        }
 
@@ -760,8 +832,7 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
 \********************/
 
 /**
- * ath5k_hw_dma_init - Initialize DMA unit
- *
+ * ath5k_hw_dma_init() - Initialize DMA unit
  * @ah: The &struct ath5k_hw
  *
  * Set DMA size and pre-enable interrupts
@@ -770,7 +841,8 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
  *
  * XXX: Save/restore RXDP/TXDP registers ?
  */
-void ath5k_hw_dma_init(struct ath5k_hw *ah)
+void
+ath5k_hw_dma_init(struct ath5k_hw *ah)
 {
        /*
         * Set Rx/Tx DMA Configuration
@@ -799,8 +871,7 @@ void ath5k_hw_dma_init(struct ath5k_hw *ah)
 }
 
 /**
- * ath5k_hw_dma_stop - stop DMA unit
- *
+ * ath5k_hw_dma_stop() - stop DMA unit
  * @ah: The &struct ath5k_hw
  *
  * Stop tx/rx DMA and interrupts. Returns
@@ -810,7 +881,8 @@ void ath5k_hw_dma_init(struct ath5k_hw *ah)
  * stuck frames on tx queues, only a reset
  * can fix that.
  */
-int ath5k_hw_dma_stop(struct ath5k_hw *ah)
+int
+ath5k_hw_dma_stop(struct ath5k_hw *ah)
 {
        int i, qmax, err;
        err = 0;
index 8592978..73d3dd8 100644 (file)
 #include "reg.h"
 #include "debug.h"
 
-/*
- * Set led state
+
+/**
+ * DOC: GPIO/LED functions
+ *
+ * Here we control the 6 bidirectional GPIO pins provided by the hw.
+ * We can set a GPIO pin to be an input or an output pin on GPIO control
+ * register and then read or set its status from GPIO data input/output
+ * registers.
+ *
+ * We also control the two LED pins provided by the hw: LED_0 is our
+ * "power" LED and LED_1 is our "network activity" LED, but many scenarios
+ * are available from hw. Vendors might also provide LEDs connected to the
+ * GPIO pins; we handle them through the LED subsystem in led.c
+ */
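
A minimal sketch of how the helpers below are meant to be combined; the pin
numbers are arbitrary examples, not assignments taken from any EEPROM map:

	/* drive a vendor LED on GPIO pin 1 and sample a switch on pin 0 */
	ath5k_hw_set_gpio_output(ah, 1);
	ath5k_hw_set_gpio(ah, 1, 1);			/* LED on */

	ath5k_hw_set_gpio_input(ah, 0);
	switch_closed = ath5k_hw_get_gpio(ah, 0);	/* 0 or 1 */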
+
+
+/**
+ * ath5k_hw_set_ledstate() - Set led state
+ * @ah: The &struct ath5k_hw
+ * @state: One of AR5K_LED_*
+ *
+ * Used to set the LED blinking state. This only
+ * works for the LEDs connected to the LED_0 and LED_1 pins,
+ * not for GPIO-based LEDs.
  */
-void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
+void
+ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
 {
        u32 led;
        /*5210 has different led mode handling*/
@@ -74,10 +97,13 @@ void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
                AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led_5210);
 }
 
-/*
- * Set GPIO inputs
+/**
+ * ath5k_hw_set_gpio_input() - Set GPIO inputs
+ * @ah: The &struct ath5k_hw
+ * @gpio: GPIO pin to set as input
  */
-int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
+int
+ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
 {
        if (gpio >= AR5K_NUM_GPIO)
                return -EINVAL;
@@ -89,10 +115,13 @@ int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
        return 0;
 }
 
-/*
- * Set GPIO outputs
+/**
+ * ath5k_hw_set_gpio_output() - Set GPIO outputs
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to set as output
  */
-int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
+int
+ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
 {
        if (gpio >= AR5K_NUM_GPIO)
                return -EINVAL;
@@ -104,10 +133,13 @@ int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
        return 0;
 }
 
-/*
- * Get GPIO state
+/**
+ * ath5k_hw_get_gpio() - Get GPIO state
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to read
  */
-u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
+u32
+ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
 {
        if (gpio >= AR5K_NUM_GPIO)
                return 0xffffffff;
@@ -117,10 +149,14 @@ u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
                0x1;
 }
 
-/*
- * Set GPIO state
+/**
+ * ath5k_hw_set_gpio() - Set GPIO state
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to set
+ * @val: Value to set (boolean)
  */
-int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
+int
+ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
 {
        u32 data;
 
@@ -138,10 +174,19 @@ int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
        return 0;
 }
 
-/*
- * Initialize the GPIO interrupt (RFKill switch)
+/**
+ * ath5k_hw_set_gpio_intr() - Initialize the GPIO interrupt (RFKill switch)
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to use
+ * @interrupt_level: True to generate interrupt on active pin (high)
+ *
+ * This function is used to set up the GPIO interrupt for the hw RFKill switch.
+ * That switch is connected to a GPIO pin and its number is stored on EEPROM.
+ * It can either open or close the circuit to indicate that we should disable
+ * RF/Wireless to save power (we also get that from EEPROM).
  */
-void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
+void
+ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
                u32 interrupt_level)
 {
        u32 data;
index 1ffecc0..a1ea78e 100644 (file)
 #include "reg.h"
 #include "debug.h"
 
-/*
- * Mode-independent initial register writes
+/**
+ * struct ath5k_ini - Mode-independent initial register writes
+ * @ini_register: Register address
+ * @ini_value: Default value
+ * @ini_mode: 0 to write, 1 to read (and clear)
  */
-
 struct ath5k_ini {
        u16     ini_register;
        u32     ini_value;
 
        enum {
                AR5K_INI_WRITE = 0,     /* Default */
-               AR5K_INI_READ = 1,      /* Cleared on read */
+               AR5K_INI_READ = 1,
        } ini_mode;
 };
 
-/*
- * Mode specific initial register values
+/**
+ * struct ath5k_ini_mode - Mode specific initial register values
+ * @mode_register: Register address
+ * @mode_value: Set of values for each enum ath5k_driver_mode
  */
-
 struct ath5k_ini_mode {
        u16     mode_register;
        u32     mode_value[3];
@@ -386,11 +389,10 @@ static const struct ath5k_ini ar5211_ini[] = {
 
 /* Initial mode-specific settings for AR5211
  * 5211 supports OFDM-only g (draft g) but we
- * need to test it !
- */
+ * need to test it ! */
 static const struct ath5k_ini_mode ar5211_ini_mode[] = {
        { AR5K_TXCFG,
-       /*      A/XR          B           G       */
+       /*      A          B           G       */
           { 0x00000015, 0x0000001d, 0x00000015 } },
        { AR5K_QUEUE_DFS_LOCAL_IFS(0),
           { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
@@ -460,7 +462,7 @@ static const struct ath5k_ini_mode ar5211_ini_mode[] = {
           { 0x00000010, 0x00000010, 0x00000010 } },
 };
 
-/* Initial register settings for AR5212 */
+/* Initial register settings for AR5212 and newer chips */
 static const struct ath5k_ini ar5212_ini_common_start[] = {
        { AR5K_RXDP,            0x00000000 },
        { AR5K_RXCFG,           0x00000005 },
@@ -724,7 +726,8 @@ static const struct ath5k_ini_mode ar5212_ini_mode_start[] = {
           { 0x00000000, 0x00000000, 0x00000108 } },
 };
 
-/* Initial mode-specific settings for AR5212 + RF5111 (Written after ar5212_ini) */
+/* Initial mode-specific settings for AR5212 + RF5111
+ * (Written after ar5212_ini) */
 static const struct ath5k_ini_mode rf5111_ini_mode_end[] = {
        { AR5K_TXCFG,
        /*      A/XR          B           G       */
@@ -757,6 +760,7 @@ static const struct ath5k_ini_mode rf5111_ini_mode_end[] = {
           { 0x1883800a, 0x1873800a, 0x1883800a } },
 };
 
+/* Common for all modes */
 static const struct ath5k_ini rf5111_ini_common_end[] = {
        { AR5K_DCU_FP,          0x00000000 },
        { AR5K_PHY_AGC,         0x00000000 },
@@ -774,7 +778,9 @@ static const struct ath5k_ini rf5111_ini_common_end[] = {
        { 0xa23c,               0x13c889af },
 };
 
-/* Initial mode-specific settings for AR5212 + RF5112 (Written after ar5212_ini) */
+
+/* Initial mode-specific settings for AR5212 + RF5112
+ * (Written after ar5212_ini) */
 static const struct ath5k_ini_mode rf5112_ini_mode_end[] = {
        { AR5K_TXCFG,
        /*      A/XR          B           G       */
@@ -825,7 +831,9 @@ static const struct ath5k_ini rf5112_ini_common_end[] = {
        { 0xa23c,               0x13c889af },
 };
 
-/* Initial mode-specific settings for RF5413/5414 (Written after ar5212_ini) */
+
+/* Initial mode-specific settings for RF5413/5414
+ * (Written after ar5212_ini) */
 static const struct ath5k_ini_mode rf5413_ini_mode_end[] = {
        { AR5K_TXCFG,
        /*      A/XR          B           G       */
@@ -963,7 +971,8 @@ static const struct ath5k_ini rf5413_ini_common_end[] = {
        { 0xa384, 0xf3307ff0 },
 };
 
-/* Initial mode-specific settings for RF2413/2414 (Written after ar5212_ini) */
+/* Initial mode-specific settings for RF2413/2414
+ * (Written after ar5212_ini) */
 /* XXX: a mode ? */
 static const struct ath5k_ini_mode rf2413_ini_mode_end[] = {
        { AR5K_TXCFG,
@@ -1085,7 +1094,8 @@ static const struct ath5k_ini rf2413_ini_common_end[] = {
        { 0xa384, 0xf3307ff0 },
 };
 
-/* Initial mode-specific settings for RF2425 (Written after ar5212_ini) */
+/* Initial mode-specific settings for RF2425
+ * (Written after ar5212_ini) */
 /* XXX: a mode ? */
 static const struct ath5k_ini_mode rf2425_ini_mode_end[] = {
        { AR5K_TXCFG,
@@ -1357,10 +1367,15 @@ static const struct ath5k_ini rf5112_ini_bbgain[] = {
 };
 
 
-/*
- * Write initial register dump
+/**
+ * ath5k_hw_ini_registers() - Write initial register dump common for all modes
+ * @ah: The &struct ath5k_hw
+ * @size: Dump size
+ * @ini_regs: The array of &struct ath5k_ini
+ * @skip_pcu: Skip PCU registers
  */
-static void ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
+static void
+ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
                const struct ath5k_ini *ini_regs, bool skip_pcu)
 {
        unsigned int i;
@@ -1388,7 +1403,15 @@ static void ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
        }
 }
 
-static void ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
+/**
+ * ath5k_hw_ini_mode_registers() - Write initial mode-specific register dump
+ * @ah: The &struct ath5k_hw
+ * @size: Dump size
+ * @ini_mode: The array of &struct ath5k_ini_mode
+ * @mode: One of enum ath5k_driver_mode
+ */
+static void
+ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
                unsigned int size, const struct ath5k_ini_mode *ini_mode,
                u8 mode)
 {
@@ -1402,7 +1425,17 @@ static void ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
 
 }
 
-int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
+/**
+ * ath5k_hw_write_initvals() - Write initial chip-specific register dump
+ * @ah: The &struct ath5k_hw
+ * @mode: One of enum ath5k_driver_mode
+ * @skip_pcu: Skip PCU registers
+ *
+ * Write the initial chip-specific register dump, to get the chipset to a
+ * clean and ready-to-work state after a warm reset.
+ */
+int
+ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
 {
        /*
         * Write initial register settings
index dfa48eb..849fa06 100644 (file)
@@ -98,7 +98,7 @@ ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
                                        0xffff);
                        return true;
                }
-               udelay(15);
+               usleep_range(15, 20);
        }
 
        return false;
index a7eafa3..cebfd6f 100644 (file)
 #include "reg.h"
 #include "debug.h"
 
-/*
+/**
+ * DOC: Protocol Control Unit (PCU) functions
+ *
+ * The protocol control unit is responsible for maintaining various protocol
+ * properties before a frame is sent to and after a frame is received from
+ * the baseband. To be more specific, PCU handles:
+ *
+ * - Buffering of RX and TX frames (after QCU/DCUs)
+ *
+ * - Encrypting and decrypting (using the built-in engine)
+ *
+ * - Generating ACKs, RTS/CTS frames
+ *
+ * - Maintaining TSF
+ *
+ * - FCS
+ *
+ * - Updating beacon data (with TSF etc)
+ *
+ * - Generating virtual CCA
+ *
+ * - RX/Multicast filtering
+ *
+ * - BSSID filtering
+ *
+ * - Various statistics
+ *
+ * - Different operating modes: AP, STA, IBSS
+ *
+ * Note: Most of these functions can be tweaked/bypassed so you can do
+ * them in sw above for debugging or research. For more info check out the
+ * PCU registers in reg.h.
+ */
+
+/**
+ * DOC: ACK rates
+ *
  * AR5212+ can use higher rates for ack transmission
  * based on current tx rate instead of the base rate.
  * It does this to better utilize channel usage.
- * This is a mapping between G rates (that cover both
+ * There is a mapping between G rates (that cover both
  * CCK and OFDM) and ack rates that we use when setting
  * rate -> duration table. This mapping is hw-based so
  * don't change anything.
@@ -63,17 +99,18 @@ static const unsigned int ack_rates_high[] =
 \*******************/
 
 /**
- * ath5k_hw_get_frame_duration - Get tx time of a frame
- *
+ * ath5k_hw_get_frame_duration() - Get tx time of a frame
  * @ah: The &struct ath5k_hw
  * @len: Frame's length in bytes
  * @rate: The @struct ieee80211_rate
+ * @shortpre: Indicate short preamble
  *
  * Calculate tx duration of a frame given its rate and length
  * It extends ieee80211_generic_frame_duration for non standard
  * bwmodes.
  */
-int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
+int
+ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
                int len, struct ieee80211_rate *rate, bool shortpre)
 {
        int sifs, preamble, plcp_bits, sym_time;
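
Usage-wise, a minimal sketch; the 1500-byte length is an arbitrary example and
rate is assumed to point into the current band's bitrate table:

	/* air time of a 1500 byte frame at this rate, long preamble */
	dur = ath5k_hw_get_frame_duration(ah, 1500, rate, false);
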
@@ -129,11 +166,11 @@ int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
 }
 
 /**
- * ath5k_hw_get_default_slottime - Get the default slot time for current mode
- *
+ * ath5k_hw_get_default_slottime() - Get the default slot time for current mode
  * @ah: The &struct ath5k_hw
  */
-unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
+unsigned int
+ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
 {
        struct ieee80211_channel *channel = ah->ah_current_channel;
        unsigned int slot_time;
@@ -160,11 +197,11 @@ unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
 }
 
 /**
- * ath5k_hw_get_default_sifs - Get the default SIFS for current mode
- *
+ * ath5k_hw_get_default_sifs() - Get the default SIFS for current mode
  * @ah: The &struct ath5k_hw
  */
-unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
+unsigned int
+ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
 {
        struct ieee80211_channel *channel = ah->ah_current_channel;
        unsigned int sifs;
@@ -191,17 +228,17 @@ unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
 }
 
 /**
- * ath5k_hw_update_mib_counters - Update MIB counters (mac layer statistics)
- *
+ * ath5k_hw_update_mib_counters() - Update MIB counters (mac layer statistics)
  * @ah: The &struct ath5k_hw
  *
  * Reads MIB counters from PCU and updates sw statistics. Is called after a
  * MIB interrupt, because one of these counters might have reached its maximum
  * and triggered the MIB interrupt, to let us read and clear the counter.
  *
- * Is called in interrupt context!
+ * NOTE: Is called in interrupt context!
  */
-void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
+void
+ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
 {
        struct ath5k_statistics *stats = &ah->stats;
 
@@ -219,10 +256,8 @@ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
 \******************/
 
 /**
- * ath5k_hw_write_rate_duration - fill rate code to duration table
- *
- * @ah: the &struct ath5k_hw
- * @mode: one of enum ath5k_driver_mode
+ * ath5k_hw_write_rate_duration() - Fill rate code to duration table
+ * @ah: The &struct ath5k_hw
  *
  * Write the rate code to duration table upon hw reset. This is a helper for
  * ath5k_hw_pcu_init(). It seems all this is doing is setting an ACK timeout on
@@ -236,7 +271,8 @@ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
  * that include all OFDM and CCK rates.
  *
  */
-static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
+static inline void
+ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
 {
        struct ieee80211_rate *rate;
        unsigned int i;
@@ -280,12 +316,12 @@ static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
 }
 
 /**
- * ath5k_hw_set_ack_timeout - Set ACK timeout on PCU
- *
+ * ath5k_hw_set_ack_timeout() - Set ACK timeout on PCU
  * @ah: The &struct ath5k_hw
  * @timeout: Timeout in usec
  */
-static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
+static int
+ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
 {
        if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK))
                        <= timeout)
@@ -298,12 +334,12 @@ static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
 }
 
 /**
- * ath5k_hw_set_cts_timeout - Set CTS timeout on PCU
- *
+ * ath5k_hw_set_cts_timeout() - Set CTS timeout on PCU
  * @ah: The &struct ath5k_hw
  * @timeout: Timeout in usec
  */
-static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
+static int
+ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
 {
        if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS))
                        <= timeout)
@@ -321,14 +357,14 @@ static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
 \*******************/
 
 /**
- * ath5k_hw_set_lladdr - Set station id
- *
+ * ath5k_hw_set_lladdr() - Set station id
  * @ah: The &struct ath5k_hw
- * @mac: The card's mac address
+ * @mac: The card's mac address (array of octets)
  *
  * Set station id on hw using the provided mac address
  */
-int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
+int
+ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
 {
        struct ath_common *common = ath5k_hw_common(ah);
        u32 low_id, high_id;
@@ -349,14 +385,14 @@ int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
 }
 
 /**
- * ath5k_hw_set_bssid - Set current BSSID on hw
- *
+ * ath5k_hw_set_bssid() - Set current BSSID on hw
  * @ah: The &struct ath5k_hw
  *
  * Sets the current BSSID and BSSID mask we have from the
  * common struct into the hardware
  */
-void ath5k_hw_set_bssid(struct ath5k_hw *ah)
+void
+ath5k_hw_set_bssid(struct ath5k_hw *ah)
 {
        struct ath_common *common = ath5k_hw_common(ah);
        u16 tim_offset = 0;
@@ -389,7 +425,23 @@ void ath5k_hw_set_bssid(struct ath5k_hw *ah)
        ath5k_hw_enable_pspoll(ah, NULL, 0);
 }
 
-void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
+/**
+ * ath5k_hw_set_bssid_mask() - Filter out bssids we listen
+ * @ah: The &struct ath5k_hw
+ * @mask: The BSSID mask to set (array of octets)
+ *
+ * BSSID masking is a method used by AR5212 and newer hardware to inform PCU
+ * which bits of the interface's MAC address should be looked at when trying
+ * to decide which packets to ACK. In station mode and AP mode with a single
+ * BSS every bit matters since we lock to only one BSS. In AP mode with
+ * multiple BSSes (virtual interfaces) not every bit matters because hw must
+ * accept frames for all BSSes and so we tweak some bits of our mac address
+ * in order to have multiple BSSes.
+ *
+ * For more information check out ../hw.c of the common ath module.
+ */
+void
+ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
 {
        struct ath_common *common = ath5k_hw_common(ah);
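
The actual mask computation lives in the common ath module (../hw.c); a sketch
of the idea, with addr_a/addr_b standing in for two configured BSSIDs:

	/* keep a mask bit set only where all BSSIDs agree, so the PCU
	 * ignores the differing address bits when matching frames */
	for (i = 0; i < ETH_ALEN; i++)
		mask[i] = ~(addr_a[i] ^ addr_b[i]);

	ath5k_hw_set_bssid_mask(ah, mask);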
 
@@ -400,18 +452,21 @@ void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
                ath_hw_setbssidmask(common);
 }
 
-/*
- * Set multicast filter
+/**
+ * ath5k_hw_set_mcast_filter() - Set multicast filter
+ * @ah: The &struct ath5k_hw
+ * @filter0: Lower 32 bits of multicast filter
+ * @filter1: Higher 16 bits of multicast filter
  */
-void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
+void
+ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
 {
        ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0);
        ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
 }
 
 /**
- * ath5k_hw_get_rx_filter - Get current rx filter
- *
+ * ath5k_hw_get_rx_filter() - Get current rx filter
  * @ah: The &struct ath5k_hw
  *
  * Returns the RX filter by reading rx filter and
@@ -420,7 +475,8 @@ void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
  * and pass to the driver. For a list of frame types
  * check out reg.h.
  */
-u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
+u32
+ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
 {
        u32 data, filter = 0;
 
@@ -440,8 +496,7 @@ u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
 }
 
 /**
- * ath5k_hw_set_rx_filter - Set rx filter
- *
+ * ath5k_hw_set_rx_filter() - Set rx filter
  * @ah: The &struct ath5k_hw
  * @filter: RX filter mask (see reg.h)
  *
@@ -449,7 +504,8 @@ u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
  * register on 5212 and newer chips so that we have proper PHY
  * error reporting.
  */
-void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
+void
+ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
 {
        u32 data = 0;
 
@@ -493,13 +549,13 @@ void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
 #define ATH5K_MAX_TSF_READ 10
 
 /**
- * ath5k_hw_get_tsf64 - Get the full 64bit TSF
- *
+ * ath5k_hw_get_tsf64() - Get the full 64bit TSF
  * @ah: The &struct ath5k_hw
  *
  * Returns the current TSF
  */
-u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
+u64
+ath5k_hw_get_tsf64(struct ath5k_hw *ah)
 {
        u32 tsf_lower, tsf_upper1, tsf_upper2;
        int i;
@@ -536,28 +592,30 @@ u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
        return ((u64)tsf_upper1 << 32) | tsf_lower;
 }
 
+#undef ATH5K_MAX_TSF_READ
+
 /**
- * ath5k_hw_set_tsf64 - Set a new 64bit TSF
- *
+ * ath5k_hw_set_tsf64() - Set a new 64bit TSF
  * @ah: The &struct ath5k_hw
  * @tsf64: The new 64bit TSF
  *
  * Sets the new TSF
  */
-void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64)
+void
+ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64)
 {
        ath5k_hw_reg_write(ah, tsf64 & 0xffffffff, AR5K_TSF_L32);
        ath5k_hw_reg_write(ah, (tsf64 >> 32) & 0xffffffff, AR5K_TSF_U32);
 }
 
 /**
- * ath5k_hw_reset_tsf - Force a TSF reset
- *
+ * ath5k_hw_reset_tsf() - Force a TSF reset
  * @ah: The &struct ath5k_hw
  *
  * Forces a TSF reset on PCU
  */
-void ath5k_hw_reset_tsf(struct ath5k_hw *ah)
+void
+ath5k_hw_reset_tsf(struct ath5k_hw *ah)
 {
        u32 val;
 
@@ -573,10 +631,17 @@ void ath5k_hw_reset_tsf(struct ath5k_hw *ah)
        ath5k_hw_reg_write(ah, val, AR5K_BEACON);
 }
 
-/*
- * Initialize beacon timers
+/**
+ * ath5k_hw_init_beacon_timers() - Initialize beacon timers
+ * @ah: The &struct ath5k_hw
+ * @next_beacon: Next TBTT
+ * @interval: Current beacon interval
+ *
+ * This function is used to initialize beacon timers based on current
+ * operation mode and settings.
  */
-void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
+void
+ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
 {
        u32 timer1, timer2, timer3;
 
@@ -655,8 +720,7 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
 }
 
 /**
- * ath5k_check_timer_win - Check if timer B is timer A + window
- *
+ * ath5k_check_timer_win() - Check if timer B is timer A + window
  * @a: timer a (before b)
  * @b: timer b (after a)
  * @window: difference between a and b
@@ -686,12 +750,11 @@ ath5k_check_timer_win(int a, int b, int window, int intval)
 }
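The check described above ("is timer B equal to timer A plus the window, allowing for wrap-around at the beacon interval?") can be expressed compactly with modular arithmetic. The helper below is only an illustration of that idea; the in-tree helper may enumerate the wrap-around cases differently:

    /* True when b == a + window modulo the beacon interval (illustrative only). */
    static int timer_in_window(int a, int b, int window, int intval)
    {
            return ((b - a) % intval + intval) % intval == window;
    }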
 
 /**
- * ath5k_hw_check_beacon_timers - Check if the beacon timers are correct
- *
+ * ath5k_hw_check_beacon_timers() - Check if the beacon timers are correct
  * @ah: The &struct ath5k_hw
  * @intval: beacon interval
  *
- * This is a workaround for IBSS mode:
+ * This is a workaround for IBSS mode
  *
  * The need for this function arises from the fact that we have 4 separate
  * HW timer registers (TIMER0 - TIMER3), which are closely related to the
@@ -746,14 +809,14 @@ ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval)
 }
 
 /**
- * ath5k_hw_set_coverage_class - Set IEEE 802.11 coverage class
- *
+ * ath5k_hw_set_coverage_class() - Set IEEE 802.11 coverage class
  * @ah: The &struct ath5k_hw
  * @coverage_class: IEEE 802.11 coverage class number
  *
  * Sets IFS intervals and ACK/CTS timeouts for given coverage class.
  */
-void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
+void
+ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
 {
        /* As defined by IEEE 802.11-2007 17.3.8.6 */
        int slot_time = ath5k_hw_get_default_slottime(ah) + 3 * coverage_class;
@@ -772,8 +835,7 @@ void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
 \***************************/
 
 /**
- * ath5k_hw_start_rx_pcu - Start RX engine
- *
+ * ath5k_hw_start_rx_pcu() - Start RX engine
  * @ah: The &struct ath5k_hw
  *
  * Starts RX engine on PCU so that hw can process RXed frames
@@ -781,32 +843,33 @@ void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
  *
  * NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma
  */
-void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
+void
+ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
 {
        AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
 }
 
 /**
- * at5k_hw_stop_rx_pcu - Stop RX engine
- *
+ * ath5k_hw_stop_rx_pcu() - Stop RX engine
  * @ah: The &struct ath5k_hw
  *
  * Stops RX engine on PCU
  */
-void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
+void
+ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
 {
        AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
 }
 
 /**
- * ath5k_hw_set_opmode - Set PCU operating mode
- *
+ * ath5k_hw_set_opmode() - Set PCU operating mode
  * @ah: The &struct ath5k_hw
- * @op_mode: &enum nl80211_iftype operating mode
+ * @op_mode: One of enum nl80211_iftype
  *
  * Configure PCU for the various operating modes (AP/STA etc)
  */
-int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
+int
+ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
 {
        struct ath_common *common = ath5k_hw_common(ah);
        u32 pcu_reg, beacon_reg, low_id, high_id;
@@ -873,8 +936,17 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
        return 0;
 }
 
-void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
-                                                               u8 mode)
+/**
+ * ath5k_hw_pcu_init() - Initialize PCU
+ * @ah: The &struct ath5k_hw
+ * @op_mode: One of enum nl80211_iftype
+ *
+ * This function is used to initialize PCU by setting current
+ * operation mode and various other settings.
+ */
+void
+ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
 {
        /* Set bssid and bssid mask */
        ath5k_hw_set_bssid(ah);
index 01cb72d..e1f8613 100644 (file)
@@ -1,6 +1,4 @@
 /*
- * PHY functions
- *
  * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
  * Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com>
  * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
  *
  */
 
+/***********************\
+* PHY related functions *
+\***********************/
+
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <asm/unaligned.h>
 #include "../regd.h"
 
 
+/**
+ * DOC: PHY related functions
+ *
+ * Here we handle the low-level functions related to baseband
+ * and analog frontend (RF) parts. This is by far the most complex
+ * part of the hw code so make sure you know what you are doing.
+ *
+ * Here is a list of what this is all about:
+ *
+ * - Channel setting/switching
+ *
+ * - Automatic Gain Control (AGC) calibration
+ *
+ * - Noise Floor calibration
+ *
+ * - I/Q imbalance calibration (QAM correction)
+ *
+ * - Calibration due to thermal changes (gain_F)
+ *
+ * - Spur noise mitigation
+ *
+ * - RF/PHY initialization for the various operating modes and bwmodes
+ *
+ * - Antenna control
+ *
+ * - TX power control per channel/rate/packet type
+ *
+ * Also have in mind we never got documentation for most of these
+ * functions, what we have comes mostly from Atheros's code, reverse
+ * engineering and patent docs/presentations etc.
+ */
+
+
 /******************\
 * Helper functions *
 \******************/
 
-/*
- * Get the PHY Chip revision
+/**
+ * ath5k_hw_radio_revision() - Get the PHY Chip revision
+ * @ah: The &struct ath5k_hw
+ * @band: One of enum ieee80211_band
+ *
+ * Returns the revision number of a 2GHz, 5GHz or single chip
+ * radio.
  */
-u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
+u16
+ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
 {
        unsigned int i;
        u32 srev;
@@ -58,7 +99,7 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
                return 0;
        }
 
-       mdelay(2);
+       usleep_range(2000, 2500);
 
        /* ...wait until PHY is ready and read the selected radio revision */
        ath5k_hw_reg_write(ah, 0x00001c16, AR5K_PHY(0x34));
@@ -81,10 +122,16 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
        return ret;
 }
 
-/*
- * Check if a channel is supported
+/**
+ * ath5k_channel_ok() - Check if a channel is supported by the hw
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Note: We don't do any regulatory domain checks here, it's just
+ * a sanity check.
  */
-bool ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
+bool
+ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
 {
        u16 freq = channel->center_freq;
 
@@ -101,7 +148,13 @@ bool ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
        return false;
 }
 
-bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
+/**
+ * ath5k_hw_chan_has_spur_noise() - Check if channel is sensitive to spur noise
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ */
+bool
+ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
                                struct ieee80211_channel *channel)
 {
        u8 refclk_freq;
@@ -122,11 +175,20 @@ bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
                return false;
 }
 
-/*
- * Used to modify RF Banks before writing them to AR5K_RF_BUFFER
+/**
+ * ath5k_hw_rfb_op() - Perform an operation on the given RF Buffer
+ * @ah: The &struct ath5k_hw
+ * @rf_regs: The struct ath5k_rf_reg
+ * @val: New value
+ * @reg_id: RF register ID
+ * @set: Indicate we need to swap data
+ *
+ * This is an internal function used to modify RF Banks before
+ * writing them to AR5K_RF_BUFFER. Check out rfbuffer.h for more
+ * infos.
  */
-static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah,
-                                       const struct ath5k_rf_reg *rf_regs,
+static unsigned int
+ath5k_hw_rfb_op(struct ath5k_hw *ah, const struct ath5k_rf_reg *rf_regs,
                                        u32 val, u8 reg_id, bool set)
 {
        const struct ath5k_rf_reg *rfreg = NULL;
@@ -204,8 +266,7 @@ static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah,
 }
 
 /**
- * ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212
- *
+ * ath5k_hw_write_ofdm_timings() - set OFDM timings on AR5212
  * @ah: the &struct ath5k_hw
  * @channel: the currently set channel upon reset
  *
@@ -216,10 +277,11 @@ static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah,
  * mantissa and provide these values on hw.
  *
  * For more infos I think this patent is related
- * http://www.freepatentsonline.com/7184495.html
+ * "http://www.freepatentsonline.com/7184495.html"
  */
-static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
-       struct ieee80211_channel *channel)
+static inline int
+ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
+                               struct ieee80211_channel *channel)
 {
        /* Get exponent and mantissa and set it */
        u32 coef_scaled, coef_exp, coef_man,
@@ -278,6 +340,10 @@ static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
        return 0;
 }
 
+/**
+ * ath5k_hw_phy_disable() - Disable PHY
+ * @ah: The &struct ath5k_hw
+ */
 int ath5k_hw_phy_disable(struct ath5k_hw *ah)
 {
        /*Just a try M.F.*/
@@ -286,10 +352,13 @@ int ath5k_hw_phy_disable(struct ath5k_hw *ah)
        return 0;
 }
 
-/*
- * Wait for synth to settle
+/**
+ * ath5k_hw_wait_for_synth() - Wait for synth to settle
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
  */
-static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
+static void
+ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
                        struct ieee80211_channel *channel)
 {
        /*
@@ -308,9 +377,9 @@ static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
                        delay = delay << 2;
                /* XXX: /2 on turbo ? Let's be safe
                 * for now */
-               udelay(100 + delay);
+               usleep_range(100 + delay, 100 + (2 * delay));
        } else {
-               mdelay(1);
+               usleep_range(1000, 1500);
        }
 }
 
@@ -319,7 +388,9 @@ static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
 * RF Gain optimization *
 \**********************/
 
-/*
+/**
+ * DOC: RF Gain optimization
+ *
  * This code is used to optimize RF gain on different environments
  * (temperature mostly) based on feedback from a power detector.
  *
@@ -328,22 +399,22 @@ static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
  * no gain optimization ladder-.
  *
  * For more infos check out this patent doc
- * http://www.freepatentsonline.com/7400691.html
+ * "http://www.freepatentsonline.com/7400691.html"
  *
  * This paper describes power drops as seen on the receiver due to
  * probe packets
- * http://www.cnri.dit.ie/publications/ICT08%20-%20Practical%20Issues
- * %20of%20Power%20Control.pdf
+ * "http://www.cnri.dit.ie/publications/ICT08%20-%20Practical%20Issues
+ * %20of%20Power%20Control.pdf"
  *
  * And this is the MadWiFi bug entry related to the above
- * http://madwifi-project.org/ticket/1659
+ * "http://madwifi-project.org/ticket/1659"
  * with various measurements and diagrams
- *
- * TODO: Deal with power drops due to probes by setting an appropriate
- * tx power on the probe packets ! Make this part of the calibration process.
  */
 
-/* Initialize ah_gain during attach */
+/**
+ * ath5k_hw_rfgain_opt_init() - Initialize ah_gain during attach
+ * @ah: The &struct ath5k_hw
+ */
 int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah)
 {
        /* Initialize the gain optimization values */
@@ -367,17 +438,21 @@ int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah)
        return 0;
 }
 
-/* Schedule a gain probe check on the next transmitted packet.
+/**
+ * ath5k_hw_request_rfgain_probe() - Request a PAPD probe packet
+ * @ah: The &struct ath5k_hw
+ *
+ * Schedules a gain probe check on the next transmitted packet.
  * That means our next packet is going to be sent with lower
  * tx power and a Peak to Average Power Detector (PAPD) will try
  * to measure the gain.
  *
- * XXX:  How about forcing a tx packet (bypassing PCU arbitrator etc)
+ * TODO: Force a tx packet (bypassing PCU arbitrator etc)
  * just after we enable the probe so that we don't mess with
- * standard traffic ? Maybe it's time to use sw interrupts and
- * a probe tasklet !!!
+ * standard traffic.
  */
-static void ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
+static void
+ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
 {
 
        /* Skip if gain calibration is inactive or
@@ -395,9 +470,15 @@ static void ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
 
 }
 
-/* Calculate gain_F measurement correction
- * based on the current step for RF5112 rev. 2 */
-static u32 ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_rf_gainf_corr() - Calculate Gain_F measurement correction
+ * @ah: The &struct ath5k_hw
+ *
+ * Calculate Gain_F measurement correction
+ * based on the current step for RF5112 rev. 2
+ */
+static u32
+ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
 {
        u32 mix, step;
        u32 *rf;
@@ -450,11 +531,19 @@ static u32 ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
        return ah->ah_gain.g_f_corr;
 }
 
-/* Check if current gain_F measurement is in the range of our
+/**
+ * ath5k_hw_rf_check_gainf_readback() - Validate Gain_F feedback from detector
+ * @ah: The &struct ath5k_hw
+ *
+ * Check if current gain_F measurement is in the range of our
  * power detector windows. If we get a measurement outside range
  * we know it's not accurate (detectors can't measure anything outside
- * their detection window) so we must ignore it */
-static bool ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
+ * their detection window) so we must ignore it.
+ *
+ * Returns true if readback was O.K. or false on failure
+ */
+static bool
+ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
 {
        const struct ath5k_rf_reg *rf_regs;
        u32 step, mix_ovr, level[4];
@@ -506,9 +595,15 @@ static bool ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
                        ah->ah_gain.g_current <= level[3]);
 }
 
-/* Perform gain_F adjustment by choosing the right set
- * of parameters from RF gain optimization ladder */
-static s8 ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_rf_gainf_adjust() - Perform Gain_F adjustment
+ * @ah: The &struct ath5k_hw
+ *
+ * Choose the right target gain based on current gain
+ * and RF gain optimization ladder
+ */
+static s8
+ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah)
 {
        const struct ath5k_gain_opt *go;
        const struct ath5k_gain_opt_step *g_step;
@@ -572,13 +667,18 @@ done:
        return ret;
 }
 
-/* Main callback for thermal RF gain calibration engine
+/**
+ * ath5k_hw_gainf_calibrate() - Do a gain_F calibration
+ * @ah: The &struct ath5k_hw
+ *
+ * Main callback for thermal RF gain calibration engine
  * Check for a new gain reading and schedule an adjustment
  * if needed.
  *
- * TODO: Use sw interrupt to schedule reset if gain_F needs
- * adjustment */
-enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah)
+ * Returns one of enum ath5k_rfgain codes
+ */
+enum ath5k_rfgain
+ath5k_hw_gainf_calibrate(struct ath5k_hw *ah)
 {
        u32 data, type;
        struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
@@ -638,10 +738,18 @@ done:
        return ah->ah_gain.g_state;
 }
 
-/* Write initial RF gain table to set the RF sensitivity
- * this one works on all RF chips and has nothing to do
- * with gain_F calibration */
-static int ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
+/**
+ * ath5k_hw_rfgain_init() - Write initial RF gain settings to hw
+ * @ah: The &struct ath5k_hw
+ * @band: One of enum ieee80211_band
+ *
+ * Write initial RF gain table to set the RF sensitivity.
+ *
+ * NOTE: This one works on all RF chips and has nothing to do
+ * with Gain_F calibration
+ */
+static int
+ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
 {
        const struct ath5k_ini_rfgain *ath5k_rfg;
        unsigned int i, size, index;
@@ -688,16 +796,23 @@ static int ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
 }
 
 
-
 /********************\
 * RF Registers setup *
 \********************/
 
-/*
- * Setup RF registers by writing RF buffer on hw
+/**
+ * ath5k_hw_rfregs_init() - Initialize RF register settings
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @mode: One of enum ath5k_driver_mode
+ *
+ * Setup RF registers by writing RF buffer on hw. For
+ * more infos on this, check out rfbuffer.h
  */
-static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
-       struct ieee80211_channel *channel, unsigned int mode)
+static int
+ath5k_hw_rfregs_init(struct ath5k_hw *ah,
+                       struct ieee80211_channel *channel,
+                       unsigned int mode)
 {
        const struct ath5k_rf_reg *rf_regs;
        const struct ath5k_ini_rfbuffer *ini_rfb;
@@ -1055,19 +1170,18 @@ static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
   PHY/RF channel functions
 \**************************/
 
-/*
- * Conversion needed for RF5110
+/**
+ * ath5k_hw_rf5110_chan2athchan() - Convert channel freq on RF5110
+ * @channel: The &struct ieee80211_channel
+ *
+ * Map channel frequency to IEEE channel number and convert it
+ * to an internal channel value used by the RF5110 chipset.
  */
-static u32 ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel)
+static u32
+ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel)
 {
        u32 athchan;
 
-       /*
-        * Convert IEEE channel/MHz to an internal channel value used
-        * by the AR5210 chipset. This has not been verified with
-        * newer chipsets like the AR5212A who have a completely
-        * different RF/PHY part.
-        */
        athchan = (ath5k_hw_bitswap(
                        (ieee80211_frequency_to_channel(
                                channel->center_freq) - 24) / 2, 5)
@@ -1075,10 +1189,13 @@ static u32 ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel)
        return athchan;
 }
 
-/*
- * Set channel on RF5110
+/**
+ * ath5k_hw_rf5110_channel() - Set channel frequency on RF5110
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
  */
-static int ath5k_hw_rf5110_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf5110_channel(struct ath5k_hw *ah,
                struct ieee80211_channel *channel)
 {
        u32 data;
@@ -1089,15 +1206,23 @@ static int ath5k_hw_rf5110_channel(struct ath5k_hw *ah,
        data = ath5k_hw_rf5110_chan2athchan(channel);
        ath5k_hw_reg_write(ah, data, AR5K_RF_BUFFER);
        ath5k_hw_reg_write(ah, 0, AR5K_RF_BUFFER_CONTROL_0);
-       mdelay(1);
+       usleep_range(1000, 1500);
 
        return 0;
 }
 
-/*
- * Conversion needed for 5111
+/**
+ * ath5k_hw_rf5111_chan2athchan() - Handle 2GHz channels on RF5111/2111
+ * @ieee: IEEE channel number
+ * @athchan: The &struct ath5k_athchan_2ghz
+ *
+ * In order to enable the RF2111 frequency converter on RF5111/2111 setups
+ * we need to add some offsets and extra flags to the data values we pass
+ * on to the PHY. So for every 2GHz channel this function gets called
+ * to do the conversion.
  */
-static int ath5k_hw_rf5111_chan2athchan(unsigned int ieee,
+static int
+ath5k_hw_rf5111_chan2athchan(unsigned int ieee,
                struct ath5k_athchan_2ghz *athchan)
 {
        int channel;
@@ -1123,10 +1248,13 @@ static int ath5k_hw_rf5111_chan2athchan(unsigned int ieee,
        return 0;
 }
 
-/*
- * Set channel on 5111
+/**
+ * ath5k_hw_rf5111_channel() - Set channel frequency on RF5111/2111
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
  */
-static int ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
                struct ieee80211_channel *channel)
 {
        struct ath5k_athchan_2ghz ath5k_channel_2ghz;
@@ -1171,10 +1299,20 @@ static int ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
        return 0;
 }
 
-/*
- * Set channel on 5112 and newer
+/**
+ * ath5k_hw_rf5112_channel() - Set channel frequency on 5112 and newer
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * On RF5112/2112 and newer we don't need to do any conversion.
+ * We pass the frequency value after a few modifications to the
+ * chip directly.
+ *
+ * NOTE: Make sure channel frequency given is within our range or else
+ * we might damage the chip ! Use ath5k_channel_ok before calling this one.
  */
-static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
                struct ieee80211_channel *channel)
 {
        u32 data, data0, data1, data2;
@@ -1183,17 +1321,37 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
        data = data0 = data1 = data2 = 0;
        c = channel->center_freq;
 
+       /* My guess based on code:
+        * 2GHz RF has 2 synth modes, one with a Local Oscillator
+        * at 2224 MHz and one with a LO at 2192 MHz. IF is 1520 MHz
+        * (3040/2). data0 is used to set the PLL divider and data1
+        * selects synth mode. */
        if (c < 4800) {
+               /* Channel 14 and all frequencies with 2 MHz spacing
+                * below/above (non-standard channels) */
                if (!((c - 2224) % 5)) {
+                       /* Same as (c - 2224) / 5 */
                        data0 = ((2 * (c - 704)) - 3040) / 10;
                        data1 = 1;
+               /* Channel 1 and all frequencies with 5 MHz spacing
+                * below/above (standard channels without channel 14) */
                } else if (!((c - 2192) % 5)) {
+                       /* Same as (c - 2192) / 5 */
                        data0 = ((2 * (c - 672)) - 3040) / 10;
                        data1 = 0;
                } else
                        return -EINVAL;
 
                data0 = ath5k_hw_bitswap((data0 << 2) & 0xff, 8);
+       /* This is more complex, we have a single synthesizer with
+        * 4 reference clock settings (?) based on frequency spacing
+        * and set using data2. LO is at 4800 MHz and data0 is again used
+        * to set some divider.
+        *
+        * NOTE: There is an old Atheros presentation at Stanford
+        * that mentions a method called dual direct conversion
+        * with 1GHz sliding IF for RF5110. Maybe that's what we
+        * have here, or an updated version. */
        } else if ((c % 5) != 2 || c > 5435) {
                if (!(c % 20) && c >= 5120) {
                        data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
@@ -1219,10 +1377,16 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
        return 0;
 }
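To make the synth-mode comments above concrete, here is the 2GHz "standard channel" branch applied to channel 1 as a standalone sketch (illustration only: it reproduces just the divider arithmetic visible above and skips the shift, the 8-bit bitswap and the data1/data2 register packing):

    #include <stdio.h>

    /* PLL divider value for standard 2GHz channels, i.e. the
     * "(c - 2192) % 5 == 0" branch above, before shift/bitswap. */
    static int pll_div_2ghz_std(int freq_mhz)
    {
            return ((2 * (freq_mhz - 672)) - 3040) / 10;
    }

    int main(void)
    {
            /* Channel 1 is 2412 MHz: (2 * 1740 - 3040) / 10 = 44 */
            printf("data0 (pre-bitswap) for 2412 MHz: %d\n", pll_div_2ghz_std(2412));
            return 0;
    }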
 
-/*
- * Set the channel on the RF2425
+/**
+ * ath5k_hw_rf2425_channel() - Set channel frequency on RF2425
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * AR2425/2417 have a different 2GHz RF so code changes
+ * a little bit from RF5112.
  */
-static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
                struct ieee80211_channel *channel)
 {
        u32 data, data0, data2;
@@ -1258,10 +1422,16 @@ static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
        return 0;
 }
 
-/*
- * Set a channel on the radio chip
+/**
+ * ath5k_hw_channel() - Set a channel on the radio chip
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * This is the main function called to set a channel on the
+ * radio chip based on the radio chip version.
  */
-static int ath5k_hw_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_channel(struct ath5k_hw *ah,
                struct ieee80211_channel *channel)
 {
        int ret;
@@ -1313,11 +1483,46 @@ static int ath5k_hw_channel(struct ath5k_hw *ah,
        return 0;
 }
 
+
 /*****************\
   PHY calibration
 \*****************/
 
-static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
+/**
+ * DOC: PHY Calibration routines
+ *
+ * Noise floor calibration: When we tell the hardware to
+ * perform a noise floor calibration by setting the
+ * AR5K_PHY_AGCCTL_NF bit on AR5K_PHY_AGCCTL, it will periodically
+ * sample-and-hold the minimum noise level seen at the antennas.
+ * This value is then stored in a ring buffer of recently measured
+ * noise floor values so we have a moving window of the last few
+ * samples. The median of the values in the history is then loaded
+ * into the hardware for its own use for RSSI and CCA measurements.
+ * This type of calibration doesn't interfere with traffic.
+ *
+ * AGC calibration: When we tell the hardware to perform
+ * an AGC (Automatic Gain Control) calibration by setting the
+ * AR5K_PHY_AGCCTL_CAL, hw disconnects the antennas and does
+ * a calibration on the DC offsets of ADCs. During this period
+ * rx/tx gets disabled so we have to deal with it on the driver
+ * part.
+ *
+ * I/Q calibration: When we tell the hardware to perform
+ * an I/Q calibration, it tries to correct I/Q imbalance and
+ * fix QAM constellation by sampling data from rxed frames.
+ * It doesn't interfere with traffic.
+ *
+ * For more infos on AGC and I/Q calibration check out patent doc
+ * #03/094463.
+ */
+
+/**
+ * ath5k_hw_read_measured_noise_floor() - Read measured NF from hw
+ * @ah: The &struct ath5k_hw
+ */
+static s32
+ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
 {
        s32 val;
 
@@ -1325,7 +1530,12 @@ static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
        return sign_extend32(AR5K_REG_MS(val, AR5K_PHY_NF_MINCCA_PWR), 8);
 }
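The register field read above is a 9-bit two's complement number (bit 8 is the sign bit), which is why it goes through sign_extend32(val, 8). A standalone illustration of what that conversion does (the example value is chosen arbitrarily):

    #include <stdio.h>
    #include <stdint.h>

    /* Sign-extend a 9-bit two's complement field into a normal int. */
    static int sign_extend_9bit(uint32_t field)
    {
            field &= 0x1ff;                           /* keep the 9 relevant bits */
            return (field & 0x100) ? (int)field - 0x200 : (int)field;
    }

    int main(void)
    {
            printf("%d\n", sign_extend_9bit(0x1A5));  /* prints -91, a plausible NF in dBm */
            return 0;
    }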
 
-void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_init_nfcal_hist() - Initialize NF calibration history buffer
+ * @ah: The &struct ath5k_hw
+ */
+void
+ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
 {
        int i;
 
@@ -1334,6 +1544,11 @@ void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
                ah->ah_nfcal_hist.nfval[i] = AR5K_TUNE_CCA_MAX_GOOD_VALUE;
 }
 
+/**
+ * ath5k_hw_update_nfcal_hist() - Update NF calibration history buffer
+ * @ah: The &struct ath5k_hw
+ * @noise_floor: The NF we got from hw
+ */
 static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor)
 {
        struct ath5k_nfcal_hist *hist = &ah->ah_nfcal_hist;
@@ -1341,7 +1556,12 @@ static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor)
        hist->nfval[hist->index] = noise_floor;
 }
 
-static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_get_median_noise_floor() - Get median NF from history buffer
+ * @ah: The &struct ath5k_hw
+ */
+static s16
+ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
 {
        s16 sort[ATH5K_NF_CAL_HIST_MAX];
        s16 tmp;
@@ -1364,18 +1584,16 @@ static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
        return sort[(ATH5K_NF_CAL_HIST_MAX - 1) / 2];
 }
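Taken together, ath5k_hw_init_nfcal_hist(), ath5k_hw_update_nfcal_hist() and ath5k_hw_get_median_noise_floor() implement a small ring buffer with a median filter over the last few readings. A self-contained sketch of the same idea (history depth and types are assumptions for the example, not the driver's):

    #include <string.h>

    #define NF_HIST_MAX 8                     /* assumed history depth (power of two) */

    struct nf_hist {
            int index;
            short nfval[NF_HIST_MAX];
    };

    /* Overwrite the oldest sample with the newest one. */
    static void nf_hist_update(struct nf_hist *h, short nf)
    {
            h->index = (h->index + 1) & (NF_HIST_MAX - 1);
            h->nfval[h->index] = nf;
    }

    /* Median: sort a copy of the history and take the middle element. */
    static short nf_hist_median(const struct nf_hist *h)
    {
            short sort[NF_HIST_MAX], tmp;
            int i, j;

            memcpy(sort, h->nfval, sizeof(sort));
            for (i = 0; i < NF_HIST_MAX - 1; i++)
                    for (j = NF_HIST_MAX - 1; j > i; j--)
                            if (sort[j] < sort[j - 1]) {
                                    tmp = sort[j - 1];
                                    sort[j - 1] = sort[j];
                                    sort[j] = tmp;
                            }
            return sort[(NF_HIST_MAX - 1) / 2];
    }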
 
-/*
- * When we tell the hardware to perform a noise floor calibration
- * by setting the AR5K_PHY_AGCCTL_NF bit, it will periodically
- * sample-and-hold the minimum noise level seen at the antennas.
- * This value is then stored in a ring buffer of recently measured
- * noise floor values so we have a moving window of the last few
- * samples.
+/**
+ * ath5k_hw_update_noise_floor() - Update NF on hardware
+ * @ah: The &struct ath5k_hw
  *
- * The median of the values in the history is then loaded into the
- * hardware for its own use for RSSI and CCA measurements.
+ * This is the main function we call to perform a NF calibration:
+ * it reads NF from hardware, calculates the median and updates
+ * NF on hw.
  */
-void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
+void
+ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
 {
        struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
        u32 val;
@@ -1390,6 +1608,8 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
                return;
        }
 
+       ah->ah_cal_mask |= AR5K_CALIBRATION_NF;
+
        ee_mode = ath5k_eeprom_mode_from_channel(ah->ah_current_channel);
 
        /* completed NF calibration, test threshold */
@@ -1434,20 +1654,29 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
 
        ah->ah_noise_floor = nf;
 
+       ah->ah_cal_mask &= ~AR5K_CALIBRATION_NF;
+
        ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
                "noise floor calibrated: %d\n", nf);
 }
 
-/*
- * Perform a PHY calibration on RF5110
- * -Fix BPSK/QAM Constellation (I/Q correction)
+/**
+ * ath5k_hw_rf5110_calibrate() - Perform a PHY calibration on RF5110
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Do a complete PHY calibration (AGC + NF + I/Q) on RF5110
  */
-static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
                struct ieee80211_channel *channel)
 {
        u32 phy_sig, phy_agc, phy_sat, beacon;
        int ret;
 
+       if (!(ah->ah_cal_mask & AR5K_CALIBRATION_FULL))
+               return 0;
+
        /*
         * Disable beacons and RX/TX queues, wait
         */
@@ -1456,7 +1685,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
        beacon = ath5k_hw_reg_read(ah, AR5K_BEACON_5210);
        ath5k_hw_reg_write(ah, beacon & ~AR5K_BEACON_ENABLE, AR5K_BEACON_5210);
 
-       mdelay(2);
+       usleep_range(2000, 2500);
 
        /*
         * Set the channel (with AGC turned off)
@@ -1469,7 +1698,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
         * Activate PHY and wait
         */
        ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
-       mdelay(1);
+       usleep_range(1000, 1500);
 
        AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE);
 
@@ -1506,7 +1735,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
        ath5k_hw_reg_write(ah, AR5K_PHY_RFSTG_DISABLE, AR5K_PHY_RFSTG);
        AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE);
 
-       mdelay(1);
+       usleep_range(1000, 1500);
 
        /*
         * Enable calibration and wait until completion
@@ -1537,8 +1766,9 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
        return 0;
 }
 
-/*
- * Perform I/Q calibration on RF5111/5112 and newer chips
+/**
+ * ath5k_hw_rf511x_iq_calibrate() - Perform I/Q calibration on RF5111 and newer
+ * @ah: The &struct ath5k_hw
  */
 static int
 ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
@@ -1547,12 +1777,19 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
        s32 iq_corr, i_coff, i_coffd, q_coff, q_coffd;
        int i;
 
-       if (!ah->ah_calibration ||
-               ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN)
-               return 0;
+       /* Skip if I/Q calibration is not needed or if it's still running */
+       if (!ah->ah_iq_cal_needed)
+               return -EINVAL;
+       else if (ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN) {
+               ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
+                               "I/Q calibration still running");
+               return -EBUSY;
+       }
 
        /* Calibration has finished, get the results and re-run */
-       /* work around empty results which can apparently happen on 5212 */
+
+       /* Work around for empty results which can apparently happen on 5212:
+       /* Workaround for empty results which can apparently happen on 5212:
+        * Read registers up to 10 times until we get both i_pwr and q_pwr */
                iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR);
                i_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_I);
@@ -1570,9 +1807,13 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
        else
                q_coffd = q_pwr >> 7;
 
-       /* protect against divide by 0 and loss of sign bits */
+       /* In case i_coffd became zero, cancel calibration:
+        * not only is it too small, it would also result in a divide
+        * by zero later on. */
        if (i_coffd == 0 || q_coffd < 2)
-               return 0;
+               return -ECANCELED;
+
+       /* Protect against loss of sign bits */
 
        i_coff = (-iq_corr) / i_coffd;
        i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */
@@ -1601,10 +1842,17 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
        return 0;
 }
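For reference, the correction coefficients derived above boil down to a couple of scaled divisions plus clamping to the register field widths. The sketch below mirrors the i_coff path visible in the hunk; the q_coff line and the exact divisor scaling are assumptions added for completeness, so treat it as an outline rather than the driver math:

    #include <stdint.h>

    static int clamp_int(int v, int lo, int hi)
    {
            return v < lo ? lo : (v > hi ? hi : v);
    }

    /* Returns 0 and fills the coefficients, or -1 when the measured powers
     * are too small to produce a usable (non-zero-divisor) result. */
    static int iq_coeffs(int32_t iq_corr, int32_t i_pwr, int32_t q_pwr,
                         int *i_coff, int *q_coff)
    {
            int32_t i_coffd = (i_pwr + q_pwr) >> 7;   /* assumed scaling */
            int32_t q_coffd = q_pwr >> 7;             /* assumed scaling */

            if (i_coffd == 0 || q_coffd < 2)
                    return -1;

            *i_coff = clamp_int(-iq_corr / i_coffd, -32, 31);       /* signed 6 bit */
            *q_coff = clamp_int((i_pwr / q_coffd) - 128, -16, 15);  /* signed 5 bit (assumed) */
            return 0;
    }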
 
-/*
- * Perform a PHY calibration
+/**
+ * ath5k_hw_phy_calibrate() - Perform a PHY calibration
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * The main function we call from above to perform
+ * a short or full PHY calibration based on RF chip
+ * and current channel
  */
-int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
+int
+ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
                struct ieee80211_channel *channel)
 {
        int ret;
@@ -1613,10 +1861,43 @@ int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
                return ath5k_hw_rf5110_calibrate(ah, channel);
 
        ret = ath5k_hw_rf511x_iq_calibrate(ah);
+       if (ret) {
+               ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
+                       "No I/Q correction performed (%uMHz)\n",
+                       channel->center_freq);
+
+               /* Happens all the time if there is not much
+                * traffic, consider it normal behaviour. */
+               ret = 0;
+       }
+
+       /* On full calibration do an AGC calibration and
+        * request a PAPD probe for gainf calibration if
+        * needed */
+       if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL) {
 
-       if ((ah->ah_radio == AR5K_RF5111 || ah->ah_radio == AR5K_RF5112) &&
-           (channel->hw_value != AR5K_MODE_11B))
-               ath5k_hw_request_rfgain_probe(ah);
+               AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
+                                       AR5K_PHY_AGCCTL_CAL);
+
+               ret = ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
+                       AR5K_PHY_AGCCTL_CAL | AR5K_PHY_AGCCTL_NF,
+                       0, false);
+               if (ret) {
+                       ATH5K_ERR(ah,
+                               "gain calibration timeout (%uMHz)\n",
+                               channel->center_freq);
+               }
+
+               if ((ah->ah_radio == AR5K_RF5111 ||
+                       ah->ah_radio == AR5K_RF5112)
+                       && (channel->hw_value != AR5K_MODE_11B))
+                       ath5k_hw_request_rfgain_probe(ah);
+       }
+
+       /* Update noise floor
+        * XXX: Only do this after AGC calibration */
+       if (!(ah->ah_cal_mask & AR5K_CALIBRATION_NF))
+               ath5k_hw_update_noise_floor(ah);
 
        return ret;
 }
@@ -1626,6 +1907,16 @@ int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
 * Spur mitigation functions *
 \***************************/
 
+/**
+ * ath5k_hw_set_spur_mitigation_filter() - Configure SPUR filter
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * This function gets called during PHY initialization to
+ * configure the spur filter for the given channel. Spur is noise
+ * generated due to "reflection" effects. For more information on this
+ * method check out patent US7643810
+ */
 static void
 ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
                                struct ieee80211_channel *channel)
@@ -1865,15 +2156,73 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
 * Antenna control *
 \*****************/
 
-static void /*TODO:Boundary check*/
+/**
+ * DOC: Antenna control
+ *
+ * Hw supports up to 14 antennas ! I haven't found any card that implements
+ * that. The maximum number of antennas I've seen is up to 4 (2 for 2GHz and 2
+ * for 5GHz). Antenna 1 (MAIN) should be omnidirectional, 2 (AUX)
+ * omnidirectional or sectorial and antennas 3-14 sectorial (or directional).
+ *
+ * We can have a single antenna for RX and multiple antennas for TX.
+ * RX antenna is our "default" antenna (usually antenna 1) set on
+ * DEFAULT_ANTENNA register and TX antenna is set on each TX control descriptor
+ * (0 for automatic selection, 1 - 14 antenna number).
+ *
+ * We can let hw do all the work doing fast antenna diversity for both
+ * tx and rx or we can do things manually. Here are the options we have
+ * (all are bits of STA_ID1 register):
+ *
+ * AR5K_STA_ID1_DEFAULT_ANTENNA -> When 0 is set as the TX antenna on TX
+ * control descriptor, use the default antenna to transmit or else use the last
+ * antenna on which we received an ACK.
+ *
+ * AR5K_STA_ID1_DESC_ANTENNA -> Update default antenna after each TX frame to
+ * the antenna on which we got the ACK for that frame.
+ *
+ * AR5K_STA_ID1_RTS_DEF_ANTENNA -> Use default antenna for RTS or else use the
+ * one on the TX descriptor.
+ *
+ * AR5K_STA_ID1_SELFGEN_DEF_ANT -> Use default antenna for self generated frames
+ * (ACKs etc), or else use current antenna (the one we just used for TX).
+ *
+ * Using the above we support the following scenarios:
+ *
+ * AR5K_ANTMODE_DEFAULT -> Hw handles antenna diversity etc automatically
+ *
+ * AR5K_ANTMODE_FIXED_A        -> Only antenna A (MAIN) is present
+ *
+ * AR5K_ANTMODE_FIXED_B        -> Only antenna B (AUX) is present
+ *
+ * AR5K_ANTMODE_SINGLE_AP -> Sta locked on a single ap
+ *
+ * AR5K_ANTMODE_SECTOR_AP -> AP with tx antenna set on tx desc
+ *
+ * AR5K_ANTMODE_SECTOR_STA -> STA with tx antenna set on tx desc
+ *
+ * AR5K_ANTMODE_DEBUG -> Debug mode (A -> Rx, B -> Tx)
+ *
+ * Also note that when the antenna on the tx descriptor is set to F, the card
+ * inverts the current tx antenna.
+ */
+
+/**
+ * ath5k_hw_set_def_antenna() - Set default rx antenna on AR5211/5212 and newer
+ * @ah: The &struct ath5k_hw
+ * @ant: Antenna number
+ */
+static void
 ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant)
 {
        if (ah->ah_version != AR5K_AR5210)
                ath5k_hw_reg_write(ah, ant & 0x7, AR5K_DEFAULT_ANTENNA);
 }
 
-/*
- * Enable/disable fast rx antenna diversity
+/**
+ * ath5k_hw_set_fast_div() - Enable/disable fast rx antenna diversity
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ * @enable: True to enable, false to disable
  */
 static void
 ath5k_hw_set_fast_div(struct ath5k_hw *ah, u8 ee_mode, bool enable)
@@ -1913,6 +2262,14 @@ ath5k_hw_set_fast_div(struct ath5k_hw *ah, u8 ee_mode, bool enable)
        }
 }
 
+/**
+ * ath5k_hw_set_antenna_switch() - Set up antenna switch table
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ *
+ * Switch table comes from EEPROM and includes information on controlling
+ * the 2 antenna RX attenuators
+ */
 void
 ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode)
 {
@@ -1944,8 +2301,10 @@ ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode)
                AR5K_PHY_ANT_SWITCH_TABLE_1);
 }
 
-/*
- * Set antenna operating mode
+/**
+ * ath5k_hw_set_antenna_mode() - Set antenna operating mode
+ * @ah: The &struct ath5k_hw
+ * @ant_mode: One of enum ath5k_ant_mode
  */
 void
 ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
@@ -2068,8 +2427,13 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
  * Helper functions
  */
 
-/*
- * Do linear interpolation between two given (x, y) points
+/**
+ * ath5k_get_interpolated_value() - Get interpolated Y val between two points
+ * @target: X value of the middle point
+ * @x_left: X value of the left point
+ * @x_right: X value of the right point
+ * @y_left: Y value of the left point
+ * @y_right: Y value of the right point
  */
 static s16
 ath5k_get_interpolated_value(s16 target, s16 x_left, s16 x_right,
@@ -2096,13 +2460,18 @@ ath5k_get_interpolated_value(s16 target, s16 x_left, s16 x_right,
        return result;
 }
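The helper above is plain two-point linear interpolation: the result is y_left + (target - x_left) * (y_right - y_left) / (x_right - x_left), with the degenerate x_left == x_right case handled separately. A minimal sketch (the in-tree version may round or clamp differently):

    /* Interpolate/extrapolate the Y value at 'target' from two (x, y) points. */
    static short interp_value(short target, short x_left, short x_right,
                              short y_left, short y_right)
    {
            if (x_left == x_right)          /* degenerate span, nothing to interpolate */
                    return y_left;

            return (short)(y_left + ((target - x_left) * (y_right - y_left)) /
                                    (x_right - x_left));
    }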
 
-/*
- * Find vertical boundary (min pwr) for the linear PCDAC curve.
+/**
+ * ath5k_get_linear_pcdac_min() - Find vertical boundary (min pwr) for the
+ * linear PCDAC curve
+ * @stepL: Left array with y values (pcdac steps)
+ * @stepR: Right array with y values (pcdac steps)
+ * @pwrL: Left array with x values (power steps)
+ * @pwrR: Right array with x values (power steps)
  *
  * Since we have the top of the curve and we draw the line below
  * until we reach 1 (1 pcdac step) we need to know which point
- * (x value) that is so that we don't go below y axis and have negative
- * pcdac values when creating the curve, or fill the table with zeroes.
+ * (x value) that is so that we don't go below x axis and have negative
+ * pcdac values when creating the curve, or fill the table with zeros.
  */
 static s16
 ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR,
@@ -2148,7 +2517,16 @@ ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR,
        return max(min_pwrL, min_pwrR);
 }
 
-/*
+/**
+ * ath5k_create_power_curve() - Create a Power to PDADC or PCDAC curve
+ * @pmin: Minimum power value (xmin)
+ * @pmax: Maximum power value (xmax)
+ * @pwr: Array of power steps (x values)
+ * @vpd: Array of matching PCDAC/PDADC steps (y values)
+ * @num_points: Number of provided points
+ * @vpd_table: Array to fill with the full PCDAC/PDADC values (y values)
+ * @type: One of enum ath5k_powertable_type (eeprom.h)
+ *
  * Interpolate (pwr,vpd) points to create a Power to PDADC or a
  * Power to PCDAC curve.
  *
@@ -2206,7 +2584,14 @@ ath5k_create_power_curve(s16 pmin, s16 pmax,
        }
 }
 
-/*
+/**
+ * ath5k_get_chan_pcal_surrounding_piers() - Get surrounding calibration piers
+ * for a given channel.
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @pcinfo_l: The &struct ath5k_chan_pcal_info to put the left cal. pier
+ * @pcinfo_r: The &struct ath5k_chan_pcal_info to put the right cal. pier
+ *
  * Get the surrounding per-channel power calibration piers
  * for a given frequency so that we can interpolate between
  * them and come up with an appropriate dataset for our current
@@ -2289,11 +2674,17 @@ done:
        *pcinfo_r = &pcinfo[idx_r];
 }
 
-/*
+/**
+ * ath5k_get_rate_pcal_data() - Get the interpolated per-rate power
+ * calibration data
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @rates: The &struct ath5k_rate_pcal_info to fill
+ *
  * Get the surrounding per-rate power calibration data
  * for a given frequency and interpolate between power
  * values to set max target power supported by hw for
- * each rate.
+ * each rate on this frequency.
  */
 static void
 ath5k_get_rate_pcal_data(struct ath5k_hw *ah,
@@ -2381,7 +2772,11 @@ done:
                                        rpinfo[idx_r].target_power_54);
 }
 
-/*
+/**
+ * ath5k_get_max_ctl_power() - Get max edge power for a given frequency
+ * @ah: the &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
  * Get the max edge power for this channel if
  * we have such data from EEPROM's Conformance Test
  * Limits (CTL), and limit max power if needed.
@@ -2461,8 +2856,39 @@ ath5k_get_max_ctl_power(struct ath5k_hw *ah,
  * Power to PCDAC table functions
  */
 
-/*
- * Fill Power to PCDAC table on RF5111
+/**
+ * DOC: Power to PCDAC table functions
+ *
+ * For RF5111 we have an XPD -eXternal Power Detector- curve
+ * for each calibrated channel. Each curve has 0.5dB power steps
+ * on x axis and PCDAC steps (offsets) on y axis and looks like an
+ * exponential function. To recreate the curve we read 11 points
+ * from eeprom (eeprom.c) and interpolate here.
+ *
+ * For RF5112 we have 4 XPD -eXternal Power Detector- curves
+ * for each calibrated channel on 0, -6, -12 and -18dBm but we only
+ * use the higher (3) and the lower (0) curves. Each curve again has 0.5dB
+ * power steps on x axis and PCDAC steps on y axis and looks like a
+ * linear function. To recreate the curve and pass the power values
+ * on hw, we get 4 points for xpd 0 (lower gain -> max power)
+ * and 3 points for xpd 3 (higher gain -> lower power) from eeprom (eeprom.c)
+ * and interpolate here.
+ *
+ * For a given channel we get the calibrated points (piers) for it or
+ * -if we don't have calibration data for this specific channel- from the
+ * available surrounding channels we have calibration data for, after we do a
+ * linear interpolation between them. Then since we have our calibrated points
+ * for this channel, we do again a linear interpolation between them to get the
+ * whole curve.
+ *
+ * We finally write the Y values of the curve(s) (the PCDAC values) on hw
+ */
+
+/**
+ * ath5k_fill_pwr_to_pcdac_table() - Fill Power to PCDAC table on RF5111
+ * @ah: The &struct ath5k_hw
+ * @table_min: Minimum power (x min)
+ * @table_max: Maximum power (x max)
  *
  * No further processing is needed for RF5111, the only thing we have to
  * do is fill the values below and above calibration range since eeprom data
@@ -2503,10 +2929,14 @@ ath5k_fill_pwr_to_pcdac_table(struct ath5k_hw *ah, s16* table_min,
 
 }
 
-/*
- * Combine available XPD Curves and fill Linear Power to PCDAC table
- * on RF5112
+/**
+ * ath5k_combine_linear_pcdac_curves() - Combine available PCDAC Curves
+ * @ah: The &struct ath5k_hw
+ * @table_min: Minimum power (x min)
+ * @table_max: Maximum power (x max)
+ * @pdcurves: Number of pd curves
  *
+ * Combine available XPD Curves and fill Linear Power to PCDAC table on RF5112
  * RFX112 can have up to 2 curves (one for low txpower range and one for
  * higher txpower range). We need to put them both on pcdac_out and place
  * them in the correct location. In case we only have one curve available
@@ -2608,7 +3038,10 @@ ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min,
        }
 }
 
-/* Write PCDAC values on hw */
+/**
+ * ath5k_write_pcdac_table() - Write the PCDAC values on hw
+ * @ah: The &struct ath5k_hw
+ */
 static void
 ath5k_write_pcdac_table(struct ath5k_hw *ah)
 {
@@ -2631,9 +3064,32 @@ ath5k_write_pcdac_table(struct ath5k_hw *ah)
  * Power to PDADC table functions
  */
 
-/*
- * Set the gain boundaries and create final Power to PDADC table
+/**
+ * DOC: Power to PDADC table functions
+ *
+ * For RF2413 and later we have a Power to PDADC table (Power Detector)
+ * instead of a PCDAC (Power Control) and 4 pd gain curves for each
+ * calibrated channel. Each curve has power on x axis in 0.5 dB steps and
+ * PDADC steps on y axis and looks like an exponential function like the
+ * RF5111 curve.
+ *
+ * To recreate the curves we read the points from eeprom (eeprom.c)
+ * and interpolate here. Note that in most cases only 2 (higher and lower)
+ * curves are used (like RF5112) but vendors have the opportunity to include
+ * all 4 curves on eeprom. The final curve (higher power) has an extra
+ * point for better accuracy like RF5112.
  *
+ * The process is similar to what we do above for RF5111/5112
+ */
+
+/**
+ * ath5k_combine_pwr_to_pdadc_curves() - Combine the various PDADC curves
+ * @ah: The &struct ath5k_hw
+ * @pwr_min: Minimum power (x min)
+ * @pwr_max: Maximum power (x max)
+ * @pdcurves: Number of available curves
+ *
+ * Combine the various pd curves and create the final Power to PDADC table
  * We can have up to 4 pd curves, we need to do a similar process
  * as we do for RF5112. This time we don't have an edge_flag but we
  * set the gain boundaries on a separate register.
@@ -2757,7 +3213,11 @@ ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah,
 
 }
 
-/* Write PDADC values on hw */
+/**
+ * ath5k_write_pwr_to_pdadc_table() - Write the PDADC values on hw
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ */
 static void
 ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode)
 {
@@ -2814,7 +3274,13 @@ ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode)
  * Common code for PCDAC/PDADC tables
  */
 
-/*
+/**
+ * ath5k_setup_channel_powertable() - Set up power table for this channel
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @ee_mode: One of enum ath5k_driver_mode
+ * @type: One of enum ath5k_powertable_type (eeprom.h)
+ *
  * This is the main function that uses all of the above
  * to set PCDAC/PDADC table on hw for the current channel.
  * This table is used for tx power calibration on the baseband,
@@ -3012,7 +3478,12 @@ ath5k_setup_channel_powertable(struct ath5k_hw *ah,
        return 0;
 }
 
-/* Write power table for current channel to hw */
+/**
+ * ath5k_write_channel_powertable() - Set power table for current channel on hw
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ * @type: One of enum ath5k_powertable_type (eeprom.h)
+ */
 static void
 ath5k_write_channel_powertable(struct ath5k_hw *ah, u8 ee_mode, u8 type)
 {
@@ -3022,28 +3493,36 @@ ath5k_write_channel_powertable(struct ath5k_hw *ah, u8 ee_mode, u8 type)
                ath5k_write_pcdac_table(ah);
 }
 
-/*
- * Per-rate tx power setting
+
+/**
+ * DOC: Per-rate tx power setting
  *
- * This is the code that sets the desired tx power (below
+ * This is the code that sets the desired tx power limit (below
  * maximum) on hw for each rate (we also have TPC that sets
- * power per packet). We do that by providing an index on the
- * PCDAC/PDADC table we set up.
- */
-
-/*
- * Set rate power table
+ * power per packet type). We do that by providing an index on the
+ * PCDAC/PDADC table we set up above, for each rate.
  *
  * For now we only limit txpower based on maximum tx power
- * supported by hw (what's inside rate_info). We need to limit
- * this even more, based on regulatory domain etc.
+ * supported by hw (what's inside rate_info) + conformance test
+ * limits. We need to limit this even more, based on regulatory domain
+ * etc to be safe. Normally this is done from above so we don't care
+ * here, all we care about is that the tx power we set will be O.K.
+ * for the hw (e.g. won't create noise on PA etc).
  *
- * Rate power table contains indices to PCDAC/PDADC table (0.5dB steps)
- * and is indexed as follows:
+ * Rate power table contains indices to PCDAC/PDADC table (0.5dB steps -
+ * x values) and is indexed as follows:
  * rates[0] - rates[7] -> OFDM rates
  * rates[8] - rates[14] -> CCK rates
  * rates[15] -> XR rates (they all have the same power)
  */
+
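Purely as a restatement of the layout spelled out above, the table is a flat array of 16 entries, each one an index into the PCDAC/PDADC table in 0.5dB steps (the type is illustrative):

    /* Per-rate power table shape described above. */
    unsigned char rates[16];    /* [0..7] OFDM, [8..14] CCK, [15] XR */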
+/**
+ * ath5k_setup_rate_powertable() - Set up rate power table for a given tx power
+ * @ah: The &struct ath5k_hw
+ * @max_pwr: The maximum tx power requested in 0.5dB steps
+ * @rate_info: The &struct ath5k_rate_pcal_info to fill
+ * @ee_mode: One of enum ath5k_driver_mode
+ */
 static void
 ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
                        struct ath5k_rate_pcal_info *rate_info,
@@ -3114,8 +3593,14 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
 }
 
 
-/*
- * Set transmission power
+/**
+ * ath5k_hw_txpower() - Set transmission power limit for a given channel
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @txpower: Requested tx power in 0.5dB steps
+ *
+ * Combines all of the above to set the requested tx power limit
+ * on hw.
  */
 static int
 ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
@@ -3233,7 +3718,16 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
        return 0;
 }
 
-int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
+/**
+ * ath5k_hw_set_txpower_limit() - Set txpower limit for the current channel
+ * @ah: The &struct ath5k_hw
+ * @txpower: The requested tx power limit in 0.5dB steps
+ *
+ * This function provides access to ath5k_hw_txpower to the driver in
+ * case user or an application changes it while PHY is running.
+ */
+int
+ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
 {
        ATH5K_DBG(ah, ATH5K_DEBUG_TXPOWER,
                "changing txpower to %d\n", txpower);
@@ -3241,11 +3735,26 @@ int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
        return ath5k_hw_txpower(ah, ah->ah_current_channel, txpower);
 }
 
+
 /*************\
  Init function
 \*************/
 
-int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
+/**
+ * ath5k_hw_phy_init() - Initialize PHY
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @mode: One of enum ath5k_driver_mode
+ * @fast: Try a fast channel switch instead
+ *
+ * This is the main function used during reset to initialize PHY
+ * or do a fast channel change if possible.
+ *
+ * NOTE: Do not call this one from the driver; it assumes PHY is in a
+ * warm reset state!
+ */
+int
+ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
                      u8 mode, bool fast)
 {
        struct ieee80211_channel *curr_channel;
@@ -3355,7 +3864,7 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
                if (ret)
                        return ret;
 
-               mdelay(1);
+               usleep_range(1000, 1500);
 
                /*
                 * Write RF buffer
@@ -3376,10 +3885,10 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
                }
 
        } else if (ah->ah_version == AR5K_AR5210) {
-               mdelay(1);
+               usleep_range(1000, 1500);
                /* Disable phy and wait */
                ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
-               mdelay(1);
+               usleep_range(1000, 1500);
        }
 
        /* Set channel on PHY */
@@ -3405,7 +3914,7 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
        for (i = 0; i <= 20; i++) {
                if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
                        break;
-               udelay(200);
+               usleep_range(200, 250);
        }
        ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
 
@@ -3433,9 +3942,9 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
 
        /* At the same time start I/Q calibration for QAM constellation
         * -no need for CCK- */
-       ah->ah_calibration = false;
+       ah->ah_iq_cal_needed = false;
        if (!(mode == AR5K_MODE_11B)) {
-               ah->ah_calibration = true;
+               ah->ah_iq_cal_needed = true;
                AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
                                AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
                AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
index 7766542..30b50f9 100644 (file)
  */
 
 /********************************************\
-Queue Control Unit, DFS Control Unit Functions
+Queue Control Unit, DCF Control Unit Functions
 \********************************************/
 
 #include "ath5k.h"
 #include "reg.h"
 #include "debug.h"
+#include <linux/log2.h>
+
+/**
+ * DOC: Queue Control Unit (QCU)/DCF Control Unit (DCU) functions
+ *
+ * Here we set up parameters for the 12 available TX queues. Note that
+ * on the various registers we can usually only map the first 10 of them so
+ * basically we have 10 queues to play with. Each queue has a matching
+ * QCU that controls when the queue will get triggered and multiple QCUs
+ * can be mapped to a single DCU that controls the various DFS parameters
+ * for the various queues. In our setup we have a 1:1 mapping between QCUs
+ * and DCUs allowing us to have different DFS settings for each queue.
+ *
+ * When a frame goes into a TX queue, QCU decides when it'll trigger a
+ * transmission based on various criteria (such as how much data we have inside
+ * its buffer or -if it's a beacon queue- if it's time to fire up the queue
+ * based on TSF etc), DCU adds backoff, IFSes etc and then a scheduler
+ * (arbitrator) decides the priority of each QCU based on its configuration
+ * (e.g. beacons are always transmitted when they leave DCU bypassing all other
+ * frames from other queues waiting to be transmitted). After a frame leaves
+ * the DCU it goes to PCU for further processing and then to PHY for
+ * the actual transmission.
+ */
 
 
 /******************\
 * Helper functions *
 \******************/
 
-/*
- * Get number of pending frames
- * for a specific queue [5211+]
+/**
+ * ath5k_hw_num_tx_pending() - Get number of pending frames for a given queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
  */
-u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
+u32
+ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
 {
        u32 pending;
        AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
@@ -58,10 +83,13 @@ u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
        return pending;
 }
 
-/*
- * Set a transmit queue inactive
+/**
+ * ath5k_hw_release_tx_queue() - Set a transmit queue inactive
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
  */
-void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
+void
+ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
 {
        if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
                return;
@@ -72,34 +100,56 @@ void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
        AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
 }
 
-/*
+/**
+ * ath5k_cw_validate() - Make sure the given cw is valid
+ * @cw_req: The contention window value to check
+ *
  * Make sure cw is a power of 2 minus 1 and smaller than 1024
  */
-static u16 ath5k_cw_validate(u16 cw_req)
+static u16
+ath5k_cw_validate(u16 cw_req)
 {
-       u32 cw = 1;
        cw_req = min(cw_req, (u16)1023);
 
-       while (cw < cw_req)
-               cw = (cw << 1) | 1;
+       /* Check if cw_req + 1 is a power of 2 */
+       if (is_power_of_2(cw_req + 1))
+               return cw_req;
 
-       return cw;
+       /* Check if cw_req is a power of 2 */
+       if (is_power_of_2(cw_req))
+               return cw_req - 1;
+
+       /* If none of the above holds,
+        * round up to the closest power of 2 */
+       cw_req = (u16) roundup_pow_of_two(cw_req) - 1;
+
+       return cw_req;
 }
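A few sample inputs make the rounding above concrete (illustration only, not part of the patch):

	u16 cw;

	cw = ath5k_cw_validate(15);	/* -> 15: already 2^4 - 1 */
	cw = ath5k_cw_validate(16);	/* -> 15: a power of 2, step down by one */
	cw = ath5k_cw_validate(20);	/* -> 31: rounded up to 2^5, minus 1 */
	cw = ath5k_cw_validate(5000);	/* -> 1023: clamped to the 1023 maximum */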
 
-/*
- * Get properties for a transmit queue
+/**
+ * ath5k_hw_get_tx_queueprops() - Get properties for a transmit queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ * @queue_info: The &struct ath5k_txq_info to fill
  */
-int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
+int
+ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
                struct ath5k_txq_info *queue_info)
 {
        memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
        return 0;
 }
 
-/*
- * Set properties for a transmit queue
+/**
+ * ath5k_hw_set_tx_queueprops() - Set properties for a transmit queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ * @qinfo: The &struct ath5k_txq_info to use
+ *
+ * Returns 0 on success or -EIO if queue is inactive
  */
-int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
+int
+ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
                                const struct ath5k_txq_info *qinfo)
 {
        struct ath5k_txq_info *qi;
@@ -139,10 +189,16 @@ int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
        return 0;
 }
 
-/*
- * Initialize a transmit queue
+/**
+ * ath5k_hw_setup_tx_queue() - Initialize a transmit queue
+ * @ah: The &struct ath5k_hw
+ * @queue_type: One of enum ath5k_tx_queue
+ * @queue_info: The &struct ath5k_txq_info to use
+ *
+ * Returns 0 on success, -EINVAL on invalid arguments
  */
-int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
+int
+ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
                struct ath5k_txq_info *queue_info)
 {
        unsigned int queue;
@@ -217,10 +273,16 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
 * Single QCU/DCU initialization *
 \*******************************/
 
-/*
- * Set tx retry limits on DCU
+/**
+ * ath5k_hw_set_tx_retry_limits() - Set tx retry limits on DCU
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ *
+ * This function is used when initializing a queue, to set
+ * retry limits based on ah->ah_retry_* and the chipset used.
  */
-void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
+void
+ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
                                  unsigned int queue)
 {
        /* Single data queue on AR5210 */
@@ -255,15 +317,15 @@ void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
 }
 
 /**
- * ath5k_hw_reset_tx_queue - Initialize a single hw queue
+ * ath5k_hw_reset_tx_queue() - Initialize a single hw queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
  *
- * @ah The &struct ath5k_hw
- * @queue The hw queue number
- *
- * Set DFS properties for the given transmit queue on DCU
+ * Set DCF properties for the given transmit queue on DCU
  * and configures all queue-specific parameters.
  */
-int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
+int
+ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
 {
        struct ath5k_txq_info *tq = &ah->ah_txq[queue];
 
@@ -491,10 +553,9 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
 \**************************/
 
 /**
- * ath5k_hw_set_ifs_intervals  - Set global inter-frame spaces on DCU
- *
- * @ah The &struct ath5k_hw
- * @slot_time Slot time in us
+ * ath5k_hw_set_ifs_intervals() - Set global inter-frame spaces on DCU
+ * @ah: The &struct ath5k_hw
+ * @slot_time: Slot time in us
  *
  * Sets the global IFS intervals on DCU (also works on AR5210) for
  * the given slot time and the current bwmode.
@@ -597,7 +658,15 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
 }
 
 
-int ath5k_hw_init_queues(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_init_queues() - Initialize tx queues
+ * @ah: The &struct ath5k_hw
+ *
+ * Initializes all tx queues based on the information in
+ * ah->ah_txq* set by the driver.
+ */
+int
+ath5k_hw_init_queues(struct ath5k_hw *ah)
 {
        int i, ret;
 
index f5c1000..0ea1608 100644 (file)
  * 5211/5212 we have one primary and 4 secondary registers.
  * So we have AR5K_ISR for 5210 and AR5K_PISR /SISRx for 5211/5212.
  * Most of these bits are common for all chipsets.
+ *
+ * NOTE: On 5211+, TXOK, TXDESC, TXERR, TXEOL and TXURN contain
+ * the logical OR of the per-queue interrupt bits found in the SISR registers
+ * (see below).
  */
 #define AR5K_ISR               0x001c                  /* Register Address [5210] */
 #define AR5K_PISR              0x0080                  /* Register Address [5211+] */
 #define AR5K_ISR_TXOK          0x00000040      /* Frame successfully transmitted */
 #define AR5K_ISR_TXDESC                0x00000080      /* TX descriptor request */
 #define AR5K_ISR_TXERR         0x00000100      /* Transmit error */
-#define AR5K_ISR_TXNOFRM       0x00000200      /* No frame transmitted (transmit timeout) */
+#define AR5K_ISR_TXNOFRM       0x00000200      /* No frame transmitted (transmit timeout)
+                                                * NOTE: We don't have per-queue info for this
+                                                * one, but we can enable it per-queue through
+                                                * TXNOFRM_QCU field on TXNOFRM register */
 #define AR5K_ISR_TXEOL         0x00000400      /* Empty TX descriptor */
 #define AR5K_ISR_TXURN         0x00000800      /* Transmit FIFO underrun */
 #define AR5K_ISR_MIB           0x00001000      /* Update MIB counters */
 #define AR5K_ISR_SWBA          0x00010000      /* Software beacon alert */
 #define AR5K_ISR_BRSSI         0x00020000      /* Beacon rssi below threshold (?) */
 #define AR5K_ISR_BMISS         0x00040000      /* Beacon missed */
-#define AR5K_ISR_HIUERR                0x00080000      /* Host Interface Unit error [5211+] */
+#define AR5K_ISR_HIUERR                0x00080000      /* Host Interface Unit error [5211+]
+                                                * 'or' of MCABT, SSERR, DPERR from SISR2 */
 #define AR5K_ISR_BNR           0x00100000      /* Beacon not ready [5211+] */
 #define AR5K_ISR_MCABT         0x00100000      /* Master Cycle Abort [5210] */
 #define AR5K_ISR_RXCHIRP       0x00200000      /* CHIRP Received [5212+] */
 #define AR5K_ISR_SSERR         0x00200000      /* Signaled System Error [5210] */
-#define AR5K_ISR_DPERR         0x00400000      /* Det par Error (?) [5210] */
+#define AR5K_ISR_DPERR         0x00400000      /* Bus parity error [5210] */
 #define AR5K_ISR_RXDOPPLER     0x00400000      /* Doppler chirp received [5212+] */
 #define AR5K_ISR_TIM           0x00800000      /* [5211+] */
-#define AR5K_ISR_BCNMISC       0x00800000      /* 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT,
-                                               CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */
+#define AR5K_ISR_BCNMISC       0x00800000      /* Misc beacon related interrupt
+                                                * 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT,
+                                                * CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */
 #define AR5K_ISR_GPIO          0x01000000      /* GPIO (rf kill) */
 #define AR5K_ISR_QCBRORN       0x02000000      /* QCU CBR overrun [5211+] */
 #define AR5K_ISR_QCBRURN       0x04000000      /* QCU CBR underrun [5211+] */
 #define AR5K_ISR_QTRIG         0x08000000      /* QCU scheduling trigger [5211+] */
 
+#define        AR5K_ISR_BITS_FROM_SISRS        (AR5K_ISR_TXOK | AR5K_ISR_TXDESC |\
+                                       AR5K_ISR_TXERR | AR5K_ISR_TXEOL |\
+                                       AR5K_ISR_TXURN | AR5K_ISR_HIUERR |\
+                                       AR5K_ISR_BCNMISC | AR5K_ISR_QCBRORN |\
+                                       AR5K_ISR_QCBRURN | AR5K_ISR_QTRIG)
+
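A hedged sketch (not from this patch) of how these summary bits can be used: when any bit in AR5K_ISR_BITS_FROM_SISRS is set in PISR, the per-queue details live in the secondary status registers:

	u32 pisr = ath5k_hw_reg_read(ah, AR5K_PISR);

	if (pisr & AR5K_ISR_BITS_FROM_SISRS) {
		/* Summary bit set -> read the SISR registers for the
		 * per-queue sources, e.g. the beacon bits on SISR2. */
		u32 sisr2 = ath5k_hw_reg_read(ah, AR5K_SISR2);

		if (sisr2 & AR5K_SISR2_BCN_TIMEOUT)
			/* handle beacon timeout */;
	}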
 /*
  * Secondary status registers [5211+] (0 - 4)
  *
 #define        AR5K_SISR2_BCN_TIMEOUT  0x08000000      /* Beacon Timeout [5212+] */
 #define        AR5K_SISR2_CAB_TIMEOUT  0x10000000      /* CAB Timeout [5212+] */
 #define        AR5K_SISR2_DTIM         0x20000000      /* [5212+] */
-#define        AR5K_SISR2_TSFOOR       0x80000000      /* TSF OOR (?) */
+#define        AR5K_SISR2_TSFOOR       0x80000000      /* TSF Out of range */
 
 #define AR5K_SISR3             0x0090                  /* Register Address [5211+] */
 #define AR5K_SISR3_QCBRORN     0x000003ff      /* Mask for QCBRORN */
index 2abac25..250db40 100644 (file)
@@ -19,9 +19,9 @@
  *
  */
 
-/*****************************\
-  Reset functions and helpers
-\*****************************/
+/****************************\
+  Reset function and helpers
+\****************************/
 
 #include <asm/unaligned.h>
 
 #include "debug.h"
 
 
+/**
+ * DOC: Reset function and helpers
+ *
+ * Here we implement the main reset routine, used to bring the card
+ * to a working state and ready to receive. We also handle routines
+ * that don't fit on other places such as clock, sleep and power control
+ */
+
+
 /******************\
 * Helper functions *
 \******************/
 
-/*
- * Check if a register write has been completed
+/**
+ * ath5k_hw_register_timeout() - Poll a register for a flag/field change
+ * @ah: The &struct ath5k_hw
+ * @reg: The register to read
+ * @flag: The flag/field to check on the register
+ * @val: The field value we expect (if we check a field)
+ * @is_set: Instead of checking if the flag got cleared, check if it got set
+ *
+ * Some registers contain flags that indicate that an operation is
+ * running. We use this function to poll these registers and check
+ * if these flags get cleared. We also use it to poll a register
+ * field (containing multiple flags) until it gets a specific value.
+ *
+ * Returns -EAGAIN if we exceeded AR5K_TUNE_REGISTER_TIMEOUT * 15us, or 0 on success
  */
-int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
+int
+ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
                              bool is_set)
 {
        int i;
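A hedged usage sketch, not lifted from this patch (mask and val are placeholders here): poll a reset register until the field selected by mask reads back the expected value, treating -EAGAIN as "the hardware never reached that state".

	int ret;

	/* Poll until (read(AR5K_RESET_CTL) & mask) == val; gives up after
	 * AR5K_TUNE_REGISTER_TIMEOUT iterations of roughly 15us each. */
	ret = ath5k_hw_register_timeout(ah, AR5K_RESET_CTL, mask, val, false);
	if (ret == -EAGAIN)
		return ret;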
@@ -64,35 +86,48 @@ int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
 \*************************/
 
 /**
- * ath5k_hw_htoclock - Translate usec to hw clock units
- *
+ * ath5k_hw_htoclock() - Translate usec to hw clock units
  * @ah: The &struct ath5k_hw
  * @usec: value in microseconds
+ *
+ * Translate usecs to hw clock units based on the current
+ * hw clock rate.
+ *
+ * Returns number of clock units
  */
-unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
+unsigned int
+ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
 {
        struct ath_common *common = ath5k_hw_common(ah);
        return usec * common->clockrate;
 }
 
 /**
- * ath5k_hw_clocktoh - Translate hw clock units to usec
+ * ath5k_hw_clocktoh() - Translate hw clock units to usec
+ * @ah: The &struct ath5k_hw
  * @clock: value in hw clock units
+ *
+ * Translate hw clock units to usecs based on the current
+ * hw clock rate.
+ *
+ * Returns number of usecs
  */
-unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
+unsigned int
+ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
 {
        struct ath_common *common = ath5k_hw_common(ah);
        return clock / common->clockrate;
 }
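For illustration (a hedged example, assuming a 44 MHz core clock; the actual rate depends on the mode and bwmode in use):

	/* A 9us slot time -> 9 * 44 = 396 clock units... */
	unsigned int slot_clocks = ath5k_hw_htoclock(ah, 9);

	/* ...and 396 / 44 = 9us on the way back. */
	unsigned int slot_usec = ath5k_hw_clocktoh(ah, slot_clocks);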
 
 /**
- * ath5k_hw_init_core_clock - Initialize core clock
- *
- * @ah The &struct ath5k_hw
+ * ath5k_hw_init_core_clock() - Initialize core clock
+ * @ah: The &struct ath5k_hw
  *
- * Initialize core clock parameters (usec, usec32, latencies etc).
+ * Initialize core clock parameters (usec, usec32, latencies etc),
+ * based on current bwmode and chipset properties.
  */
-static void ath5k_hw_init_core_clock(struct ath5k_hw *ah)
+static void
+ath5k_hw_init_core_clock(struct ath5k_hw *ah)
 {
        struct ieee80211_channel *channel = ah->ah_current_channel;
        struct ath_common *common = ath5k_hw_common(ah);
@@ -227,16 +262,21 @@ static void ath5k_hw_init_core_clock(struct ath5k_hw *ah)
        }
 }
 
-/*
+/**
+ * ath5k_hw_set_sleep_clock() - Setup sleep clock operation
+ * @ah: The &struct ath5k_hw
+ * @enable: Enable sleep clock operation (false to disable)
+ *
  * If there is an external 32KHz crystal available, use it
  * as ref. clock instead of 32/40MHz clock and baseband clocks
  * to save power during sleep or restore normal 32/40MHz
  * operation.
  *
- * XXX: When operating on 32KHz certain PHY registers (27 - 31,
- *     123 - 127) require delay on access.
+ * NOTE: When operating on 32KHz certain PHY registers (27 - 31,
+ * 123 - 127) require delay on access.
  */
-static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
+static void
+ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
 {
        struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
        u32 scal, spending, sclock;
@@ -340,10 +380,19 @@ static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
 * Reset/Sleep control *
 \*********************/
 
-/*
- * Reset chipset
+/**
+ * ath5k_hw_nic_reset() - Reset the various chipset units
+ * @ah: The &struct ath5k_hw
+ * @val: Mask to indicate what units to reset
+ *
+ * To reset the various chipset units we need to write
+ * the mask to AR5K_RESET_CTL and poll the register until
+ * all flags are cleared.
+ *
+ * Returns 0 if we are O.K. or -EAGAIN (from ath5k_hw_register_timeout)
  */
-static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
+static int
+ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
 {
        int ret;
        u32 mask = val ? val : ~0U;
@@ -357,7 +406,7 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
        ath5k_hw_reg_write(ah, val, AR5K_RESET_CTL);
 
        /* Wait at least 128 PCI clocks */
-       udelay(15);
+       usleep_range(15, 20);
 
        if (ah->ah_version == AR5K_AR5210) {
                val &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_DMA
@@ -382,12 +431,17 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
        return ret;
 }
 
-/*
- * Reset AHB chipset
- * AR5K_RESET_CTL_PCU flag resets WMAC
- * AR5K_RESET_CTL_BASEBAND flag resets WBB
+/**
+ * ath5k_hw_wisoc_reset() - Reset AHB chipset
+ * @ah: The &struct ath5k_hw
+ * @flags: Mask to indicate what units to reset
+ *
+ * Same as ath5k_hw_nic_reset but for AHB based devices
+ *
+ * Returns 0 if we are O.K. or -EAGAIN (from ath5k_hw_register_timeout)
  */
-static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
+static int
+ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
 {
        u32 mask = flags ? flags : ~0U;
        u32 __iomem *reg;
@@ -422,7 +476,7 @@ static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
        regval = __raw_readl(reg);
        __raw_writel(regval | val, reg);
        regval = __raw_readl(reg);
-       udelay(100);
+       usleep_range(100, 150);
 
        /* Bring BB/MAC out of reset */
        __raw_writel(regval & ~val, reg);
@@ -439,11 +493,23 @@ static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
        return 0;
 }
 
-
-/*
- * Sleep control
+/**
+ * ath5k_hw_set_power_mode() - Set power mode
+ * @ah: The &struct ath5k_hw
+ * @mode: One of enum ath5k_power_mode
+ * @set_chip: Set to true to write sleep control register
+ * @sleep_duration: How much time the device is allowed to sleep
+ * when sleep logic is enabled (in 128 microsecond increments).
+ *
+ * This function is used to configure sleep policy and allowed
+ * sleep modes. For more information check out the sleep control
+ * register on reg.h and STA_ID1.
+ *
+ * Returns 0 on success, -EIO if chip didn't wake up or -EINVAL if an invalid
+ * mode is requested.
  */
-static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
+static int
+ath5k_hw_set_power_mode(struct ath5k_hw *ah, enum ath5k_power_mode mode,
                              bool set_chip, u16 sleep_duration)
 {
        unsigned int i;
@@ -493,7 +559,7 @@ static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
 
                ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE,
                                                        AR5K_SLEEP_CTL);
-               udelay(15);
+               usleep_range(15, 20);
 
                for (i = 200; i > 0; i--) {
                        /* Check if the chip did wake up */
@@ -502,7 +568,7 @@ static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
                                break;
 
                        /* Wait a bit and retry */
-                       udelay(50);
+                       usleep_range(50, 75);
                        ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE,
                                                        AR5K_SLEEP_CTL);
                }
@@ -523,17 +589,20 @@ commit:
        return 0;
 }
 
-/*
- * Put device on hold
+/**
+ * ath5k_hw_on_hold() - Put device on hold
+ * @ah: The &struct ath5k_hw
  *
- * Put MAC and Baseband on warm reset and
- * keep that state (don't clean sleep control
- * register). After this MAC and Baseband are
- * disabled and a full reset is needed to come
- * back. This way we save as much power as possible
+ * Put MAC and Baseband on warm reset and keep that state
+ * (don't clear the sleep control register). After this, MAC
+ * and Baseband are disabled and a full reset is needed
+ * to come back. This way we save as much power as possible
  * without putting the card on full sleep.
+ *
+ * Returns 0 on success or -EIO on error
  */
-int ath5k_hw_on_hold(struct ath5k_hw *ah)
+int
+ath5k_hw_on_hold(struct ath5k_hw *ah)
 {
        struct pci_dev *pdev = ah->pdev;
        u32 bus_flags;
@@ -543,7 +612,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
                return 0;
 
        /* Make sure device is awake */
-       ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+       ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
        if (ret) {
                ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n");
                return ret;
@@ -563,7 +632,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
                ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
                        AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
                        AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
-                       mdelay(2);
+                       usleep_range(2000, 2500);
        } else {
                ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
                        AR5K_RESET_CTL_BASEBAND | bus_flags);
@@ -575,7 +644,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
        }
 
        /* ...wakeup again!*/
-       ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+       ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
        if (ret) {
                ATH5K_ERR(ah, "failed to put device on hold\n");
                return ret;
@@ -584,11 +653,18 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
        return ret;
 }
 
-/*
+/**
+ * ath5k_hw_nic_wakeup() - Force card out of sleep
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
  * Bring up MAC + PHY Chips and program PLL
- * Channel is NULL for the initial wakeup.
+ * NOTE: Channel is NULL for the initial wakeup.
+ *
+ * Returns 0 on success, -EIO on hw failure or -EINVAL for invalid channel info
  */
-int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
+int
+ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
 {
        struct pci_dev *pdev = ah->pdev;
        u32 turbo, mode, clock, bus_flags;
@@ -600,7 +676,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
 
        if ((ath5k_get_bus_type(ah) != ATH_AHB) || channel) {
                /* Wakeup the device */
-               ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+               ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
                if (ret) {
                        ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n");
                        return ret;
@@ -621,7 +697,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
                ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
                        AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
                        AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
-                       mdelay(2);
+                       usleep_range(2000, 2500);
        } else {
                if (ath5k_get_bus_type(ah) == ATH_AHB)
                        ret = ath5k_hw_wisoc_reset(ah, AR5K_RESET_CTL_PCU |
@@ -637,7 +713,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
        }
 
        /* ...wakeup again!...*/
-       ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+       ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
        if (ret) {
                ATH5K_ERR(ah, "failed to resume the MAC Chip\n");
                return ret;
@@ -739,7 +815,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
                /* ...update PLL if needed */
                if (ath5k_hw_reg_read(ah, AR5K_PHY_PLL) != clock) {
                        ath5k_hw_reg_write(ah, clock, AR5K_PHY_PLL);
-                       udelay(300);
+                       usleep_range(300, 350);
                }
 
                /* ...set the PHY operating mode */
@@ -755,8 +831,19 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
 * Post-initvals register modifications *
 \**************************************/
 
-/* TODO: Half/Quarter rate */
-static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
+/**
+ * ath5k_hw_tweak_initval_settings() - Tweak initial settings
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Some settings are not handled by the initvals, e.g. bwmode
+ * settings, some PHY settings, workarounds etc that in general
+ * don't fit anywhere else or are too small to warrant a separate
+ * function for each one. So we have this function to handle
+ * them all during reset and complete the card's initialization.
+ */
+static void
+ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
                                struct ieee80211_channel *channel)
 {
        if (ah->ah_version == AR5K_AR5212 &&
@@ -875,7 +962,16 @@ static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
        }
 }
 
-static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
+/**
+ * ath5k_hw_commit_eeprom_settings() - Commit settings from EEPROM
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Use settings stored in the EEPROM to properly initialize the card
+ * based on various info and per-mode calibration data.
+ */
+static void
+ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
                struct ieee80211_channel *channel)
 {
        struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
@@ -1029,7 +1125,23 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
 * Main reset function *
 \*********************/
 
-int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
+/**
+ * ath5k_hw_reset() - The main reset function
+ * @ah: The &struct ath5k_hw
+ * @op_mode: One of enum nl80211_iftype
+ * @channel: The &struct ieee80211_channel
+ * @fast: Enable fast channel switching
+ * @skip_pcu: Skip pcu initialization
+ *
+ * This is the function we call each time we want to (re)initialize the
+ * card and pass new settings to hw. We also call it when hw runs into
+ * trouble to make it come back to a working state.
+ *
+ * Returns 0 on success, -EINVAL on invalid op_mode or channel info, or -EIO
+ * on failure.
+ */
+int
+ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
                struct ieee80211_channel *channel, bool fast, bool skip_pcu)
 {
        u32 s_seq[10], s_led[3], tsf_up, tsf_lo;
@@ -1047,7 +1159,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
         */
        if (fast && (ah->ah_radio != AR5K_RF2413) &&
        (ah->ah_radio != AR5K_RF5413))
-               fast = 0;
+               fast = false;
 
        /* Disable sleep clock operation
         * to avoid register access delay on certain
@@ -1073,7 +1185,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
        if (ret && fast) {
                ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
                        "DMA didn't stop, falling back to normal reset\n");
-               fast = 0;
+               fast = false;
                /* Non fatal, just continue with
                 * normal reset */
                ret = 0;
@@ -1242,7 +1354,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
        /*
         * Initialize PCU
         */
-       ath5k_hw_pcu_init(ah, op_mode, mode);
+       ath5k_hw_pcu_init(ah, op_mode);
 
        /*
         * Initialize PHY
index 5d11c23..aed34d9 100644 (file)
@@ -18,7 +18,9 @@
  */
 
 
-/*
+/**
+ * DOC: RF Buffer registers
+ *
  * There are some special registers on the RF chip
  * that control various operation settings related mostly to
  * the analog parts (channel, gain adjustment etc).
  */
 
 
-/*
+/**
+ * struct ath5k_ini_rfbuffer - Initial RF Buffer settings
+ * @rfb_bank: RF Bank number
+ * @rfb_ctrl_register: RF Buffer control register
+ * @rfb_mode_data: RF Buffer data for each mode
+ *
  * Struct to hold default mode specific RF
- * register values (RF Banks)
+ * register values (RF Banks) for each chip.
  */
 struct ath5k_ini_rfbuffer {
-       u8      rfb_bank;               /* RF Bank number */
-       u16     rfb_ctrl_register;      /* RF Buffer control register */
-       u32     rfb_mode_data[3];       /* RF Buffer data for each mode */
+       u8      rfb_bank;
+       u16     rfb_ctrl_register;
+       u32     rfb_mode_data[3];
 };
 
-/*
+/**
+ * struct ath5k_rfb_field - An RF Buffer field (register/value)
+ * @len: Field length
+ * @pos: Offset on the raw packet
+ * @col: Used for shifting
+ *
  * Struct to hold RF Buffer field
  * infos used to access certain RF
  * analog registers
  */
 struct ath5k_rfb_field {
-       u8      len;    /* Field length */
-       u16     pos;    /* Offset on the raw packet */
-       u8      col;    /* Column -used for shifting */
+       u8      len;
+       u16     pos;
+       u8      col;
 };
 
-/*
- * RF analog register definition
+/**
+ * struct ath5k_rf_reg - RF analog register definition
+ * @bank: RF Buffer Bank number
+ * @index: Register's index on ath5k_rf_regs_idx
+ * @field: The &struct ath5k_rfb_field
+ *
+ * We use this struct to define the set of RF registers
+ * on each chip that we want to tweak. Some RF registers
+ * are common between different chip versions so this saves
+ * us space and complexity because we can refer to an rf
+ * register by its index no matter what chip we work with
+ * as long as it has that register.
  */
 struct ath5k_rf_reg {
-       u8                      bank;   /* RF Buffer Bank number */
-       u8                      index;  /* Register's index on rf_regs_idx */
-       struct ath5k_rfb_field  field;  /* RF Buffer field for this register */
+       u8                      bank;
+       u8                      index;
+       struct ath5k_rfb_field  field;
 };
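A purely hypothetical entry (the numbers are made up; only the layout follows the structs above) would tie a bank/offset pair to one of the indexes defined further below:

	static const struct ath5k_rf_reg rf_regs_example[] = {
		/* bank 6, bound to AR5K_RF_TURBO: a 1-bit field at
		 * raw-buffer position 245, column 0 (example values only). */
		{ 6, AR5K_RF_TURBO, { 1, 245, 0 } },
	};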
 
-/* Map RF registers to indexes
+/**
+ * enum ath5k_rf_regs_idx - Map RF registers to indexes
+ *
  * We do this to handle common bits and make our
  * life easier by using an index for each register
- * instead of a full rfb_field */
+ * instead of a full rfb_field
+ */
 enum ath5k_rf_regs_idx {
        /* BANK 2 */
        AR5K_RF_TURBO = 0,
index ebfae05..4d21df0 100644 (file)
  *
  */
 
-/*
+/**
+ * struct ath5k_ini_rfgain - RF Gain table
+ * @rfg_register: RF Gain register address
+ * @rfg_value: Register value for 5 and 2GHz
+ *
  * Mode-specific RF Gain table (64 bytes) for RF5111/5112
  * (RF5110 only comes with AR5210 and only supports a/turbo a mode so initial
  * RF Gain values are included in AR5K_AR5210_INI)
  */
 struct ath5k_ini_rfgain {
-       u16     rfg_register;   /* RF Gain register address */
+       u16     rfg_register;
        u32     rfg_value[2];   /* [freq (see below)] */
 };
 
@@ -455,18 +459,31 @@ static const struct ath5k_ini_rfgain rfgain_2425[] = {
 #define AR5K_GAIN_CHECK_ADJUST(_g)             \
        ((_g)->g_current <= (_g)->g_low || (_g)->g_current >= (_g)->g_high)
 
+/**
+ * struct ath5k_gain_opt_step - An RF gain optimization step
+ * @gos_param: Set of parameters
+ * @gos_gain: Gain
+ */
 struct ath5k_gain_opt_step {
        s8                              gos_param[AR5K_GAIN_CRN_MAX_FIX_BITS];
        s8                              gos_gain;
 };
 
+/**
+ * struct ath5k_gain_opt - RF Gain optimization ladder
+ * @go_default: The default step
+ * @go_steps_count: How many optimization steps
+ * @go_step: Array of &struct ath5k_gain_opt_step
+ */
 struct ath5k_gain_opt {
        u8                              go_default;
        u8                              go_steps_count;
        const struct ath5k_gain_opt_step        go_step[AR5K_GAIN_STEP_COUNT];
 };
 
+
 /*
+ * RF5111
  * Parameters on gos_param:
  * 1) Tx clip PHY register
  * 2) PWD 90 RF register
@@ -490,6 +507,7 @@ static const struct ath5k_gain_opt rfgain_opt_5111 = {
 };
 
 /*
+ * RF5112
  * Parameters on gos_param:
  * 1) Mixgain ovr RF register
  * 2) PWD 138 RF register
index 30050af..5acb4a4 100644 (file)
@@ -361,7 +361,7 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar, int idx)
                        ath6kl_dbg(ATH6KL_DBG_TRC, "failed to request P2P "
                                   "capabilities (%d) - assuming P2P not "
                                   "supported\n", ret);
-                       ar->p2p = 0;
+                       ar->p2p = false;
                }
        }
 
index 7b4c074..1b4786a 100644 (file)
@@ -2,6 +2,9 @@ config ATH9K_HW
        tristate
 config ATH9K_COMMON
        tristate
+config ATH9K_DFS_DEBUGFS
+       def_bool y
+       depends on ATH9K_DEBUGFS && ATH9K_DFS_CERTIFIED
 
 config ATH9K
        tristate "Atheros 802.11n wireless cards support"
@@ -51,6 +54,25 @@ config ATH9K_DEBUGFS
 
          Also required for changing debug message flags at run time.
 
+config ATH9K_DFS_CERTIFIED
+       bool "Atheros DFS support for certified platforms"
+       depends on ATH9K && EXPERT
+       default n
+       ---help---
+         This option enables DFS support for initiating radiation on
+         ath9k. There is no way to dynamically detect if a card was DFS
+         certified and as such this is left as a build time option. This
+         option should only be enabled by system integrators that can
+         guarantee that all the platforms that their kernel will run on
+         have obtained appropriate regulatory body certification for a
+         respective Atheros card by using ath9k on the target shipping
+         platforms.
+
+         This is currently only a placeholder for future DFS support,
+         as DFS support requires more components that still need to be
+         developed. At this point enabling this option won't do anything
+         except increase code size.
+
 config ATH9K_RATE_CONTROL
        bool "Atheros ath9k rate control"
        depends on ATH9K
index 49d3f25..da02242 100644 (file)
@@ -10,6 +10,8 @@ ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
 ath9k-$(CONFIG_ATH9K_PCI) += pci.o
 ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
 ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
+ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
+ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += dfs.o
 
 obj-$(CONFIG_ATH9K) += ath9k.o
 
@@ -34,7 +36,8 @@ ath9k_hw-y:=  \
                ar9002_mac.o \
                ar9003_mac.o \
                ar9003_eeprom.o \
-               ar9003_paprd.o
+               ar9003_paprd.o \
+               ar9003_mci.o
 
 obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
 
index 88279e3..157337f 100644 (file)
@@ -203,7 +203,7 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
                        i);
 
                ath_dbg(common, ATH_DBG_CALIBRATE,
-                       "Orignal: Chn %diq_corr_meas = 0x%08x\n",
+                       "Original: Chn %d iq_corr_meas = 0x%08x\n",
                        i, ah->totalIqCorrMeas[i]);
 
                iqCorrNeg = 0;
index 12a730d..23b3a6c 100644 (file)
@@ -18,6 +18,7 @@
 #include "hw-ops.h"
 #include "ar9003_phy.h"
 #include "ar9003_rtt.h"
+#include "ar9003_mci.h"
 
 #define MAX_MEASUREMENT        MAX_IQCAL_MEASUREMENT
 #define MAX_MAG_DELTA  11
@@ -225,7 +226,7 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
                        i);
 
                ath_dbg(common, ATH_DBG_CALIBRATE,
-                       "Orignal: Chn %diq_corr_meas = 0x%08x\n",
+                       "Original: Chn %d iq_corr_meas = 0x%08x\n",
                        i, ah->totalIqCorrMeas[i]);
 
                iqCorrNeg = 0;
@@ -824,7 +825,7 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
                                                chan_info_tab[i] + offset);
 
                                ath_dbg(common, ATH_DBG_CALIBRATE,
-                                       "IQ RES[%d]=0x%x"
+                                       "IQ_RES[%d]=0x%x "
                                        "IQ_RES[%d]=0x%x\n",
                                        idx, iq_res[idx], idx + 1,
                                        iq_res[idx + 1]);
@@ -934,10 +935,12 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
 {
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_hw_cal_data *caldata = ah->caldata;
+       struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
        bool txiqcal_done = false, txclcal_done = false;
        bool is_reusable = true, status = true;
        bool run_rtt_cal = false, run_agc_cal;
        bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
+       bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
        u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
                                          AR_PHY_AGC_CONTROL_FLTR_CAL   |
                                          AR_PHY_AGC_CONTROL_PKDET_CAL;
@@ -1005,6 +1008,31 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
        } else if (caldata && !caldata->done_txiqcal_once)
                run_agc_cal = true;
 
+       if (mci && IS_CHAN_2GHZ(chan) &&
+           (mci_hw->bt_state  == MCI_BT_AWAKE) &&
+           run_agc_cal &&
+           !(mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL)) {
+
+               u32 pld[4] = {0, 0, 0, 0};
+
+               /* send CAL_REQ only when BT is AWAKE. */
+               ath_dbg(common, ATH_DBG_MCI, "MCI send WLAN_CAL_REQ 0x%x\n",
+                       mci_hw->wlan_cal_seq);
+               MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_REQ);
+               pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_seq++;
+               ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
+
+               /* Wait BT_CAL_GRANT for 50ms */
+               ath_dbg(common, ATH_DBG_MCI, "MCI wait for BT_CAL_GRANT");
+
+               if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_GRANT, 0, 50000))
+                       ath_dbg(common, ATH_DBG_MCI, "MCI got BT_CAL_GRANT");
+               else {
+                       is_reusable = false;
+                       ath_dbg(common, ATH_DBG_MCI, "\nMCI BT is not responding");
+               }
+       }
+
        txiqcal_done = ar9003_hw_tx_iq_cal_run(ah);
        REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
        udelay(5);
@@ -1022,6 +1050,21 @@ skip_tx_iqcal:
                                       AR_PHY_AGC_CONTROL_CAL,
                                       0, AH_WAIT_TIMEOUT);
        }
+
+       if (mci && IS_CHAN_2GHZ(chan) &&
+           (mci_hw->bt_state  == MCI_BT_AWAKE) &&
+           run_agc_cal &&
+           !(mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL)) {
+
+               u32 pld[4] = {0, 0, 0, 0};
+
+               ath_dbg(common, ATH_DBG_MCI, "MCI Send WLAN_CAL_DONE 0x%x\n",
+                       mci_hw->wlan_cal_done);
+               MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_DONE);
+               pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_done++;
+               ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
+       }
+
        if (rtt && !run_rtt_cal) {
                agc_ctrl |= agc_supp_cals;
                REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
index a93bd63..4ba6f52 100644 (file)
@@ -4779,7 +4779,7 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
 {
        struct ath_common *common = ath9k_hw_common(ah);
        struct ar9300_eeprom *pEepData = &ah->eeprom.ar9300_eep;
-       u16 twiceMaxEdgePower = MAX_RATE_POWER;
+       u16 twiceMaxEdgePower;
        int i;
        u16 scaledPower = 0, minCtlPower;
        static const u16 ctlModesFor11a[] = {
@@ -4880,6 +4880,7 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
                        ctlNum = AR9300_NUM_CTLS_5G;
                }
 
+               twiceMaxEdgePower = MAX_RATE_POWER;
                for (i = 0; (i < ctlNum) && ctlIndex[i]; i++) {
                        ath_dbg(common, ATH_DBG_REGULATORY,
                                "LOOP-Ctlidx %d: cfgCtl 0x%2.2x pCtlMode 0x%2.2x ctlIndex 0x%2.2x chan %d\n",
index ccde784..631fe4f 100644 (file)
@@ -175,20 +175,24 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
        u32 isr = 0;
        u32 mask2 = 0;
        struct ath9k_hw_capabilities *pCap = &ah->caps;
-       u32 sync_cause = 0;
        struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u32 sync_cause = 0, async_cause;
 
-       if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
+       async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
+
+       if (async_cause & (AR_INTR_MAC_IRQ | AR_INTR_ASYNC_MASK_MCI)) {
                if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
                                == AR_RTC_STATUS_ON)
                        isr = REG_READ(ah, AR_ISR);
        }
 
+
        sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT;
 
        *masked = 0;
 
-       if (!isr && !sync_cause)
+       if (!isr && !sync_cause && !async_cause)
                return false;
 
        if (isr) {
@@ -294,6 +298,35 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
                        ar9003_hw_bb_watchdog_read(ah);
        }
 
+       if (async_cause & AR_INTR_ASYNC_MASK_MCI) {
+               u32 raw_intr, rx_msg_intr;
+
+               rx_msg_intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
+               raw_intr = REG_READ(ah, AR_MCI_INTERRUPT_RAW);
+
+               if ((raw_intr == 0xdeadbeef) || (rx_msg_intr == 0xdeadbeef))
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI gets 0xdeadbeef during MCI int processing, "
+                               "new raw_intr=0x%08x, new rx_msg_raw=0x%08x, "
+                               "raw_intr=0x%08x, rx_msg_raw=0x%08x\n",
+                               raw_intr, rx_msg_intr, mci->raw_intr,
+                               mci->rx_msg_intr);
+               else {
+                       mci->rx_msg_intr |= rx_msg_intr;
+                       mci->raw_intr |= raw_intr;
+                       *masked |= ATH9K_INT_MCI;
+
+                       if (rx_msg_intr & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO)
+                               mci->cont_status =
+                                       REG_READ(ah, AR_MCI_CONT_STATUS);
+
+                       REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, rx_msg_intr);
+                       REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, raw_intr);
+                       ath_dbg(common, ATH_DBG_MCI, "AR_INTR_SYNC_MCI\n");
+
+               }
+       }
+
        if (sync_cause) {
                if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
                        REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
new file mode 100644 (file)
index 0000000..8599822
--- /dev/null
@@ -0,0 +1,1464 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include "hw.h"
+#include "ar9003_phy.h"
+#include "ar9003_mci.h"
+
+static void ar9003_mci_reset_req_wakeup(struct ath_hw *ah)
+{
+       if (!AR_SREV_9462_20(ah))
+               return;
+
+       REG_RMW_FIELD(ah, AR_MCI_COMMAND2,
+                     AR_MCI_COMMAND2_RESET_REQ_WAKEUP, 1);
+       udelay(1);
+       REG_RMW_FIELD(ah, AR_MCI_COMMAND2,
+                     AR_MCI_COMMAND2_RESET_REQ_WAKEUP, 0);
+}
+
+static int ar9003_mci_wait_for_interrupt(struct ath_hw *ah, u32 address,
+                                       u32 bit_position, int time_out)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+
+       while (time_out) {
+
+               if (REG_READ(ah, address) & bit_position) {
+
+                       REG_WRITE(ah, address, bit_position);
+
+                       if (address == AR_MCI_INTERRUPT_RX_MSG_RAW) {
+
+                               if (bit_position &
+                                   AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
+                                       ar9003_mci_reset_req_wakeup(ah);
+
+                               if (bit_position &
+                                   (AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING |
+                                    AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING))
+                                       REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+                                       AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
+
+                               REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+                                         AR_MCI_INTERRUPT_RX_MSG);
+                       }
+                       break;
+               }
+
+               udelay(10);
+               time_out -= 10;
+
+               if (time_out < 0)
+                       break;
+       }
+
+       if (time_out <= 0) {
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Wait for Reg 0x%08x = 0x%08x timeout.\n",
+                       address, bit_position);
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI INT_RAW = 0x%08x, RX_MSG_RAW = 0x%08x",
+                       REG_READ(ah, AR_MCI_INTERRUPT_RAW),
+                       REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW));
+               time_out = 0;
+       }
+
+       return time_out;
+}
+
+void ar9003_mci_remote_reset(struct ath_hw *ah, bool wait_done)
+{
+       u32 payload[4] = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffff00};
+
+       ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0, payload, 16,
+                               wait_done, false);
+       udelay(5);
+}
+
+void ar9003_mci_send_lna_transfer(struct ath_hw *ah, bool wait_done)
+{
+       u32 payload = 0x00000000;
+
+       ar9003_mci_send_message(ah, MCI_LNA_TRANS, 0, &payload, 1,
+                               wait_done, false);
+}
+
+static void ar9003_mci_send_req_wake(struct ath_hw *ah, bool wait_done)
+{
+       ar9003_mci_send_message(ah, MCI_REQ_WAKE, MCI_FLAG_DISABLE_TIMESTAMP,
+                               NULL, 0, wait_done, false);
+       udelay(5);
+}
+
+void ar9003_mci_send_sys_waking(struct ath_hw *ah, bool wait_done)
+{
+       ar9003_mci_send_message(ah, MCI_SYS_WAKING, MCI_FLAG_DISABLE_TIMESTAMP,
+                               NULL, 0, wait_done, false);
+}
+
+static void ar9003_mci_send_lna_take(struct ath_hw *ah, bool wait_done)
+{
+       u32 payload = 0x70000000;
+
+       ar9003_mci_send_message(ah, MCI_LNA_TAKE, 0, &payload, 1,
+                               wait_done, false);
+}
+
+static void ar9003_mci_send_sys_sleeping(struct ath_hw *ah, bool wait_done)
+{
+       ar9003_mci_send_message(ah, MCI_SYS_SLEEPING,
+                               MCI_FLAG_DISABLE_TIMESTAMP,
+                               NULL, 0, wait_done, false);
+}
+
+static void ar9003_mci_send_coex_version_query(struct ath_hw *ah,
+                                              bool wait_done)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u32 payload[4] = {0, 0, 0, 0};
+
+       if (!mci->bt_version_known &&
+                       (mci->bt_state != MCI_BT_SLEEP)) {
+               ath_dbg(common, ATH_DBG_MCI, "MCI Send Coex version query\n");
+               MCI_GPM_SET_TYPE_OPCODE(payload,
+                               MCI_GPM_COEX_AGENT, MCI_GPM_COEX_VERSION_QUERY);
+               ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+                               wait_done, true);
+       }
+}
+
+static void ar9003_mci_send_coex_version_response(struct ath_hw *ah,
+                                                    bool wait_done)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u32 payload[4] = {0, 0, 0, 0};
+
+       ath_dbg(common, ATH_DBG_MCI, "MCI Send Coex version response\n");
+       MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
+                       MCI_GPM_COEX_VERSION_RESPONSE);
+       *(((u8 *)payload) + MCI_GPM_COEX_B_MAJOR_VERSION) =
+               mci->wlan_ver_major;
+       *(((u8 *)payload) + MCI_GPM_COEX_B_MINOR_VERSION) =
+               mci->wlan_ver_minor;
+       ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
+}
+
+static void ar9003_mci_send_coex_wlan_channels(struct ath_hw *ah,
+                                                 bool wait_done)
+{
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u32 *payload = &mci->wlan_channels[0];
+
+       if ((mci->wlan_channels_update == true) &&
+                       (mci->bt_state != MCI_BT_SLEEP)) {
+               MCI_GPM_SET_TYPE_OPCODE(payload,
+               MCI_GPM_COEX_AGENT, MCI_GPM_COEX_WLAN_CHANNELS);
+               ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+                                       wait_done, true);
+               MCI_GPM_SET_TYPE_OPCODE(payload, 0xff, 0xff);
+       }
+}
+
+static void ar9003_mci_send_coex_bt_status_query(struct ath_hw *ah,
+                                               bool wait_done, u8 query_type)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u32 payload[4] = {0, 0, 0, 0};
+       bool query_btinfo = !!(query_type & (MCI_GPM_COEX_QUERY_BT_ALL_INFO |
+                                            MCI_GPM_COEX_QUERY_BT_TOPOLOGY));
+
+       if (mci->bt_state != MCI_BT_SLEEP) {
+
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Send Coex BT Status Query 0x%02X\n", query_type);
+
+               MCI_GPM_SET_TYPE_OPCODE(payload,
+                               MCI_GPM_COEX_AGENT, MCI_GPM_COEX_STATUS_QUERY);
+
+               *(((u8 *)payload) + MCI_GPM_COEX_B_BT_BITMAP) = query_type;
+               /*
+                * If bt_status_query message is  not sent successfully,
+                * then need_flush_btinfo should be set again.
+                */
+               if (!ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+                                            wait_done, true)) {
+                       if (query_btinfo) {
+                               mci->need_flush_btinfo = true;
+
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI send bt_status_query fail, "
+                                       "set flush flag again\n");
+                       }
+               }
+
+               if (query_btinfo)
+                       mci->query_bt = false;
+       }
+}
+
+void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt,
+                                     bool wait_done)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u32 payload[4] = {0, 0, 0, 0};
+
+       ath_dbg(common, ATH_DBG_MCI, "MCI Send Coex %s BT GPM.\n",
+               (halt) ? "halt" : "unhalt");
+
+       MCI_GPM_SET_TYPE_OPCODE(payload,
+                               MCI_GPM_COEX_AGENT, MCI_GPM_COEX_HALT_BT_GPM);
+
+       if (halt) {
+               mci->query_bt = true;
+               /* Send next unhalt no matter halt sent or not */
+               mci->unhalt_bt_gpm = true;
+               mci->need_flush_btinfo = true;
+               *(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) =
+                       MCI_GPM_COEX_BT_GPM_HALT;
+       } else
+               *(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) =
+                       MCI_GPM_COEX_BT_GPM_UNHALT;
+
+       ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
+}
+
+
+static void ar9003_mci_prep_interface(struct ath_hw *ah)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u32 saved_mci_int_en;
+       u32 mci_timeout = 150;
+
+       mci->bt_state = MCI_BT_SLEEP;
+       saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN);
+
+       REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
+       REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+                 REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW));
+       REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+                 REG_READ(ah, AR_MCI_INTERRUPT_RAW));
+
+       /* Remote Reset */
+       ath_dbg(common, ATH_DBG_MCI, "MCI Reset sequence start\n");
+       ath_dbg(common, ATH_DBG_MCI, "MCI send REMOTE_RESET\n");
+       ar9003_mci_remote_reset(ah, true);
+
+       /*
+        * This delay is required for the reset delay worst case value 255 in
+        * MCI_COMMAND2 register
+        */
+
+       if (AR_SREV_9462_10(ah))
+               udelay(252);
+
+       ath_dbg(common, ATH_DBG_MCI, "MCI Send REQ_WAKE to remote(BT)\n");
+       ar9003_mci_send_req_wake(ah, true);
+
+       if (ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+                               AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING, 500)) {
+
+               ath_dbg(common, ATH_DBG_MCI,
+                               "MCI SYS_WAKING from remote(BT)\n");
+               mci->bt_state = MCI_BT_AWAKE;
+
+               if (AR_SREV_9462_10(ah))
+                       udelay(10);
+               /*
+                * we don't need to send more remote_reset at this moment.
+                * If BT receive first remote_reset, then BT HW will
+                * be cleaned up and will be able to receive req_wake
+                * and BT HW will respond sys_waking.
+                * In this case, WLAN will receive BT's HW sys_waking.
+                * Otherwise, if BT SW missed initial remote_reset,
+                * that remote_reset will still clean up BT MCI RX,
+                * and the req_wake will wake BT up,
+                * and BT SW will respond this req_wake with a remote_reset and
+                * sys_waking. In this case, WLAN will receive BT's SW
+                * sys_waking. In either case, BT's RX is cleaned up. So we
+                * don't need to reply BT's remote_reset now, if any.
+                * Similarly, if in any case, WLAN can receive BT's sys_waking,
+                * that means WLAN's RX is also fine.
+                */
+
+               /* Send SYS_WAKING to BT */
+
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI send SW SYS_WAKING to remote BT\n");
+
+               ar9003_mci_send_sys_waking(ah, true);
+               udelay(10);
+
+               /*
+                * Set BT priority interrupt value to be 0xff to
+                * avoid having too many BT PRIORITY interrupts.
+                */
+
+               REG_WRITE(ah, AR_MCI_BT_PRI0, 0xFFFFFFFF);
+               REG_WRITE(ah, AR_MCI_BT_PRI1, 0xFFFFFFFF);
+               REG_WRITE(ah, AR_MCI_BT_PRI2, 0xFFFFFFFF);
+               REG_WRITE(ah, AR_MCI_BT_PRI3, 0xFFFFFFFF);
+               REG_WRITE(ah, AR_MCI_BT_PRI, 0X000000FF);
+
+               /*
+                * A contention reset will be received after send out
+                * sys_waking. Also BT priority interrupt bits will be set.
+                * Clear those bits before the next step.
+                */
+
+               REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+                         AR_MCI_INTERRUPT_RX_MSG_CONT_RST);
+               REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+                         AR_MCI_INTERRUPT_BT_PRI);
+
+               if (AR_SREV_9462_10(ah) || mci->is_2g) {
+                       /* Send LNA_TRANS */
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI send LNA_TRANS to BT\n");
+                       ar9003_mci_send_lna_transfer(ah, true);
+                       udelay(5);
+               }
+
+               if (AR_SREV_9462_10(ah) || (mci->is_2g &&
+                                           !mci->update_2g5g)) {
+                       if (ar9003_mci_wait_for_interrupt(ah,
+                               AR_MCI_INTERRUPT_RX_MSG_RAW,
+                               AR_MCI_INTERRUPT_RX_MSG_LNA_INFO,
+                               mci_timeout))
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI WLAN has control over the LNA & "
+                                       "BT obeys it\n");
+                       else
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI BT didn't respond to"
+                                       "MCI BT didn't respond to "
+               }
+
+               if (AR_SREV_9462_10(ah)) {
+                       /* Send another remote_reset to deassert BT clk_req. */
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI another remote_reset to "
+                               "deassert clk_req\n");
+                       ar9003_mci_remote_reset(ah, true);
+                       udelay(252);
+               }
+       }
+
+       /* Clear the extra redundant SYS_WAKING from BT */
+       if ((mci->bt_state == MCI_BT_AWAKE) &&
+               (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+                               AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING)) &&
+               (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+                               AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) == 0)) {
+
+                       REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+                                 AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING);
+                       REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+                                 AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
+       }
+
+       REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en);
+}
+
+void ar9003_mci_disable_interrupt(struct ath_hw *ah)
+{
+       REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
+       REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0);
+}
+
+void ar9003_mci_enable_interrupt(struct ath_hw *ah)
+{
+
+       REG_WRITE(ah, AR_MCI_INTERRUPT_EN, AR_MCI_INTERRUPT_DEFAULT);
+       REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN,
+                 AR_MCI_INTERRUPT_RX_MSG_DEFAULT);
+}
+
+bool ar9003_mci_check_int(struct ath_hw *ah, u32 ints)
+{
+       u32 intr;
+
+       intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
+       return ((intr & ints) == ints);
+}
+
+void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
+                             u32 *rx_msg_intr)
+{
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       *raw_intr = mci->raw_intr;
+       *rx_msg_intr = mci->rx_msg_intr;
+
+       /* Clean int bits after the values are read. */
+       mci->raw_intr = 0;
+       mci->rx_msg_intr = 0;
+}
+EXPORT_SYMBOL(ar9003_mci_get_interrupt);
+
+void ar9003_mci_2g5g_changed(struct ath_hw *ah, bool is_2g)
+{
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+       if (!mci->update_2g5g &&
+           (mci->is_2g != is_2g))
+               mci->update_2g5g = true;
+
+       mci->is_2g = is_2g;
+}
+
+static bool ar9003_mci_is_gpm_valid(struct ath_hw *ah, u32 msg_index)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u32 *payload;
+       u32 recv_type, offset;
+
+       if (msg_index == MCI_GPM_INVALID)
+               return false;
+
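+       /* each GPM message occupies 16 bytes in the GPM buffer */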
+       offset = msg_index << 4;
+
+       payload = (u32 *)(mci->gpm_buf + offset);
+       recv_type = MCI_GPM_TYPE(payload);
+
+       if (recv_type == MCI_GPM_RSVD_PATTERN) {
+               ath_dbg(common, ATH_DBG_MCI, "MCI Skip RSVD GPM\n");
+               return false;
+       }
+
+       return true;
+}
+
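+/*
+ * Route MCI/BT/WLAN signals onto GPIOs according to the configured
+ * observation mode, for debug signal observation.
+ */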
+static void ar9003_mci_observation_set_up(struct ath_hw *ah)
+{
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       if (mci->config & ATH_MCI_CONFIG_MCI_OBS_MCI) {
+
+               ath9k_hw_cfg_output(ah, 3,
+                                       AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA);
+               ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK);
+               ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
+               ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
+
+       } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_TXRX) {
+
+               ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX);
+               ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX);
+               ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
+               ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
+               ath9k_hw_cfg_output(ah, 5, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+
+       } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_BT) {
+
+               ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
+               ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
+               ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
+               ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
+
+       } else
+               return;
+
+       REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
+
+       if (AR_SREV_9462_20_OR_LATER(ah)) {
+               REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
+                             AR_GLB_DS_JTAG_DISABLE, 1);
+               REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
+                             AR_GLB_WLAN_UART_INTF_EN, 0);
+               REG_SET_BIT(ah, AR_GLB_GPIO_CONTROL,
+                           ATH_MCI_CONFIG_MCI_OBS_GPIO);
+       }
+
+       REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_GPIO_OBS_SEL, 0);
+       REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL, 1);
+       REG_WRITE(ah, AR_OBS, 0x4b);
+       REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL1, 0x03);
+       REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL2, 0x01);
+       REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_LSB, 0x02);
+       REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_MSB, 0x03);
+       REG_RMW_FIELD(ah, AR_PHY_TEST_CTL_STATUS,
+                     AR_PHY_TEST_CTL_DEBUGPORT_SEL, 0x07);
+}
+
+static bool ar9003_mci_send_coex_bt_flags(struct ath_hw *ah, bool wait_done,
+                                               u8 opcode, u32 bt_flags)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       u32 pld[4] = {0, 0, 0, 0};
+
+       MCI_GPM_SET_TYPE_OPCODE(pld,
+                       MCI_GPM_COEX_AGENT, MCI_GPM_COEX_BT_UPDATE_FLAGS);
+
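+       /* pack bt_flags into the payload, least significant byte first */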
+       *(((u8 *)pld) + MCI_GPM_COEX_B_BT_FLAGS_OP)  = opcode;
+       *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 0) = bt_flags & 0xFF;
+       *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 1) = (bt_flags >> 8) & 0xFF;
+       *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 2) = (bt_flags >> 16) & 0xFF;
+       *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 3) = (bt_flags >> 24) & 0xFF;
+
+       ath_dbg(common, ATH_DBG_MCI,
+               "MCI BT_MCI_FLAGS: Send Coex BT Update Flags %s 0x%08x\n",
+               (opcode == MCI_GPM_COEX_BT_FLAGS_READ) ? "READ" :
+               ((opcode == MCI_GPM_COEX_BT_FLAGS_SET) ? "SET" : "CLEAR"),
+                                                               bt_flags);
+
+       return ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16,
+                                                       wait_done, true);
+}
+
+void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
+                     bool is_full_sleep)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u32 regval, thresh;
+
+       ath_dbg(common, ATH_DBG_MCI, "MCI full_sleep = %d, is_2g = %d\n",
+               is_full_sleep, is_2g);
+
+       /*
+        * GPM buffer and scheduling message buffer are not allocated
+        */
+
+       if (!mci->gpm_addr && !mci->sched_addr) {
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI GPM and schedule buffers are not allocated");
+               return;
+       }
+
+       if (REG_READ(ah, AR_BTCOEX_CTRL) == 0xdeadbeef) {
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI it's deadbeef, quit mci_reset\n");
+               return;
+       }
+
+       /* Program MCI DMA related registers */
+       REG_WRITE(ah, AR_MCI_GPM_0, mci->gpm_addr);
+       REG_WRITE(ah, AR_MCI_GPM_1, mci->gpm_len);
+       REG_WRITE(ah, AR_MCI_SCHD_TABLE_0, mci->sched_addr);
+
+       /*
+        * To keep the MCI state machine from being affected by incoming
+        * remote MCI messages, MCI mode will be enabled later, right
+        * before resetting the MCI TX and RX paths.
+        */
+
+       regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
+                SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
+                SM(1, AR_BTCOEX_CTRL_PA_SHARED) |
+                SM(1, AR_BTCOEX_CTRL_LNA_SHARED) |
+                SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
+                SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK) |
+                SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
+                SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
+                SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+
+       if (is_2g && (AR_SREV_9462_20(ah)) &&
+               !(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA)) {
+
+               regval |= SM(1, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+               ath_dbg(common, ATH_DBG_MCI,
+                               "MCI sched one step look ahead\n");
+
+               if (!(mci->config &
+                     ATH_MCI_CONFIG_DISABLE_AGGR_THRESH)) {
+
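+                       /* the aggregation threshold is a 3-bit config field */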
+                       thresh = MS(mci->config,
+                                   ATH_MCI_CONFIG_AGGR_THRESH);
+                       thresh &= 7;
+                       regval |= SM(1,
+                                    AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN);
+                       regval |= SM(thresh, AR_BTCOEX_CTRL_AGGR_THRESH);
+
+                       REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
+                                     AR_MCI_SCHD_TABLE_2_HW_BASED, 1);
+                       REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
+                                     AR_MCI_SCHD_TABLE_2_MEM_BASED, 1);
+
+               } else
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI sched aggr thresh: off\n");
+       } else
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI SCHED one step look ahead off\n");
+
+       if (AR_SREV_9462_10(ah))
+               regval |= SM(1, AR_BTCOEX_CTRL_SPDT_ENABLE_10);
+
+       REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
+
+       if (AR_SREV_9462_20(ah)) {
+               REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
+                           AR_BTCOEX_CTRL_SPDT_ENABLE);
+               REG_RMW_FIELD(ah, AR_BTCOEX_CTRL3,
+                             AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT, 20);
+       }
+
+       REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_RX_DEWEIGHT, 1);
+       REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
+
+       thresh = MS(mci->config, ATH_MCI_CONFIG_CLK_DIV);
+       REG_RMW_FIELD(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_CLK_DIV, thresh);
+       REG_SET_BIT(ah, AR_BTCOEX_CTRL, AR_BTCOEX_CTRL_MCI_MODE_EN);
+
+       /* Resetting the Rx and Tx paths of MCI */
+       regval = REG_READ(ah, AR_MCI_COMMAND2);
+       regval |= SM(1, AR_MCI_COMMAND2_RESET_TX);
+       REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+
+       udelay(1);
+
+       regval &= ~SM(1, AR_MCI_COMMAND2_RESET_TX);
+       REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+
+       if (is_full_sleep) {
+               ar9003_mci_mute_bt(ah);
+               udelay(100);
+       }
+
+       regval |= SM(1, AR_MCI_COMMAND2_RESET_RX);
+       REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+       udelay(1);
+       regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX);
+       REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+
+       ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET, NULL);
+       REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE,
+                 (SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) |
+                  SM(0x0000, AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM)));
+
+       REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
+                       AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+       if (AR_SREV_9462_20_OR_LATER(ah))
+               ar9003_mci_observation_set_up(ah);
+
+       mci->ready = true;
+       ar9003_mci_prep_interface(ah);
+
+       if (en_int)
+               ar9003_mci_enable_interrupt(ah);
+}
+
+void ar9003_mci_mute_bt(struct ath_hw *ah)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+
+       /* disable all MCI messages */
+       REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 0xffff0000);
+       REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS0, 0xffffffff);
+       REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS1, 0xffffffff);
+       REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS2, 0xffffffff);
+       REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS3, 0xffffffff);
+       REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+       /* wait for pending HW messages to flush out */
+       udelay(10);
+
+       /*
+        * Send LNA_TAKE and SYS_SLEEPING when
+        * 1. this reset is not after resuming from full sleep
+        * 2. just before resetting MCI RX, to quiet BT and avoid MCI RX
+        *    misalignment
+        */
+
+       ath_dbg(common, ATH_DBG_MCI, "MCI Send LNA take\n");
+       ar9003_mci_send_lna_take(ah, true);
+
+       udelay(5);
+
+       ath_dbg(common, ATH_DBG_MCI, "MCI Send sys sleeping\n");
+       ar9003_mci_send_sys_sleeping(ah, true);
+}
+
+void ar9003_mci_sync_bt_state(struct ath_hw *ah)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u32 cur_bt_state;
+
+       cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL);
+
+       if (mci->bt_state != cur_bt_state) {
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI BT state mismatches. old: %d, new: %d\n",
+                       mci->bt_state, cur_bt_state);
+               mci->bt_state = cur_bt_state;
+       }
+
+       if (mci->bt_state != MCI_BT_SLEEP) {
+
+               ar9003_mci_send_coex_version_query(ah, true);
+               ar9003_mci_send_coex_wlan_channels(ah, true);
+
+               if (mci->unhalt_bt_gpm == true) {
+                       ath_dbg(common, ATH_DBG_MCI, "MCI unhalt BT GPM");
+                       ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);
+               }
+       }
+}
+
+static void ar9003_mci_send_2g5g_status(struct ath_hw *ah, bool wait_done)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u32 new_flags, to_set, to_clear;
+
+       if (AR_SREV_9462_20(ah) &&
+           mci->update_2g5g &&
+           (mci->bt_state != MCI_BT_SLEEP)) {
+
+               if (mci->is_2g) {
+                       new_flags = MCI_2G_FLAGS;
+                       to_clear = MCI_2G_FLAGS_CLEAR_MASK;
+                       to_set = MCI_2G_FLAGS_SET_MASK;
+               } else {
+                       new_flags = MCI_5G_FLAGS;
+                       to_clear = MCI_5G_FLAGS_CLEAR_MASK;
+                       to_set = MCI_5G_FLAGS_SET_MASK;
+               }
+
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI BT_MCI_FLAGS: %s 0x%08x clr=0x%08x, set=0x%08x\n",
+               mci->is_2g ? "2G" : "5G", new_flags, to_clear, to_set);
+
+               if (to_clear)
+                       ar9003_mci_send_coex_bt_flags(ah, wait_done,
+                                       MCI_GPM_COEX_BT_FLAGS_CLEAR, to_clear);
+
+               if (to_set)
+                       ar9003_mci_send_coex_bt_flags(ah, wait_done,
+                                       MCI_GPM_COEX_BT_FLAGS_SET, to_set);
+       }
+
+       if (AR_SREV_9462_10(ah) && (mci->bt_state != MCI_BT_SLEEP))
+               mci->update_2g5g = false;
+}
+
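+/*
+ * Record whether the given coex GPM message (BT flags, WLAN channel map,
+ * HALT/UNHALT) is still pending ("queued") or has been sent, so that it
+ * can be replayed later when needed.
+ */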
+static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header,
+                                       u32 *payload, bool queue)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u8 type, opcode;
+
+       if (queue) {
+
+               if (payload)
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI ERROR: Send fail: %02x: %02x %02x %02x\n",
+                               header,
+                               *(((u8 *)payload) + 4),
+                               *(((u8 *)payload) + 5),
+                               *(((u8 *)payload) + 6));
+               else
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI ERROR: Send fail: %02x\n", header);
+       }
+
+       /* check if the message is to be queued */
+       if (header != MCI_GPM)
+               return;
+
+       type = MCI_GPM_TYPE(payload);
+       opcode = MCI_GPM_OPCODE(payload);
+
+       if (type != MCI_GPM_COEX_AGENT)
+               return;
+
+       switch (opcode) {
+       case MCI_GPM_COEX_BT_UPDATE_FLAGS:
+
+               if (AR_SREV_9462_10(ah))
+                       break;
+
+               if (*(((u8 *)payload) + MCI_GPM_COEX_B_BT_FLAGS_OP) ==
+                               MCI_GPM_COEX_BT_FLAGS_READ)
+                       break;
+
+               mci->update_2g5g = queue;
+
+               if (queue)
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI BT_MCI_FLAGS: 2G5G status <queued> %s.\n",
+                               mci->is_2g ? "2G" : "5G");
+               else
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI BT_MCI_FLAGS: 2G5G status <sent> %s.\n",
+                               mci->is_2g ? "2G" : "5G");
+
+               break;
+
+       case MCI_GPM_COEX_WLAN_CHANNELS:
+
+               mci->wlan_channels_update = queue;
+               if (queue)
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI WLAN channel map <queued>\n");
+               else
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI WLAN channel map <sent>\n");
+               break;
+
+       case MCI_GPM_COEX_HALT_BT_GPM:
+
+               if (*(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) ==
+                               MCI_GPM_COEX_BT_GPM_UNHALT) {
+
+                       mci->unhalt_bt_gpm = queue;
+
+                       if (queue)
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI UNHALT BT GPM <queued>\n");
+                       else {
+                               mci->halted_bt_gpm = false;
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI UNHALT BT GPM <sent>\n");
+                       }
+               }
+
+               if (*(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) ==
+                               MCI_GPM_COEX_BT_GPM_HALT) {
+
+                       mci->halted_bt_gpm = !queue;
+
+                       if (queue)
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI HALT BT GPM <not sent>\n");
+                       else
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI UNHALT BT GPM <sent>\n");
+               }
+
+               break;
+       default:
+               break;
+       }
+}
+
+void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+       if (mci->update_2g5g) {
+               if (mci->is_2g) {
+
+                       ar9003_mci_send_2g5g_status(ah, true);
+                       ath_dbg(common, ATH_DBG_MCI, "MCI Send LNA trans\n");
+                       ar9003_mci_send_lna_transfer(ah, true);
+                       udelay(5);
+
+                       REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
+                                   AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+                       if (AR_SREV_9462_20(ah)) {
+                               REG_CLR_BIT(ah, AR_PHY_GLB_CONTROL,
+                                           AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
+                               if (!(mci->config &
+                                     ATH_MCI_CONFIG_DISABLE_OSLA)) {
+                                       REG_SET_BIT(ah, AR_BTCOEX_CTRL,
+                                       AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+                               }
+                       }
+               } else {
+                       ath_dbg(common, ATH_DBG_MCI, "MCI Send LNA take\n");
+                       ar9003_mci_send_lna_take(ah, true);
+                       udelay(5);
+
+                       REG_SET_BIT(ah, AR_MCI_TX_CTRL,
+                                   AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+                       if (AR_SREV_9462_20(ah)) {
+                               REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
+                                           AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
+                               REG_CLR_BIT(ah, AR_BTCOEX_CTRL,
+                                       AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+                       }
+
+                       ar9003_mci_send_2g5g_status(ah, true);
+               }
+       }
+}
+
+bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
+                            u32 *payload, u8 len, bool wait_done,
+                            bool check_bt)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       bool msg_sent = false;
+       u32 regval;
+       u32 saved_mci_int_en;
+       int i;
+
+       saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN);
+       regval = REG_READ(ah, AR_BTCOEX_CTRL);
+
+       if ((regval == 0xdeadbeef) || !(regval & AR_BTCOEX_CTRL_MCI_MODE_EN)) {
+
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Not sending 0x%x. MCI is not enabled. "
+                       "full_sleep = %d\n", header,
+                       (ah->power_mode == ATH9K_PM_FULL_SLEEP) ? 1 : 0);
+
+               ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
+               return false;
+
+       } else if (check_bt && (mci->bt_state == MCI_BT_SLEEP)) {
+
+               ath_dbg(common, ATH_DBG_MCI,
+               "MCI Don't send message 0x%x. BT is in sleep state\n", header);
+
+               ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
+               return false;
+       }
+
+       if (wait_done)
+               REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
+
+       /* Need to clear SW_MSG_DONE raw bit before wait */
+
+       REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+                 (AR_MCI_INTERRUPT_SW_MSG_DONE |
+                  AR_MCI_INTERRUPT_MSG_FAIL_MASK));
+
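+       /* copy payload words into the MCI TX payload registers */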
+       if (payload) {
+               for (i = 0; (i * 4) < len; i++)
+                       REG_WRITE(ah, (AR_MCI_TX_PAYLOAD0 + i * 4),
+                                 *(payload + i));
+       }
+
+       REG_WRITE(ah, AR_MCI_COMMAND0,
+                 (SM((flag & MCI_FLAG_DISABLE_TIMESTAMP),
+                     AR_MCI_COMMAND0_DISABLE_TIMESTAMP) |
+                  SM(len, AR_MCI_COMMAND0_LEN) |
+                  SM(header, AR_MCI_COMMAND0_HEADER)));
+
+       if (wait_done &&
+           !(ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RAW,
+                                       AR_MCI_INTERRUPT_SW_MSG_DONE, 500)))
+               ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
+       else {
+               ar9003_mci_queue_unsent_gpm(ah, header, payload, false);
+               msg_sent = true;
+       }
+
+       if (wait_done)
+               REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en);
+
+       return msg_sent;
+}
+EXPORT_SYMBOL(ar9003_mci_send_message);
+
+void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
+                     u16 len, u32 sched_addr)
+{
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
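+       /*
+        * The schedule buffer shares the GPM allocation; derive its virtual
+        * address from the offset between the two DMA addresses.
+        */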
+       void *sched_buf = (void *)((char *) gpm_buf + (sched_addr - gpm_addr));
+
+       mci->gpm_addr = gpm_addr;
+       mci->gpm_buf = gpm_buf;
+       mci->gpm_len = len;
+       mci->sched_addr = sched_addr;
+       mci->sched_buf = sched_buf;
+
+       ar9003_mci_reset(ah, true, true, true);
+}
+EXPORT_SYMBOL(ar9003_mci_setup);
+
+void ar9003_mci_cleanup(struct ath_hw *ah)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+
+       /* Turn off MCI and Jupiter mode. */
+       REG_WRITE(ah, AR_BTCOEX_CTRL, 0x00);
+       ath_dbg(common, ATH_DBG_MCI, "MCI ar9003_mci_cleanup\n");
+       ar9003_mci_disable_interrupt(ah);
+}
+EXPORT_SYMBOL(ar9003_mci_cleanup);
+
+static void ar9003_mci_process_gpm_extra(struct ath_hw *ah, u8 gpm_type,
+                                        u8 gpm_opcode, u32 *p_gpm)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u8 *p_data = (u8 *) p_gpm;
+
+       if (gpm_type != MCI_GPM_COEX_AGENT)
+               return;
+
+       switch (gpm_opcode) {
+       case MCI_GPM_COEX_VERSION_QUERY:
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Recv GPM COEX Version Query\n");
+               ar9003_mci_send_coex_version_response(ah, true);
+               break;
+       case MCI_GPM_COEX_VERSION_RESPONSE:
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Recv GPM COEX Version Response\n");
+               mci->bt_ver_major =
+                       *(p_data + MCI_GPM_COEX_B_MAJOR_VERSION);
+               mci->bt_ver_minor =
+                       *(p_data + MCI_GPM_COEX_B_MINOR_VERSION);
+               mci->bt_version_known = true;
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI BT Coex version: %d.%d\n",
+                       mci->bt_ver_major,
+                       mci->bt_ver_minor);
+               break;
+       case MCI_GPM_COEX_STATUS_QUERY:
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Recv GPM COEX Status Query = 0x%02X.\n",
+                       *(p_data + MCI_GPM_COEX_B_WLAN_BITMAP));
+               mci->wlan_channels_update = true;
+               ar9003_mci_send_coex_wlan_channels(ah, true);
+               break;
+       case MCI_GPM_COEX_BT_PROFILE_INFO:
+               mci->query_bt = true;
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Recv GPM COEX BT_Profile_Info\n");
+               break;
+       case MCI_GPM_COEX_BT_STATUS_UPDATE:
+               mci->query_bt = true;
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Recv GPM COEX BT_Status_Update "
+                       "SEQ=%d (drop&query)\n", *(p_gpm + 3));
+               break;
+       default:
+               break;
+       }
+}
+
+u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
+                           u8 gpm_opcode, int time_out)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u32 *p_gpm = NULL, mismatch = 0, more_data;
+       u32 offset;
+       u8 recv_type = 0, recv_opcode = 0;
+       bool b_is_bt_cal_done = (gpm_type == MCI_GPM_BT_CAL_DONE);
+
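+       /*
+        * With a zero timeout the wait loop below is skipped; more_data then
+        * starts as MCI_GPM_MORE, so only the tail loop runs and drains any
+        * GPM messages already queued in the buffer.
+        */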
+       more_data = time_out ? MCI_GPM_NOMORE : MCI_GPM_MORE;
+
+       while (time_out > 0) {
+               if (p_gpm) {
+                       MCI_GPM_RECYCLE(p_gpm);
+                       p_gpm = NULL;
+               }
+
+               if (more_data != MCI_GPM_MORE)
+                       time_out = ar9003_mci_wait_for_interrupt(ah,
+                                       AR_MCI_INTERRUPT_RX_MSG_RAW,
+                                       AR_MCI_INTERRUPT_RX_MSG_GPM,
+                                       time_out);
+
+               if (!time_out)
+                       break;
+
+               offset = ar9003_mci_state(ah,
+                               MCI_STATE_NEXT_GPM_OFFSET, &more_data);
+
+               if (offset == MCI_GPM_INVALID)
+                       continue;
+
+               p_gpm = (u32 *) (mci->gpm_buf + offset);
+               recv_type = MCI_GPM_TYPE(p_gpm);
+               recv_opcode = MCI_GPM_OPCODE(p_gpm);
+
+               if (MCI_GPM_IS_CAL_TYPE(recv_type)) {
+
+                       if (recv_type == gpm_type) {
+
+                               if ((gpm_type == MCI_GPM_BT_CAL_DONE) &&
+                                   !b_is_bt_cal_done) {
+                                       gpm_type = MCI_GPM_BT_CAL_GRANT;
+                                       ath_dbg(common, ATH_DBG_MCI,
+                                               "MCI Recv BT_CAL_DONE, "
+                                               "wait BT_CAL_GRANT\n");
+                                       continue;
+                               }
+
+                               break;
+                       }
+               } else if ((recv_type == gpm_type) &&
+                          (recv_opcode == gpm_opcode))
+                       break;
+
+               /* not expected message */
+
+               /*
+                * Check if it's cal_grant.
+                *
+                * When we're waiting for cal_grant in the reset routine,
+                * it's possible that BT sends out cal_request at the
+                * same time. Since BT's calibration doesn't happen
+                * that often, we let BT complete its calibration and
+                * then continue to wait for cal_grant from BT.
+                * Original: Wait for BT_CAL_GRANT.
+                * New: Receive BT_CAL_REQ -> send WLAN_CAL_GRANT -> wait
+                * for BT_CAL_DONE -> wait for BT_CAL_GRANT.
+                */
+
+               if ((gpm_type == MCI_GPM_BT_CAL_GRANT) &&
+                   (recv_type == MCI_GPM_BT_CAL_REQ)) {
+
+                       u32 payload[4] = {0, 0, 0, 0};
+
+                       gpm_type = MCI_GPM_BT_CAL_DONE;
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI Rcv BT_CAL_REQ, send WLAN_CAL_GRANT\n");
+
+                       MCI_GPM_SET_CAL_TYPE(payload,
+                                       MCI_GPM_WLAN_CAL_GRANT);
+
+                       ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+                                               false, false);
+
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI now wait for BT_CAL_DONE\n");
+
+                       continue;
+               } else {
+                       ath_dbg(common, ATH_DBG_MCI, "MCI GPM subtype "
+                                       "does not match 0x%x\n", *(p_gpm + 1));
+                       mismatch++;
+                       ar9003_mci_process_gpm_extra(ah, recv_type,
+                                       recv_opcode, p_gpm);
+               }
+       }
+       if (p_gpm) {
+               MCI_GPM_RECYCLE(p_gpm);
+               p_gpm = NULL;
+       }
+
+       if (time_out <= 0) {
+               time_out = 0;
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI GPM receive timed out, mismatch = %d\n", mismatch);
+       } else
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Receive GPM type=0x%x, code=0x%x\n",
+                       gpm_type, gpm_opcode);
+
+       while (more_data == MCI_GPM_MORE) {
+
+               ath_dbg(common, ATH_DBG_MCI, "MCI discard remaining GPM\n");
+               offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET,
+                                         &more_data);
+
+               if (offset == MCI_GPM_INVALID)
+                       break;
+
+               p_gpm = (u32 *) (mci->gpm_buf + offset);
+               recv_type = MCI_GPM_TYPE(p_gpm);
+               recv_opcode = MCI_GPM_OPCODE(p_gpm);
+
+               if (!MCI_GPM_IS_CAL_TYPE(recv_type))
+                       ar9003_mci_process_gpm_extra(ah, recv_type,
+                                                    recv_opcode, p_gpm);
+
+               MCI_GPM_RECYCLE(p_gpm);
+       }
+
+       return time_out;
+}
+
+u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u32 value = 0, more_gpm = 0, gpm_ptr;
+       u8 query_type;
+
+       switch (state_type) {
+       case MCI_STATE_ENABLE:
+               if (mci->ready) {
+
+                       value = REG_READ(ah, AR_BTCOEX_CTRL);
+
+                       if ((value == 0xdeadbeef) || (value == 0xffffffff))
+                               value = 0;
+               }
+               value &= AR_BTCOEX_CTRL_MCI_MODE_EN;
+               break;
+       case MCI_STATE_INIT_GPM_OFFSET:
+               value = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI GPM initial WRITE_PTR=%d\n", value);
+               mci->gpm_idx = value;
+               break;
+       case MCI_STATE_NEXT_GPM_OFFSET:
+       case MCI_STATE_LAST_GPM_OFFSET:
+               /*
+                * This could be useful to avoid a new GPM message interrupt,
+                * which may lead to a spurious interrupt after power sleep,
+                * or to multiple entries into ath_mci_intr().
+                * Adding an empty-GPM check by returning HAL_MCI_GPM_INVALID
+                * can alleviate this, but clearing the GPM RX interrupt bit
+                * is safe: whether this is called from hw or driver code, an
+                * interrupt bit must have been set/triggered initially.
+                */
+               REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+                         AR_MCI_INTERRUPT_RX_MSG_GPM);
+
+               gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+               value = gpm_ptr;
+
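+               /*
+                * Step back one entry from the hardware write pointer,
+                * wrapping around the circular GPM buffer, to reach the
+                * most recently written message.
+                */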
+               if (value == 0)
+                       value = mci->gpm_len - 1;
+               else if (value >= mci->gpm_len) {
+                       if (value != 0xFFFF) {
+                               value = 0;
+                               ath_dbg(common, ATH_DBG_MCI, "MCI GPM offset "
+                                       "out of range\n");
+                       }
+               } else
+                       value--;
+
+               if (value == 0xFFFF) {
+                       value = MCI_GPM_INVALID;
+                       more_gpm = MCI_GPM_NOMORE;
+                       ath_dbg(common, ATH_DBG_MCI, "MCI GPM ptr invalid "
+                               "@ptr=%d, offset=%d, more=GPM_NOMORE\n",
+                               gpm_ptr, value);
+               } else if (state_type == MCI_STATE_NEXT_GPM_OFFSET) {
+
+                       if (gpm_ptr == mci->gpm_idx) {
+                               value = MCI_GPM_INVALID;
+                               more_gpm = MCI_GPM_NOMORE;
+
+                               ath_dbg(common, ATH_DBG_MCI, "MCI GPM message "
+                                       "not available @ptr=%d, @offset=%d, "
+                                       "more=GPM_NOMORE\n", gpm_ptr, value);
+                       } else {
+                               for (;;) {
+
+                                       u32 temp_index;
+
+                                       /* skip reserved GPM if any */
+
+                                       if (value != mci->gpm_idx)
+                                               more_gpm = MCI_GPM_MORE;
+                                       else
+                                               more_gpm = MCI_GPM_NOMORE;
+
+                                       temp_index = mci->gpm_idx;
+                                       mci->gpm_idx++;
+
+                                       if (mci->gpm_idx >=
+                                           mci->gpm_len)
+                                               mci->gpm_idx = 0;
+
+                                       ath_dbg(common, ATH_DBG_MCI,
+                                               "MCI GPM message got ptr=%d, "
+                                               "@offset=%d, more=%d\n",
+                                               gpm_ptr, temp_index,
+                                               (more_gpm == MCI_GPM_MORE));
+
+                                       if (ar9003_mci_is_gpm_valid(ah,
+                                                               temp_index)) {
+                                               value = temp_index;
+                                               break;
+                                       }
+
+                                       if (more_gpm == MCI_GPM_NOMORE) {
+                                               value = MCI_GPM_INVALID;
+                                               break;
+                                       }
+                               }
+                       }
+                       if (p_data)
+                               *p_data = more_gpm;
+               }
+
+               if (value != MCI_GPM_INVALID)
+                       value <<= 4;
+
+               break;
+       case MCI_STATE_LAST_SCHD_MSG_OFFSET:
+               value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
+                                   AR_MCI_RX_LAST_SCHD_MSG_INDEX);
+               /* Make it in bytes */
+               value <<= 4;
+               break;
+
+       case MCI_STATE_REMOTE_SLEEP:
+               value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
+                          AR_MCI_RX_REMOTE_SLEEP) ?
+                       MCI_BT_SLEEP : MCI_BT_AWAKE;
+               break;
+
+       case MCI_STATE_CONT_RSSI_POWER:
+               value = MS(mci->cont_status, AR_MCI_CONT_RSSI_POWER);
+               break;
+
+       case MCI_STATE_CONT_PRIORITY:
+               value = MS(mci->cont_status, AR_MCI_CONT_RRIORITY);
+               break;
+
+       case MCI_STATE_CONT_TXRX:
+               value = MS(mci->cont_status, AR_MCI_CONT_TXRX);
+               break;
+
+       case MCI_STATE_BT:
+               value = mci->bt_state;
+               break;
+
+       case MCI_STATE_SET_BT_SLEEP:
+               mci->bt_state = MCI_BT_SLEEP;
+               break;
+
+       case MCI_STATE_SET_BT_AWAKE:
+               mci->bt_state = MCI_BT_AWAKE;
+               ar9003_mci_send_coex_version_query(ah, true);
+               ar9003_mci_send_coex_wlan_channels(ah, true);
+
+               if (mci->unhalt_bt_gpm) {
+
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI unhalt BT GPM\n");
+                       ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);
+               }
+
+               ar9003_mci_2g5g_switch(ah, true);
+               break;
+
+       case MCI_STATE_SET_BT_CAL_START:
+               mci->bt_state = MCI_BT_CAL_START;
+               break;
+
+       case MCI_STATE_SET_BT_CAL:
+               mci->bt_state = MCI_BT_CAL;
+               break;
+
+       case MCI_STATE_RESET_REQ_WAKE:
+               ar9003_mci_reset_req_wakeup(ah);
+               mci->update_2g5g = true;
+
+               if ((AR_SREV_9462_20_OR_LATER(ah)) &&
+                   (mci->config & ATH_MCI_CONFIG_MCI_OBS_MASK)) {
+                       /* Check if we still have control of the GPIOs */
+                       if ((REG_READ(ah, AR_GLB_GPIO_CONTROL) &
+                                     ATH_MCI_CONFIG_MCI_OBS_GPIO) !=
+                                       ATH_MCI_CONFIG_MCI_OBS_GPIO) {
+
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI reconfigure observation");
+                               ar9003_mci_observation_set_up(ah);
+                       }
+               }
+               break;
+
+       case MCI_STATE_SEND_WLAN_COEX_VERSION:
+               ar9003_mci_send_coex_version_response(ah, true);
+               break;
+
+       case MCI_STATE_SET_BT_COEX_VERSION:
+
+               if (!p_data)
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI Set BT Coex version with NULL data!!\n");
+               else {
+                       mci->bt_ver_major = (*p_data >> 8) & 0xff;
+                       mci->bt_ver_minor = (*p_data) & 0xff;
+                       mci->bt_version_known = true;
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI BT version set: %d.%d\n",
+                               mci->bt_ver_major,
+                               mci->bt_ver_minor);
+               }
+               break;
+
+       case MCI_STATE_SEND_WLAN_CHANNELS:
+               if (p_data) {
+                       if (((mci->wlan_channels[1] & 0xffff0000) ==
+                            (*(p_data + 1) & 0xffff0000)) &&
+                           (mci->wlan_channels[2] == *(p_data + 2)) &&
+                           (mci->wlan_channels[3] == *(p_data + 3)))
+                               break;
+
+                       mci->wlan_channels[0] = *p_data++;
+                       mci->wlan_channels[1] = *p_data++;
+                       mci->wlan_channels[2] = *p_data++;
+                       mci->wlan_channels[3] = *p_data++;
+               }
+               mci->wlan_channels_update = true;
+               ar9003_mci_send_coex_wlan_channels(ah, true);
+               break;
+
+       case MCI_STATE_SEND_VERSION_QUERY:
+               ar9003_mci_send_coex_version_query(ah, true);
+               break;
+
+       case MCI_STATE_SEND_STATUS_QUERY:
+               query_type = (AR_SREV_9462_10(ah)) ?
+                               MCI_GPM_COEX_QUERY_BT_ALL_INFO :
+                               MCI_GPM_COEX_QUERY_BT_TOPOLOGY;
+
+               ar9003_mci_send_coex_bt_status_query(ah, true, query_type);
+               break;
+
+       case MCI_STATE_NEED_FLUSH_BT_INFO:
+               /*
+                * btcoex_hw.mci.unhalt_bt_gpm indicates whether an UNHALT
+                * message needs to be sent. It's set whenever there's a
+                * request to send a HALT message.
+                * mci_halted_bt_gpm indicates whether the HALT message was
+                * sent out successfully.
+                *
+                * Checking (mci_unhalt_bt_gpm == false) instead of
+                * (ah->mci_halted_bt_gpm == false) makes sure we are
+                * currently in UNHALT-ed mode and that BT can respond
+                * to a status query.
+                */
+               value = (!mci->unhalt_bt_gpm &&
+                        mci->need_flush_btinfo) ? 1 : 0;
+               if (p_data)
+                       mci->need_flush_btinfo =
+                               (*p_data != 0) ? true : false;
+               break;
+
+       case MCI_STATE_RECOVER_RX:
+
+               ath_dbg(common, ATH_DBG_MCI, "MCI hw RECOVER_RX\n");
+               ar9003_mci_prep_interface(ah);
+               mci->query_bt = true;
+               mci->need_flush_btinfo = true;
+               ar9003_mci_send_coex_wlan_channels(ah, true);
+               ar9003_mci_2g5g_switch(ah, true);
+               break;
+
+       case MCI_STATE_NEED_FTP_STOMP:
+               value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP);
+               break;
+
+       case MCI_STATE_NEED_TUNING:
+               value = !(mci->config & ATH_MCI_CONFIG_DISABLE_TUNING);
+               break;
+
+       default:
+               break;
+
+       }
+
+       return value;
+}
+EXPORT_SYMBOL(ar9003_mci_state);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.h b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
new file mode 100644 (file)
index 0000000..798da11
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef AR9003_MCI_H
+#define AR9003_MCI_H
+
+#define MCI_FLAG_DISABLE_TIMESTAMP      0x00000001      /* Disable time stamp */
+
+/* Default remote BT device MCI COEX version */
+#define MCI_GPM_COEX_MAJOR_VERSION_DEFAULT  3
+#define MCI_GPM_COEX_MINOR_VERSION_DEFAULT  0
+
+/* Local WLAN MCI COEX version */
+#define MCI_GPM_COEX_MAJOR_VERSION_WLAN     3
+#define MCI_GPM_COEX_MINOR_VERSION_WLAN     0
+
+enum mci_gpm_coex_query_type {
+       MCI_GPM_COEX_QUERY_BT_ALL_INFO      = BIT(0),
+       MCI_GPM_COEX_QUERY_BT_TOPOLOGY      = BIT(1),
+       MCI_GPM_COEX_QUERY_BT_DEBUG         = BIT(2),
+};
+
+enum mci_gpm_coex_halt_bt_gpm {
+       MCI_GPM_COEX_BT_GPM_UNHALT,
+       MCI_GPM_COEX_BT_GPM_HALT
+};
+
+enum mci_gpm_coex_bt_update_flags_op {
+       MCI_GPM_COEX_BT_FLAGS_READ,
+       MCI_GPM_COEX_BT_FLAGS_SET,
+       MCI_GPM_COEX_BT_FLAGS_CLEAR
+};
+
+#define MCI_NUM_BT_CHANNELS     79
+
+#define MCI_BT_MCI_FLAGS_UPDATE_CORR          0x00000002
+#define MCI_BT_MCI_FLAGS_UPDATE_HDR           0x00000004
+#define MCI_BT_MCI_FLAGS_UPDATE_PLD           0x00000008
+#define MCI_BT_MCI_FLAGS_LNA_CTRL             0x00000010
+#define MCI_BT_MCI_FLAGS_DEBUG                0x00000020
+#define MCI_BT_MCI_FLAGS_SCHED_MSG            0x00000040
+#define MCI_BT_MCI_FLAGS_CONT_MSG             0x00000080
+#define MCI_BT_MCI_FLAGS_COEX_GPM             0x00000100
+#define MCI_BT_MCI_FLAGS_CPU_INT_MSG          0x00000200
+#define MCI_BT_MCI_FLAGS_MCI_MODE             0x00000400
+#define MCI_BT_MCI_FLAGS_AR9462_MODE          0x00001000
+#define MCI_BT_MCI_FLAGS_OTHER                0x00010000
+
+#define MCI_DEFAULT_BT_MCI_FLAGS              0x00011dde
+
+#define MCI_TOGGLE_BT_MCI_FLAGS  (MCI_BT_MCI_FLAGS_UPDATE_CORR | \
+                                 MCI_BT_MCI_FLAGS_UPDATE_HDR  | \
+                                 MCI_BT_MCI_FLAGS_UPDATE_PLD  | \
+                                 MCI_BT_MCI_FLAGS_MCI_MODE)
+
+#define MCI_2G_FLAGS_CLEAR_MASK   0x00000000
+#define MCI_2G_FLAGS_SET_MASK     MCI_TOGGLE_BT_MCI_FLAGS
+#define MCI_2G_FLAGS              MCI_DEFAULT_BT_MCI_FLAGS
+
+#define MCI_5G_FLAGS_CLEAR_MASK   MCI_TOGGLE_BT_MCI_FLAGS
+#define MCI_5G_FLAGS_SET_MASK     0x00000000
+#define MCI_5G_FLAGS              (MCI_DEFAULT_BT_MCI_FLAGS & \
+                                  ~MCI_TOGGLE_BT_MCI_FLAGS)
+
+/*
+ * Default value for AR9462 is 0x00002201
+ */
+#define ATH_MCI_CONFIG_CONCUR_TX            0x00000003
+#define ATH_MCI_CONFIG_MCI_OBS_MCI          0x00000004
+#define ATH_MCI_CONFIG_MCI_OBS_TXRX         0x00000008
+#define ATH_MCI_CONFIG_MCI_OBS_BT           0x00000010
+#define ATH_MCI_CONFIG_DISABLE_MCI_CAL      0x00000020
+#define ATH_MCI_CONFIG_DISABLE_OSLA         0x00000040
+#define ATH_MCI_CONFIG_DISABLE_FTP_STOMP    0x00000080
+#define ATH_MCI_CONFIG_AGGR_THRESH          0x00000700
+#define ATH_MCI_CONFIG_AGGR_THRESH_S        8
+#define ATH_MCI_CONFIG_DISABLE_AGGR_THRESH  0x00000800
+#define ATH_MCI_CONFIG_CLK_DIV              0x00003000
+#define ATH_MCI_CONFIG_CLK_DIV_S            12
+#define ATH_MCI_CONFIG_DISABLE_TUNING       0x00004000
+#define ATH_MCI_CONFIG_MCI_WEIGHT_DBG       0x40000000
+#define ATH_MCI_CONFIG_DISABLE_MCI          0x80000000
+
+#define ATH_MCI_CONFIG_MCI_OBS_MASK     (ATH_MCI_CONFIG_MCI_OBS_MCI  | \
+                                        ATH_MCI_CONFIG_MCI_OBS_TXRX | \
+                                        ATH_MCI_CONFIG_MCI_OBS_BT)
+#define ATH_MCI_CONFIG_MCI_OBS_GPIO     0x0000002F
+
+#endif
index 497d746..ed64114 100644 (file)
 #define AR_PHY_TEST_CTL_TSTADC_EN_S       8
 #define AR_PHY_TEST_CTL_RX_OBS_SEL        0x3C00
 #define AR_PHY_TEST_CTL_RX_OBS_SEL_S      10
+#define AR_PHY_TEST_CTL_DEBUGPORT_SEL    0xe0000000
+#define AR_PHY_TEST_CTL_DEBUGPORT_SEL_S          29
 
 
 #define AR_PHY_TSTDAC            (AR_SM_BASE + 0x168)
 
 /* GLB Registers */
 #define AR_GLB_BASE    0x20000
+#define AR_GLB_GPIO_CONTROL    (AR_GLB_BASE)
 #define AR_PHY_GLB_CONTROL     (AR_GLB_BASE + 0x44)
 #define AR_GLB_SCRATCH(_ah)    (AR_GLB_BASE + \
                                        (AR_SREV_9462_20(_ah) ? 0x4c : 0x50))
index 259a6f3..dc2054f 100644 (file)
@@ -41,24 +41,24 @@ static const u32 ar9462_pciephy_clkreq_enable_L1_2p0[][2] = {
 
 static const u32 ar9462_2p0_baseband_postamble[][5] = {
        /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
-       {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
-       {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
-       {0x00009824, 0x5ac640de, 0x5ac640d0, 0x5ac640d0, 0x5ac640de},
-       {0x00009828, 0x0796be89, 0x0696b081, 0x0696b881, 0x0796be89},
+       {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a800d},
+       {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae},
+       {0x00009824, 0x5ac640de, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
+       {0x00009828, 0x0796be89, 0x0696b081, 0x0696b881, 0x09143e81},
        {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
        {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
        {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
        {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
        {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
-       {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
-       {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x92c84d2e},
-       {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
+       {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
+       {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
+       {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x33795d5e},
        {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
        {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
        {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
-       {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c782},
-       {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+       {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+       {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
        {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
        {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
        {0x0000a204, 0x013187c0, 0x013187c4, 0x013187c4, 0x013187c0},
@@ -81,6 +81,15 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
        {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
        {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
        {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a3a4, 0x00000010, 0x00000010, 0x00000000, 0x00000000},
+       {0x0000a3a8, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa},
+       {0x0000a3ac, 0xaaaaaa00, 0xaaaaaa30, 0xaaaaaa00, 0xaaaaaa00},
+       {0x0000a41c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+       {0x0000a420, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
+       {0x0000a424, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+       {0x0000a428, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
+       {0x0000a42c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+       {0x0000a430, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
        {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
        {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
        {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -1107,11 +1116,11 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
        {0x00009e30, 0x06336f77},
        {0x00009e34, 0x6af6532f},
        {0x00009e38, 0x0cc80c00},
-       {0x00009e40, 0x0d261820},
+       {0x00009e40, 0x15262820},
        {0x00009e4c, 0x00001004},
        {0x00009e50, 0x00ff03f1},
-       {0x00009e54, 0xe4c355c7},
-       {0x00009e58, 0xfd897735},
+       {0x00009e54, 0xe4c555c2},
+       {0x00009e58, 0xfd857722},
        {0x00009e5c, 0xe9198724},
        {0x00009fc0, 0x803e4788},
        {0x00009fc4, 0x0001efb5},
@@ -1142,9 +1151,6 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
        {0x0000a398, 0x001f0e0f},
        {0x0000a39c, 0x0075393f},
        {0x0000a3a0, 0xb79f6427},
-       {0x0000a3a4, 0x00000000},
-       {0x0000a3a8, 0xaaaaaaaa},
-       {0x0000a3ac, 0x3c466478},
        {0x0000a3c0, 0x20202020},
        {0x0000a3c4, 0x22222220},
        {0x0000a3c8, 0x20200020},
@@ -1167,12 +1173,6 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
        {0x0000a40c, 0x00820820},
        {0x0000a414, 0x1ce739ce},
        {0x0000a418, 0x2d001dce},
-       {0x0000a41c, 0x1ce739ce},
-       {0x0000a420, 0x000001ce},
-       {0x0000a424, 0x1ce739ce},
-       {0x0000a428, 0x000001ce},
-       {0x0000a42c, 0x1ce739ce},
-       {0x0000a430, 0x1ce739ce},
        {0x0000a434, 0x00000000},
        {0x0000a438, 0x00001801},
        {0x0000a43c, 0x00100000},
index 93b45b4..130e5db 100644 (file)
@@ -159,6 +159,9 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
 /* return block-ack bitmap index given sequence and starting sequence */
 #define ATH_BA_INDEX(_st, _seq) (((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1))
 
+/* return the seqno for _seq + _offset */
+#define ATH_BA_INDEX2SEQ(_seq, _offset) (((_seq) + (_offset)) & (IEEE80211_SEQ_MAX - 1))
+
 /* returns delimiter padding required given the packet length */
 #define ATH_AGGR_GET_NDELIM(_len)                                      \
        (((_len) >= ATH_AGGR_MINPLEN) ? 0 :                             \
@@ -238,6 +241,7 @@ struct ath_atx_tid {
        struct ath_node *an;
        struct ath_atx_ac *ac;
        unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
+       int bar_index;
        u16 seq_start;
        u16 seq_next;
        u16 baw_size;
@@ -252,9 +256,9 @@ struct ath_atx_tid {
 struct ath_node {
 #ifdef CONFIG_ATH9K_DEBUGFS
        struct list_head list; /* for sc->nodes */
+#endif
        struct ieee80211_sta *sta; /* station struct we're part of */
        struct ieee80211_vif *vif; /* interface with which we're associated */
-#endif
        struct ath_atx_tid tid[WME_NUM_TID];
        struct ath_atx_ac ac[WME_NUM_AC];
        int ps_key;
@@ -276,7 +280,6 @@ struct ath_tx_control {
 };
 
 #define ATH_TX_ERROR        0x01
-#define ATH_TX_BAR          0x02
 
 /**
  * @txq_map:  Index is mac80211 queue number.  This is
@@ -462,7 +465,7 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc);
 #define ATH_LED_PIN_9287               8
 #define ATH_LED_PIN_9300               10
 #define ATH_LED_PIN_9485               6
-#define ATH_LED_PIN_9462               0
+#define ATH_LED_PIN_9462               4
 
 #ifdef CONFIG_MAC80211_LEDS
 void ath_init_leds(struct ath_softc *sc);
@@ -542,7 +545,7 @@ struct ath_ant_comb {
 #define DEFAULT_CACHELINE       32
 #define ATH_REGCLASSIDS_MAX     10
 #define ATH_CABQ_READY_TIME     80      /* % of beacon interval */
-#define ATH_MAX_SW_RETRIES      10
+#define ATH_MAX_SW_RETRIES      30
 #define ATH_CHAN_MAX            255
 
 #define ATH_TXPOWER_MAX         100     /* .5 dBm units */
@@ -647,6 +650,7 @@ struct ath_softc {
        struct delayed_work tx_complete_work;
        struct delayed_work hw_pll_work;
        struct ath_btcoex btcoex;
+       struct ath_mci_coex mci_coex;
 
        struct ath_descdma txsdma;
 
index 9ac28d9..bbb2081 100644 (file)
@@ -21,7 +21,7 @@ enum ath_bt_mode {
        ATH_BT_COEX_MODE_LEGACY,        /* legacy rx_clear mode */
        ATH_BT_COEX_MODE_UNSLOTTED,     /* untimed/unslotted mode */
        ATH_BT_COEX_MODE_SLOTTED,       /* slotted mode */
-       ATH_BT_COEX_MODE_DISALBED,      /* coexistence disabled */
+       ATH_BT_COEX_MODE_DISABLED,      /* coexistence disabled */
 };
 
 struct ath_btcoex_config {
index d5e5db1..278361c 100644 (file)
@@ -54,8 +54,39 @@ enum ath_btcoex_scheme {
        ATH_BTCOEX_CFG_MCI,
 };
 
+struct ath9k_hw_mci {
+       u32 raw_intr;
+       u32 rx_msg_intr;
+       u32 cont_status;
+       u32 gpm_addr;
+       u32 gpm_len;
+       u32 gpm_idx;
+       u32 sched_addr;
+       u32 wlan_channels[4];
+       u32 wlan_cal_seq;
+       u32 wlan_cal_done;
+       u32 config;
+       u8 *gpm_buf;
+       u8 *sched_buf;
+       bool ready;
+       bool update_2g5g;
+       bool is_2g;
+       bool query_bt;
+       bool unhalt_bt_gpm; /* need send UNHALT */
+       bool halted_bt_gpm; /* HALT sent */
+       bool need_flush_btinfo;
+       bool bt_version_known;
+       bool wlan_channels_update;
+       u8 wlan_ver_major;
+       u8 wlan_ver_minor;
+       u8 bt_ver_major;
+       u8 bt_ver_minor;
+       u8 bt_state;
+};
+
 struct ath_btcoex_hw {
        enum ath_btcoex_scheme scheme;
+       struct ath9k_hw_mci mci;
        bool enabled;
        u8 wlanactive_gpio;
        u8 btactive_gpio;
index 6fb719d..68d972b 100644 (file)
@@ -856,7 +856,7 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
        sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len;
 
        if (bf_isampdu(bf)) {
-               if (flags & ATH_TX_BAR)
+               if (flags & ATH_TX_ERROR)
                        TX_STAT_INC(qnum, a_xretries);
                else
                        TX_STAT_INC(qnum, a_completed);
@@ -1630,6 +1630,9 @@ int ath9k_init_debug(struct ath_hw *ah)
        debugfs_create_file("debug", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
                            sc, &fops_debug);
 #endif
+
+       ath9k_dfs_init_debug(sc);
+
        debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy, sc,
                            &fops_dma);
        debugfs_create_file("interrupt", S_IRUSR, sc->debug.debugfs_phy, sc,
index 356352a..776a24a 100644 (file)
@@ -19,6 +19,7 @@
 
 #include "hw.h"
 #include "rc.h"
+#include "dfs_debug.h"
 
 struct ath_txq;
 struct ath_buf;
@@ -187,6 +188,7 @@ struct ath_stats {
        struct ath_interrupt_stats istats;
        struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES];
        struct ath_rx_stats rxstats;
+       struct ath_dfs_stats dfs_stats;
        u32 reset[__RESET_TYPE_MAX];
 };
 
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
new file mode 100644 (file)
index 0000000..e4e84a9
--- /dev/null
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include "ath9k.h"
+#include "dfs.h"
+#include "dfs_debug.h"
+
+/*
+ * TODO: move into or synchronize this with generic header
+ *      as soon as IF is defined
+ */
+struct dfs_radar_pulse {
+       u16 freq;
+       u64 ts;
+       u32 width;
+       u8 rssi;
+};
+
+/* internal struct to pass radar data */
+struct ath_radar_data {
+       u8 pulse_bw_info;
+       u8 rssi;
+       u8 ext_rssi;
+       u8 pulse_length_ext;
+       u8 pulse_length_pri;
+};
+
+/* convert pulse duration to usecs, considering clock mode */
+static u32 dur_to_usecs(struct ath_hw *ah, u32 dur)
+{
+       const u32 AR93X_NSECS_PER_DUR = 800;
+       const u32 AR93X_NSECS_PER_DUR_FAST = (8000 / 11);
+       u32 nsecs;
+
+       if (IS_CHAN_A_FAST_CLOCK(ah, ah->curchan))
+               nsecs = dur * AR93X_NSECS_PER_DUR_FAST;
+       else
+               nsecs = dur * AR93X_NSECS_PER_DUR;
+
+       return (nsecs + 500) / 1000;
+}
+
+#define PRI_CH_RADAR_FOUND 0x01
+#define EXT_CH_RADAR_FOUND 0x02
+static bool
+ath9k_postprocess_radar_event(struct ath_softc *sc,
+                             struct ath_radar_data *are,
+                             struct dfs_radar_pulse *drp)
+{
+       u8 rssi;
+       u16 dur;
+
+       ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_DFS,
+               "pulse_bw_info=0x%x, pri,ext len/rssi=(%u/%u, %u/%u)\n",
+               are->pulse_bw_info,
+               are->pulse_length_pri, are->rssi,
+               are->pulse_length_ext, are->ext_rssi);
+
+       /*
+        * Only the last 2 bits of the BW info are relevant, they indicate
+        * which channel the radar was detected in.
+        */
+       are->pulse_bw_info &= 0x03;
+
+       switch (are->pulse_bw_info) {
+       case PRI_CH_RADAR_FOUND:
+               /* radar in ctrl channel */
+               dur = are->pulse_length_pri;
+               DFS_STAT_INC(sc, pri_phy_errors);
+               /*
+                * cannot use ctrl channel RSSI
+                * if extension channel is stronger
+                */
+               rssi = (are->ext_rssi >= (are->rssi + 3)) ? 0 : are->rssi;
+               break;
+       case EXT_CH_RADAR_FOUND:
+               /* radar in extension channel */
+               dur = are->pulse_length_ext;
+               DFS_STAT_INC(sc, ext_phy_errors);
+               /*
+                * cannot use extension channel RSSI
+                * if control channel is stronger
+                */
+               rssi = (are->rssi >= (are->ext_rssi + 12)) ? 0 : are->ext_rssi;
+               break;
+       case (PRI_CH_RADAR_FOUND | EXT_CH_RADAR_FOUND):
+               /*
+                * In conducted testing, when the pulse is on DC, both pri and
+                * ext durations are reported to be the same.
+                *
+                * In radiated testing, when the pulse is on DC, different pri
+                * and ext durations are reported, so take the larger of the two.
+                */
+               if (are->pulse_length_ext >= are->pulse_length_pri)
+                       dur = are->pulse_length_ext;
+               else
+                       dur = are->pulse_length_pri;
+               DFS_STAT_INC(sc, dc_phy_errors);
+
+               /* when both are present use stronger one */
+               rssi = (are->rssi < are->ext_rssi) ? are->ext_rssi : are->rssi;
+               break;
+       default:
+               /*
+                * Bogus bandwidth info was received in descriptor,
+                * so ignore this PHY error
+                */
+               DFS_STAT_INC(sc, bwinfo_discards);
+               return false;
+       }
+
+       if (rssi == 0) {
+               DFS_STAT_INC(sc, rssi_discards);
+               return false;
+       }
+
+       /*
+        * TODO: check chirping pulses
+        *       checks for chirping depend on the DFS regulatory domain
+        *       in use, which is still TBD
+        */
+
+       /* convert duration to usecs */
+       drp->width = dur_to_usecs(sc->sc_ah, dur);
+       drp->rssi = rssi;
+
+       DFS_STAT_INC(sc, pulses_detected);
+       return true;
+}
+#undef PRI_CH_RADAR_FOUND
+#undef EXT_CH_RADAR_FOUND
+
+/*
+ * DFS: check PHY-error for radar pulse and feed the detector
+ */
+void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
+                             struct ath_rx_status *rs, u64 mactime)
+{
+       struct ath_radar_data ard;
+       u16 datalen;
+       char *vdata_end;
+       struct dfs_radar_pulse drp;
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
+
+       if ((rs->rs_phyerr != ATH9K_PHYERR_RADAR) &&
+           (rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT)) {
+               ath_dbg(common, ATH_DBG_DFS,
+                       "Error: rs_phyer=0x%x not a radar error\n",
+                       rs->rs_phyerr);
+               return;
+       }
+
+       datalen = rs->rs_datalen;
+       if (datalen == 0) {
+               DFS_STAT_INC(sc, datalen_discards);
+               return;
+       }
+
+       ard.rssi = rs->rs_rssi_ctl0;
+       ard.ext_rssi = rs->rs_rssi_ext0;
+
+       /*
+        * The hardware stores this as an 8-bit signed value.
+        * We cap it at 0 if it is negative.
+        */
+       if (ard.rssi & 0x80)
+               ard.rssi = 0;
+       if (ard.ext_rssi & 0x80)
+               ard.ext_rssi = 0;
+
+       vdata_end = (char *)data + datalen;
+       ard.pulse_bw_info = vdata_end[-1];
+       ard.pulse_length_ext = vdata_end[-2];
+       ard.pulse_length_pri = vdata_end[-3];
+
+       ath_dbg(common, ATH_DBG_DFS,
+               "bw_info=%d, length_pri=%d, length_ext=%d, "
+               "rssi_pri=%d, rssi_ext=%d\n",
+               ard.pulse_bw_info, ard.pulse_length_pri, ard.pulse_length_ext,
+               ard.rssi, ard.ext_rssi);
+
+       drp.freq = ah->curchan->channel;
+       drp.ts = mactime;
+       if (ath9k_postprocess_radar_event(sc, &ard, &drp)) {
+               static u64 last_ts;
+               ath_dbg(common, ATH_DBG_DFS,
+                       "ath9k_dfs_process_phyerr: channel=%d, ts=%llu, "
+                       "width=%d, rssi=%d, delta_ts=%llu\n",
+                       drp.freq, drp.ts, drp.width, drp.rssi, drp.ts-last_ts);
+               last_ts = drp.ts;
+               /*
+                * TODO: forward pulse to pattern detector
+                *
+                * ieee80211_add_radar_pulse(drp.freq, drp.ts,
+                *                           drp.width, drp.rssi);
+                */
+       }
+}
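For reference, a minimal standalone sketch (not part of the patch) of the
duration-to-microseconds rounding done by dur_to_usecs() above, assuming the
same constants: 800 ns per duration unit on the normal clock and 8000/11 ns on
the fast clock, rounded to the nearest microsecond.

#include <stdio.h>

static unsigned int sketch_dur_to_usecs(unsigned int dur, int fast_clock)
{
	const unsigned int nsecs_per_dur = fast_clock ? (8000 / 11) : 800;
	unsigned int nsecs = dur * nsecs_per_dur;

	/* round to the nearest microsecond, as in the patch */
	return (nsecs + 500) / 1000;
}

int main(void)
{
	/* e.g. a reported duration of 10 units is 8000 ns -> 8 us */
	printf("%u us\n", sketch_dur_to_usecs(10, 0));
	return 0;
}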
diff --git a/drivers/net/wireless/ath/ath9k/dfs.h b/drivers/net/wireless/ath/ath9k/dfs.h
new file mode 100644 (file)
index 0000000..c241285
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ATH9K_DFS_H
+#define ATH9K_DFS_H
+
+#if defined(CONFIG_ATH9K_DFS_CERTIFIED)
+/**
+ * ath9k_dfs_process_phyerr - process radar PHY error
+ * @sc: ath_softc
+ * @data: RX payload data
+ * @rs: RX status after processing descriptor
+ * @mactime: receive time
+ *
+ * This function is called whenever the HW DFS module detects a radar
+ * pulse and reports it as a PHY error.
+ *
+ * The radar information provided as raw payload data is validated and
+ * filtered for false pulses. Events passing all tests are forwarded to
+ * the upper layer for pattern detection.
+ */
+void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
+                             struct ath_rx_status *rs, u64 mactime);
+#else
+static inline void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
+                                           struct ath_rx_status *rs, u64 mactime) { }
+#endif
+
+#endif /* ATH9K_DFS_H */
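A hedged sketch (not part of the patch) of how the RX completion path is
expected to hand radar PHY errors to the hook declared above; the helper name
and call site are assumptions, only ath9k_dfs_process_phyerr() and
ATH9K_HW_CAP_DFS come from this series.

#include "ath9k.h"
#include "dfs.h"

/* assumed helper: called from RX completion with the raw PHY-error payload */
static void sketch_rx_radar_phyerr(struct ath_softc *sc,
				   struct ath_rx_status *rs,
				   void *payload, u64 mactime)
{
	/* only feed the detector when the chipset is DFS-capable */
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS))
		return;

	ath9k_dfs_process_phyerr(sc, payload, rs, mactime);
}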
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c
new file mode 100644 (file)
index 0000000..106d031
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+
+#include "ath9k.h"
+#include "dfs_debug.h"
+
+#define ATH9K_DFS_STAT(s, p) \
+       len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \
+                       sc->debug.stats.dfs_stats.p);
+
+static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
+                            size_t count, loff_t *ppos)
+{
+       struct ath_softc *sc = file->private_data;
+       struct ath9k_hw_version *hw_ver = &sc->sc_ah->hw_version;
+       char *buf;
+       unsigned int len = 0, size = 8000;
+       ssize_t retval = 0;
+
+       buf = kzalloc(size, GFP_KERNEL);
+       if (buf == NULL)
+               return -ENOMEM;
+
+       len += snprintf(buf + len, size - len, "DFS support for "
+                       "macVersion = 0x%x, macRev = 0x%x: %s\n",
+                       hw_ver->macVersion, hw_ver->macRev,
+                       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ?
+                                       "enabled" : "disabled");
+       ATH9K_DFS_STAT("DFS pulses detected     ", pulses_detected);
+       ATH9K_DFS_STAT("Datalen discards        ", datalen_discards);
+       ATH9K_DFS_STAT("RSSI discards           ", rssi_discards);
+       ATH9K_DFS_STAT("BW info discards        ", bwinfo_discards);
+       ATH9K_DFS_STAT("Primary channel pulses  ", pri_phy_errors);
+       ATH9K_DFS_STAT("Secondary channel pulses", ext_phy_errors);
+       ATH9K_DFS_STAT("Dual channel pulses     ", dc_phy_errors);
+
+       if (len > size)
+               len = size;
+
+       retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+       kfree(buf);
+
+       return retval;
+}
+
+static int ath9k_dfs_debugfs_open(struct inode *inode, struct file *file)
+{
+       file->private_data = inode->i_private;
+
+       return 0;
+}
+
+static const struct file_operations fops_dfs_stats = {
+       .read = read_file_dfs,
+       .open = ath9k_dfs_debugfs_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+void ath9k_dfs_init_debug(struct ath_softc *sc)
+{
+       debugfs_create_file("dfs_stats", S_IRUSR,
+                           sc->debug.debugfs_phy, sc, &fops_dfs_stats);
+}
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.h b/drivers/net/wireless/ath/ath9k/dfs_debug.h
new file mode 100644 (file)
index 0000000..6e1e2a7
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#ifndef DFS_DEBUG_H
+#define DFS_DEBUG_H
+
+#include "hw.h"
+
+/**
+ * struct ath_dfs_stats - DFS Statistics
+ *
+ * @pulses_detected:  No. of pulses detected so far
+ * @datalen_discards: No. of pulses discarded due to invalid datalen
+ * @rssi_discards:    No. of pulses discarded due to invalid RSSI
+ * @bwinfo_discards:  No. of pulses discarded due to invalid BW info
+ * @pri_phy_errors:   No. of pulses reported for primary channel
+ * @ext_phy_errors:   No. of pulses reported for extension channel
+ * @dc_phy_errors:    No. of pulses reported for primary + extension channel
+ */
+struct ath_dfs_stats {
+       u32 pulses_detected;
+       u32 datalen_discards;
+       u32 rssi_discards;
+       u32 bwinfo_discards;
+       u32 pri_phy_errors;
+       u32 ext_phy_errors;
+       u32 dc_phy_errors;
+};
+
+#if defined(CONFIG_ATH9K_DFS_DEBUGFS)
+
+#define DFS_STAT_INC(sc, c) (sc->debug.stats.dfs_stats.c++)
+void ath9k_dfs_init_debug(struct ath_softc *sc);
+
+#else
+
+#define DFS_STAT_INC(sc, c) do { } while (0)
+static inline void ath9k_dfs_init_debug(struct ath_softc *sc) { }
+
+#endif /* CONFIG_ATH9K_DFS_DEBUGFS */
+
+#endif /* DFS_DEBUG_H */
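A small illustration (not part of the patch) of the stub pattern above: with
CONFIG_ATH9K_DFS_DEBUGFS set, DFS_STAT_INC(sc, c) bumps the matching counter in
sc->debug.stats.dfs_stats; without it, the macro expands to an empty statement,
so call sites in dfs.c need no #ifdefs of their own.

#include "ath9k.h"
#include "dfs_debug.h"

static inline void sketch_count_pulse(struct ath_softc *sc)
{
	/* expands to sc->debug.stats.dfs_stats.pulses_detected++ (or nothing) */
	DFS_STAT_INC(sc, pulses_detected);
}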
index 9a7520f..61fcab0 100644 (file)
@@ -473,7 +473,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
 
        int i;
        u16 twiceMinEdgePower;
-       u16 twiceMaxEdgePower = MAX_RATE_POWER;
+       u16 twiceMaxEdgePower;
        u16 scaledPower = 0, minCtlPower;
        u16 numCtlModes;
        const u16 *pCtlMode;
@@ -542,9 +542,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
                else
                        freq = centers.ctl_center;
 
-               if (ah->eep_ops->get_eeprom_ver(ah) == 14 &&
-                   ah->eep_ops->get_eeprom_rev(ah) <= 2)
-                       twiceMaxEdgePower = MAX_RATE_POWER;
+               twiceMaxEdgePower = MAX_RATE_POWER;
 
                for (i = 0; (i < AR5416_EEP4K_NUM_CTLS) &&
                             pEepData->ctlIndex[i]; i++) {
index 4f5c50a..0981c07 100644 (file)
@@ -569,7 +569,7 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
 #define REDUCE_SCALED_POWER_BY_TWO_CHAIN     6
 #define REDUCE_SCALED_POWER_BY_THREE_CHAIN   10
 
-       u16 twiceMaxEdgePower = MAX_RATE_POWER;
+       u16 twiceMaxEdgePower;
        int i;
        struct cal_ctl_data_ar9287 *rep;
        struct cal_target_power_leg targetPowerOfdm = {0, {0, 0, 0, 0} },
@@ -669,6 +669,7 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
                else
                        freq = centers.ctl_center;
 
+               twiceMaxEdgePower = MAX_RATE_POWER;
                /* Walk through the CTL indices stored in EEPROM */
                for (i = 0; (i < AR9287_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
                        struct cal_ctl_edges *pRdEdgesPower;
index 81e6296..9681c09 100644 (file)
@@ -385,7 +385,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
        if ((ah->hw_version.devid == AR9280_DEVID_PCI) &&
            ((eep->baseEepHeader.version & 0xff) > 0x0a) &&
            (eep->baseEepHeader.pwdclkind == 0))
-               ah->need_an_top2_fixup = 1;
+               ah->need_an_top2_fixup = true;
 
        if ((common->bus_ops->ath_bus_type == ATH_USB) &&
            (AR_SREV_9280(ah)))
@@ -1000,7 +1000,7 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
 #define REDUCE_SCALED_POWER_BY_THREE_CHAIN   9 /* 10*log10(3)*2 */
 
        struct ar5416_eeprom_def *pEepData = &ah->eeprom.def;
-       u16 twiceMaxEdgePower = MAX_RATE_POWER;
+       u16 twiceMaxEdgePower;
        int i;
        struct cal_ctl_data *rep;
        struct cal_target_power_leg targetPowerOfdm, targetPowerCck = {
@@ -1121,9 +1121,7 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
                else
                        freq = centers.ctl_center;
 
-               if (ah->eep_ops->get_eeprom_ver(ah) == 14 &&
-                   ah->eep_ops->get_eeprom_rev(ah) <= 2)
-                       twiceMaxEdgePower = MAX_RATE_POWER;
+               twiceMaxEdgePower = MAX_RATE_POWER;
 
                for (i = 0; (i < AR5416_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
                        if ((((cfgCtl & ~CTL_MODE_M) |
index 0b9a0e8..f8ce4ea 100644 (file)
@@ -808,7 +808,8 @@ void ath9k_htc_ani_work(struct work_struct *work)
        }
 
        /* Verify whether we must check ANI */
-       if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
+       if (ah->config.enable_ani &&
+           (timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
                aniflag = true;
                common->ani.checkani_timer = timestamp;
        }
@@ -838,7 +839,7 @@ set_timer:
        * short calibration and long calibration.
        */
        cal_interval = ATH_LONG_CALINTERVAL;
-       if (priv->ah->config.enable_ani)
+       if (ah->config.enable_ani)
                cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
        if (!common->ani.caldone)
                cal_interval = min(cal_interval, (u32)short_cal_interval);
index e74c233..c4ad0b0 100644 (file)
@@ -212,4 +212,13 @@ static inline int ath9k_hw_fast_chan_change(struct ath_hw *ah,
        return ath9k_hw_private_ops(ah)->fast_chan_change(ah, chan,
                                                          ini_reloaded);
 }
+
+static inline void ath9k_hw_set_radar_params(struct ath_hw *ah)
+{
+       if (!ath9k_hw_private_ops(ah)->set_radar_params)
+               return;
+
+       ath9k_hw_private_ops(ah)->set_radar_params(ah, &ah->radar_conf);
+}
+
 #endif /* ATH9K_HW_OPS_H */
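A brief hedged sketch (not part of the patch) of how the new private-op wrapper
above is expected to be used: a chip family fills in .set_radar_params during
attach and callers simply invoke the wrapper, which quietly does nothing when
the hook is left NULL. The sketch_* names below are illustrative assumptions.

#include "hw.h"
#include "hw-ops.h"

/* hypothetical registration in a chip family's attach path */
static void sketch_attach_radar_ops(struct ath_hw *ah)
{
	struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);

	priv_ops->set_radar_params = NULL;	/* or a family-specific handler */
}

/* callers just use the wrapper; a NULL hook is skipped inside it */
static void sketch_apply_radar_conf(struct ath_hw *ah)
{
	ath9k_hw_set_radar_params(ah);
}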
index 8876134..8cda9a1 100644 (file)
@@ -504,7 +504,7 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
                return ecode;
        }
 
-       if (!AR_SREV_9100(ah) && !AR_SREV_9340(ah)) {
+       if (ah->config.enable_ani) {
                ath9k_hw_ani_setup(ah);
                ath9k_hw_ani_init(ah);
        }
@@ -610,6 +610,10 @@ static int __ath9k_hw_init(struct ath_hw *ah)
        if (!AR_SREV_9300_20_OR_LATER(ah))
                ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
 
+       /* disable ANI for 9340 */
+       if (AR_SREV_9340(ah))
+               ah->config.enable_ani = false;
+
        ath9k_hw_init_mode_regs(ah);
 
        if (!ah->is_pciexpress)
@@ -1350,6 +1354,7 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
 
 static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
 {
+       bool ret = false;
 
        if (AR_SREV_9300_20_OR_LATER(ah)) {
                REG_WRITE(ah, AR_WA, ah->WARegVal);
@@ -1361,13 +1366,20 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
 
        switch (type) {
        case ATH9K_RESET_POWER_ON:
-               return ath9k_hw_set_reset_power_on(ah);
+               ret = ath9k_hw_set_reset_power_on(ah);
+               break;
        case ATH9K_RESET_WARM:
        case ATH9K_RESET_COLD:
-               return ath9k_hw_set_reset(ah, type);
+               ret = ath9k_hw_set_reset(ah, type);
+               break;
        default:
-               return false;
+               break;
        }
+
+       if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+               REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
+       return ret;
 }
 
 static bool ath9k_hw_chip_reset(struct ath_hw *ah,
@@ -1506,6 +1518,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
                   struct ath9k_hw_cal_data *caldata, bool bChannelChange)
 {
        struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
        u32 saveLedState;
        struct ath9k_channel *curchan = ah->curchan;
        u32 saveDefAntenna;
@@ -1513,6 +1526,53 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
        u64 tsf = 0;
        int i, r;
        bool allow_fbs = false;
+       bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
+       bool save_fullsleep = ah->chip_fullsleep;
+
+       if (mci) {
+
+               ar9003_mci_2g5g_changed(ah, IS_CHAN_2GHZ(chan));
+
+               if (mci_hw->bt_state == MCI_BT_CAL_START) {
+                       u32 payload[4] = {0, 0, 0, 0};
+
+                       ath_dbg(common, ATH_DBG_MCI, "MCI stop rx for BT CAL");
+
+                       mci_hw->bt_state = MCI_BT_CAL;
+
+                       /*
+                        * MCI FIX: disable the MCI interrupt here. This is to
+                        * prevent the SW_MSG_DONE or RX_MSG bits from triggering
+                        * MCI_INT and leading to mci_intr reentry.
+                        */
+
+                       ar9003_mci_disable_interrupt(ah);
+
+                       ath_dbg(common, ATH_DBG_MCI, "send WLAN_CAL_GRANT");
+                       MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_GRANT);
+                       ar9003_mci_send_message(ah, MCI_GPM, 0, payload,
+                                               16, true, false);
+
+                       ath_dbg(common, ATH_DBG_MCI, "\nMCI BT is calibrating");
+
+                       /* Wait up to 25 ms for BT calibration to complete */
+
+                       if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_DONE,
+                                                                 0, 25000))
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI got BT_CAL_DONE\n");
+                       else
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI ### BT cal takes to long, force"
+                                       "bt_state to be bt_awake\n");
+                       mci_hw->bt_state = MCI_BT_AWAKE;
+                       /* MCI FIX: enable mci interrupt here */
+                       ar9003_mci_enable_interrupt(ah);
+
+                       return 0;
+               }
+       }
+
 
        if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
                return -EIO;
@@ -1550,12 +1610,29 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
                if (ath9k_hw_channel_change(ah, chan)) {
                        ath9k_hw_loadnf(ah, ah->curchan);
                        ath9k_hw_start_nfcal(ah, true);
+                       if (mci && mci_hw->ready)
+                               ar9003_mci_2g5g_switch(ah, true);
+
                        if (AR_SREV_9271(ah))
                                ar9002_hw_load_ani_reg(ah, chan);
                        return 0;
                }
        }
 
+       if (mci) {
+               ar9003_mci_disable_interrupt(ah);
+
+               if (mci_hw->ready && !save_fullsleep) {
+                       ar9003_mci_mute_bt(ah);
+                       udelay(20);
+                       REG_WRITE(ah, AR_BTCOEX_CTRL, 0);
+               }
+
+               mci_hw->bt_state = MCI_BT_SLEEP;
+               mci_hw->ready = false;
+       }
+
+
        saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA);
        if (saveDefAntenna == 0)
                saveDefAntenna = 1;
@@ -1611,6 +1688,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
        if (r)
                return r;
 
+       if (mci)
+               ar9003_mci_reset(ah, false, IS_CHAN_2GHZ(chan), save_fullsleep);
+
        /*
         * Some AR91xx SoC devices frequently fail to accept TSF writes
         * right after the chip reset. When that happens, write a new
@@ -1728,6 +1808,55 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
        ath9k_hw_loadnf(ah, chan);
        ath9k_hw_start_nfcal(ah, true);
 
+       if (mci && mci_hw->ready) {
+
+               if (IS_CHAN_2GHZ(chan) &&
+                   (mci_hw->bt_state == MCI_BT_SLEEP)) {
+
+                       if (ar9003_mci_check_int(ah,
+                           AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET) ||
+                           ar9003_mci_check_int(ah,
+                           AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)) {
+
+                               /*
+                                * BT is sleeping. If BT wakes up during WLAN
+                                * calibration, we need to go through all the
+                                * message exchanges again and recalibrate.
+                                */
+
+                               ath_dbg(common, ATH_DBG_MCI, "MCI BT wakes up"
+                                       "during WLAN calibration\n");
+
+                               REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+                                         AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET |
+                                         AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE);
+                               ath_dbg(common, ATH_DBG_MCI, "MCI send"
+                                       "REMOTE_RESET\n");
+                               ar9003_mci_remote_reset(ah, true);
+                               ar9003_mci_send_sys_waking(ah, true);
+                               udelay(1);
+                               if (IS_CHAN_2GHZ(chan))
+                                       ar9003_mci_send_lna_transfer(ah, true);
+
+                               mci_hw->bt_state = MCI_BT_AWAKE;
+
+                               ath_dbg(common, ATH_DBG_MCI, "MCI re-cal\n");
+
+                               if (caldata) {
+                                       caldata->done_txiqcal_once = false;
+                                       caldata->done_txclcal_once = false;
+                                       caldata->rtt_hist.num_readings = 0;
+                               }
+
+                               if (!ath9k_hw_init_cal(ah, chan))
+                                       return -EIO;
+
+                       }
+               }
+               ar9003_mci_enable_interrupt(ah);
+       }
+
        ENABLE_REGWRITE_BUFFER(ah);
 
        ath9k_hw_restore_chainmask(ah);
@@ -1770,6 +1899,21 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
        if (ah->btcoex_hw.enabled)
                ath9k_hw_btcoex_enable(ah);
 
+       if (mci && mci_hw->ready) {
+               /*
+                * Check the BT state again to make
+                * sure it has not changed.
+                */
+
+               ar9003_mci_sync_bt_state(ah);
+               ar9003_mci_2g5g_switch(ah, true);
+
+               if ((mci_hw->bt_state == MCI_BT_AWAKE) &&
+                   mci_hw->query_bt)
+                       mci_hw->need_flush_btinfo = true;
+       }
+
        if (AR_SREV_9300_20_OR_LATER(ah)) {
                ar9003_hw_bb_watchdog_config(ah);
 
@@ -1934,6 +2078,7 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
 bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
 {
        struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
        int status = true, setChip = true;
        static const char *modes[] = {
                "AWAKE",
@@ -1951,12 +2096,35 @@ bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
        switch (mode) {
        case ATH9K_PM_AWAKE:
                status = ath9k_hw_set_power_awake(ah, setChip);
+
+               if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+                       REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
                break;
        case ATH9K_PM_FULL_SLEEP:
+
+               if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
+                       if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) &&
+                               (mci->bt_state != MCI_BT_SLEEP) &&
+                               !mci->halted_bt_gpm) {
+                               ath_dbg(common, ATH_DBG_MCI, "MCI halt BT GPM"
+                                               "(full_sleep)");
+                               ar9003_mci_send_coex_halt_bt_gpm(ah,
+                                                                true, true);
+                       }
+
+                       mci->ready = false;
+                       REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+               }
+
                ath9k_set_power_sleep(ah, setChip);
                ah->chip_fullsleep = true;
                break;
        case ATH9K_PM_NETWORK_SLEEP:
+
+               if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+                       REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
                ath9k_set_power_network_sleep(ah, setChip);
                break;
        default:
@@ -2109,6 +2277,30 @@ static u8 fixup_chainmask(u8 chip_chainmask, u8 eeprom_chainmask)
                return chip_chainmask;
 }
 
+/**
+ * ath9k_hw_dfs_tested - checks whether DFS has been tested on the chipset in use
+ * @ah: the atheros hardware data structure
+ *
+ * We enable DFS support upstream only on chipsets which have passed a series
+ * of tests. The desired test requirements are documented at:
+ *
+ * http://wireless.kernel.org/en/users/Drivers/ath9k/dfs
+ *
+ * Once a new chipset gets properly tested, an individual commit can be used
+ * to document the DFS testing for that chipset.
+ */
+static bool ath9k_hw_dfs_tested(struct ath_hw *ah)
+{
+
+       switch (ah->hw_version.macVersion) {
+       /* AR9580 will likely be our first target to get testing on */
+       case AR_SREV_VERSION_9580:
+       default:
+               return false;
+       }
+}
+
 int ath9k_hw_fill_cap_info(struct ath_hw *ah)
 {
        struct ath9k_hw_capabilities *pCap = &ah->caps;
@@ -2149,6 +2341,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
 
        if (AR_SREV_9485(ah) || AR_SREV_9285(ah) || AR_SREV_9330(ah))
                chip_chainmask = 1;
+       else if (AR_SREV_9462(ah))
+               chip_chainmask = 3;
        else if (!AR_SREV_9280_20_OR_LATER(ah))
                chip_chainmask = 7;
        else if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9340(ah))
@@ -2205,12 +2399,10 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
        else
                pCap->num_gpio_pins = AR_NUM_GPIO;
 
-       if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) {
-               pCap->hw_caps |= ATH9K_HW_CAP_CST;
+       if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah))
                pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
-       } else {
+       else
                pCap->rts_aggr_limit = (8 * 1024);
-       }
 
 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
        ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT);
@@ -2234,7 +2426,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
                pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS;
 
        if (common->btcoex_enabled) {
-               if (AR_SREV_9300_20_OR_LATER(ah)) {
+               if (AR_SREV_9462(ah))
+                       btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI;
+               else if (AR_SREV_9300_20_OR_LATER(ah)) {
                        btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
                        btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300;
                        btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300;
@@ -2318,6 +2512,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
                pCap->pcie_lcr_offset = 0x80;
        }
 
+       if (ath9k_hw_dfs_tested(ah))
+               pCap->hw_caps |= ATH9K_HW_CAP_DFS;
+
        tx_chainmask = pCap->tx_chainmask;
        rx_chainmask = pCap->rx_chainmask;
        while (tx_chainmask || rx_chainmask) {
@@ -2332,7 +2529,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
 
        if (AR_SREV_9300_20_OR_LATER(ah)) {
                ah->enabled_cals |= TX_IQ_CAL;
-               if (!AR_SREV_9330(ah))
+               if (AR_SREV_9485_OR_LATER(ah))
                        ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
        }
        if (AR_SREV_9462(ah))
index 3cb878c..615cc83 100644 (file)
 #define AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL  4
 #define AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED    5
 #define AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED      6
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA      0x16
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK       0x17
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA        0x18
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK         0x19
+#define AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX           0x14
+#define AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX           0x13
+#define AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX           9
+#define AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX           8
+#define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_STROBE      0x1d
+#define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_DATA        0x1e
 
 #define AR_GPIOD_MASK               0x00001FFF
 #define AR_GPIO_BIT(_gpio)          (1 << (_gpio))
@@ -186,21 +196,21 @@ enum ath_ini_subsys {
 enum ath9k_hw_caps {
        ATH9K_HW_CAP_HT                         = BIT(0),
        ATH9K_HW_CAP_RFSILENT                   = BIT(1),
-       ATH9K_HW_CAP_CST                        = BIT(2),
-       ATH9K_HW_CAP_AUTOSLEEP                  = BIT(4),
-       ATH9K_HW_CAP_4KB_SPLITTRANS             = BIT(5),
-       ATH9K_HW_CAP_EDMA                       = BIT(6),
-       ATH9K_HW_CAP_RAC_SUPPORTED              = BIT(7),
-       ATH9K_HW_CAP_LDPC                       = BIT(8),
-       ATH9K_HW_CAP_FASTCLOCK                  = BIT(9),
-       ATH9K_HW_CAP_SGI_20                     = BIT(10),
-       ATH9K_HW_CAP_PAPRD                      = BIT(11),
-       ATH9K_HW_CAP_ANT_DIV_COMB               = BIT(12),
-       ATH9K_HW_CAP_2GHZ                       = BIT(13),
-       ATH9K_HW_CAP_5GHZ                       = BIT(14),
-       ATH9K_HW_CAP_APM                        = BIT(15),
-       ATH9K_HW_CAP_RTT                        = BIT(16),
-       ATH9K_HW_CAP_MCI                        = BIT(17),
+       ATH9K_HW_CAP_AUTOSLEEP                  = BIT(2),
+       ATH9K_HW_CAP_4KB_SPLITTRANS             = BIT(3),
+       ATH9K_HW_CAP_EDMA                       = BIT(4),
+       ATH9K_HW_CAP_RAC_SUPPORTED              = BIT(5),
+       ATH9K_HW_CAP_LDPC                       = BIT(6),
+       ATH9K_HW_CAP_FASTCLOCK                  = BIT(7),
+       ATH9K_HW_CAP_SGI_20                     = BIT(8),
+       ATH9K_HW_CAP_PAPRD                      = BIT(9),
+       ATH9K_HW_CAP_ANT_DIV_COMB               = BIT(10),
+       ATH9K_HW_CAP_2GHZ                       = BIT(11),
+       ATH9K_HW_CAP_5GHZ                       = BIT(12),
+       ATH9K_HW_CAP_APM                        = BIT(13),
+       ATH9K_HW_CAP_RTT                        = BIT(14),
+       ATH9K_HW_CAP_MCI                        = BIT(15),
+       ATH9K_HW_CAP_DFS                        = BIT(16),
 };
 
 struct ath9k_hw_capabilities {
@@ -266,6 +276,7 @@ enum ath9k_int {
        ATH9K_INT_TX = 0x00000040,
        ATH9K_INT_TXDESC = 0x00000080,
        ATH9K_INT_TIM_TIMER = 0x00000100,
+       ATH9K_INT_MCI = 0x00000200,
        ATH9K_INT_BB_WATCHDOG = 0x00000400,
        ATH9K_INT_TXURN = 0x00000800,
        ATH9K_INT_MIB = 0x00001000,
@@ -417,6 +428,25 @@ enum ath9k_rx_qtype {
        ATH9K_RX_QUEUE_MAX,
 };
 
+enum mci_message_header {              /* length of payload */
+       MCI_LNA_CTRL     = 0x10,        /* len = 0 */
+       MCI_CONT_NACK    = 0x20,        /* len = 0 */
+       MCI_CONT_INFO    = 0x30,        /* len = 4 */
+       MCI_CONT_RST     = 0x40,        /* len = 0 */
+       MCI_SCHD_INFO    = 0x50,        /* len = 16 */
+       MCI_CPU_INT      = 0x60,        /* len = 4 */
+       MCI_SYS_WAKING   = 0x70,        /* len = 0 */
+       MCI_GPM          = 0x80,        /* len = 16 */
+       MCI_LNA_INFO     = 0x90,        /* len = 1 */
+       MCI_LNA_STATE    = 0x94,
+       MCI_LNA_TAKE     = 0x98,
+       MCI_LNA_TRANS    = 0x9c,
+       MCI_SYS_SLEEPING = 0xa0,        /* len = 0 */
+       MCI_REQ_WAKE     = 0xc0,        /* len = 0 */
+       MCI_DEBUG_16     = 0xfe,        /* len = 2 */
+       MCI_REMOTE_RESET = 0xff         /* len = 16 */
+};
+
 enum ath_mci_gpm_coex_profile_type {
        MCI_GPM_COEX_PROFILE_UNKNOWN,
        MCI_GPM_COEX_PROFILE_RFCOMM,
@@ -427,6 +457,132 @@ enum ath_mci_gpm_coex_profile_type {
        MCI_GPM_COEX_PROFILE_MAX
 };
 
+/* MCI GPM/Coex opcode/type definitions */
+enum {
+       MCI_GPM_COEX_W_GPM_PAYLOAD      = 1,
+       MCI_GPM_COEX_B_GPM_TYPE         = 4,
+       MCI_GPM_COEX_B_GPM_OPCODE       = 5,
+       /* MCI_GPM_WLAN_CAL_REQ, MCI_GPM_WLAN_CAL_DONE */
+       MCI_GPM_WLAN_CAL_W_SEQUENCE     = 2,
+
+       /* MCI_GPM_COEX_VERSION_QUERY */
+       /* MCI_GPM_COEX_VERSION_RESPONSE */
+       MCI_GPM_COEX_B_MAJOR_VERSION    = 6,
+       MCI_GPM_COEX_B_MINOR_VERSION    = 7,
+       /* MCI_GPM_COEX_STATUS_QUERY */
+       MCI_GPM_COEX_B_BT_BITMAP        = 6,
+       MCI_GPM_COEX_B_WLAN_BITMAP      = 7,
+       /* MCI_GPM_COEX_HALT_BT_GPM */
+       MCI_GPM_COEX_B_HALT_STATE       = 6,
+       /* MCI_GPM_COEX_WLAN_CHANNELS */
+       MCI_GPM_COEX_B_CHANNEL_MAP      = 6,
+       /* MCI_GPM_COEX_BT_PROFILE_INFO */
+       MCI_GPM_COEX_B_PROFILE_TYPE     = 6,
+       MCI_GPM_COEX_B_PROFILE_LINKID   = 7,
+       MCI_GPM_COEX_B_PROFILE_STATE    = 8,
+       MCI_GPM_COEX_B_PROFILE_ROLE     = 9,
+       MCI_GPM_COEX_B_PROFILE_RATE     = 10,
+       MCI_GPM_COEX_B_PROFILE_VOTYPE   = 11,
+       MCI_GPM_COEX_H_PROFILE_T        = 12,
+       MCI_GPM_COEX_B_PROFILE_W        = 14,
+       MCI_GPM_COEX_B_PROFILE_A        = 15,
+       /* MCI_GPM_COEX_BT_STATUS_UPDATE */
+       MCI_GPM_COEX_B_STATUS_TYPE      = 6,
+       MCI_GPM_COEX_B_STATUS_LINKID    = 7,
+       MCI_GPM_COEX_B_STATUS_STATE     = 8,
+       /* MCI_GPM_COEX_BT_UPDATE_FLAGS */
+       MCI_GPM_COEX_W_BT_FLAGS         = 6,
+       MCI_GPM_COEX_B_BT_FLAGS_OP      = 10
+};
+
+enum mci_gpm_subtype {
+       MCI_GPM_BT_CAL_REQ      = 0,
+       MCI_GPM_BT_CAL_GRANT    = 1,
+       MCI_GPM_BT_CAL_DONE     = 2,
+       MCI_GPM_WLAN_CAL_REQ    = 3,
+       MCI_GPM_WLAN_CAL_GRANT  = 4,
+       MCI_GPM_WLAN_CAL_DONE   = 5,
+       MCI_GPM_COEX_AGENT      = 0x0c,
+       MCI_GPM_RSVD_PATTERN    = 0xfe,
+       MCI_GPM_RSVD_PATTERN32  = 0xfefefefe,
+       MCI_GPM_BT_DEBUG        = 0xff
+};
+
+enum mci_bt_state {
+       MCI_BT_SLEEP,
+       MCI_BT_AWAKE,
+       MCI_BT_CAL_START,
+       MCI_BT_CAL
+};
+
+/* Type of state query */
+enum mci_state_type {
+       MCI_STATE_ENABLE,
+       MCI_STATE_INIT_GPM_OFFSET,
+       MCI_STATE_NEXT_GPM_OFFSET,
+       MCI_STATE_LAST_GPM_OFFSET,
+       MCI_STATE_BT,
+       MCI_STATE_SET_BT_SLEEP,
+       MCI_STATE_SET_BT_AWAKE,
+       MCI_STATE_SET_BT_CAL_START,
+       MCI_STATE_SET_BT_CAL,
+       MCI_STATE_LAST_SCHD_MSG_OFFSET,
+       MCI_STATE_REMOTE_SLEEP,
+       MCI_STATE_CONT_RSSI_POWER,
+       MCI_STATE_CONT_PRIORITY,
+       MCI_STATE_CONT_TXRX,
+       MCI_STATE_RESET_REQ_WAKE,
+       MCI_STATE_SEND_WLAN_COEX_VERSION,
+       MCI_STATE_SET_BT_COEX_VERSION,
+       MCI_STATE_SEND_WLAN_CHANNELS,
+       MCI_STATE_SEND_VERSION_QUERY,
+       MCI_STATE_SEND_STATUS_QUERY,
+       MCI_STATE_NEED_FLUSH_BT_INFO,
+       MCI_STATE_SET_CONCUR_TX_PRI,
+       MCI_STATE_RECOVER_RX,
+       MCI_STATE_NEED_FTP_STOMP,
+       MCI_STATE_NEED_TUNING,
+       MCI_STATE_DEBUG,
+       MCI_STATE_MAX
+};
+
+enum mci_gpm_coex_opcode {
+       MCI_GPM_COEX_VERSION_QUERY,
+       MCI_GPM_COEX_VERSION_RESPONSE,
+       MCI_GPM_COEX_STATUS_QUERY,
+       MCI_GPM_COEX_HALT_BT_GPM,
+       MCI_GPM_COEX_WLAN_CHANNELS,
+       MCI_GPM_COEX_BT_PROFILE_INFO,
+       MCI_GPM_COEX_BT_STATUS_UPDATE,
+       MCI_GPM_COEX_BT_UPDATE_FLAGS
+};
+
+#define MCI_GPM_NOMORE  0
+#define MCI_GPM_MORE    1
+#define MCI_GPM_INVALID 0xffffffff
+
+#define MCI_GPM_RECYCLE(_p_gpm)        do {                      \
+       *(((u32 *)_p_gpm) + MCI_GPM_COEX_W_GPM_PAYLOAD) = \
+                               MCI_GPM_RSVD_PATTERN32;   \
+} while (0)
+
+#define MCI_GPM_TYPE(_p_gpm)   \
+       (*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) & 0xff)
+
+#define MCI_GPM_OPCODE(_p_gpm) \
+       (*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_OPCODE) & 0xff)
+
+#define MCI_GPM_SET_CAL_TYPE(_p_gpm, _cal_type)        do {                       \
+       *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) = (_cal_type) & 0xff;\
+} while (0)
+
+#define MCI_GPM_SET_TYPE_OPCODE(_p_gpm, _type, _opcode) do {              \
+       *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) = (_type) & 0xff;    \
+       *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_OPCODE) = (_opcode) & 0xff;\
+} while (0)
+
+#define MCI_GPM_IS_CAL_TYPE(_type) ((_type) <= MCI_GPM_WLAN_CAL_DONE)
+
 struct ath9k_beacon_state {
        u32 bs_nexttbtt;
        u32 bs_nextdtim;
@@ -954,7 +1110,6 @@ bool ath9k_hw_disable(struct ath_hw *ah);
 void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test);
 void ath9k_hw_setopmode(struct ath_hw *ah);
 void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1);
-void ath9k_hw_setbssidmask(struct ath_hw *ah);
 void ath9k_hw_write_associd(struct ath_hw *ah);
 u32 ath9k_hw_gettsf32(struct ath_hw *ah);
 u64 ath9k_hw_gettsf64(struct ath_hw *ah);
@@ -1047,6 +1202,32 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning);
 void ath9k_hw_proc_mib_event(struct ath_hw *ah);
 void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan);
 
+bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
+                            u32 *payload, u8 len, bool wait_done,
+                            bool check_bt);
+void ar9003_mci_mute_bt(struct ath_hw *ah);
+u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data);
+void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
+                     u16 len, u32 sched_addr);
+void ar9003_mci_cleanup(struct ath_hw *ah);
+void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt,
+                                     bool wait_done);
+u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
+                           u8 gpm_opcode, int time_out);
+void ar9003_mci_2g5g_changed(struct ath_hw *ah, bool is_2g);
+void ar9003_mci_disable_interrupt(struct ath_hw *ah);
+void ar9003_mci_enable_interrupt(struct ath_hw *ah);
+void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
+                     bool is_full_sleep);
+bool ar9003_mci_check_int(struct ath_hw *ah, u32 ints);
+void ar9003_mci_remote_reset(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_send_sys_waking(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_send_lna_transfer(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_sync_bt_state(struct ath_hw *ah);
+void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
+                             u32 *rx_msg_intr);
+
 #define ATH9K_CLOCK_RATE_CCK           22
 #define ATH9K_CLOCK_RATE_5GHZ_OFDM     40
 #define ATH9K_CLOCK_RATE_2GHZ_OFDM     44
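For orientation, a hedged sketch (not part of the patch) of composing a coex
GPM message with the helpers above, modelled on the WLAN_CAL_GRANT exchange in
ath9k_hw_reset() earlier in this series: byte 4 of the 16-byte payload carries
the type, byte 5 the opcode, and the message is sent with
ar9003_mci_send_message(ah, MCI_GPM, ...). The specific type/opcode pairing is
an illustrative assumption.

#include "hw.h"

static bool sketch_send_version_query(struct ath_hw *ah)
{
	u32 payload[4] = {0, 0, 0, 0};

	MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
				MCI_GPM_COEX_VERSION_QUERY);

	/* 16-byte GPM, wait for completion, do not check the BT state */
	return ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
				       true, false);
}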
index e046de9..c5df981 100644 (file)
@@ -258,6 +258,8 @@ static void setup_ht_cap(struct ath_softc *sc,
 
        if (AR_SREV_9330(ah) || AR_SREV_9485(ah))
                max_streams = 1;
+       else if (AR_SREV_9462(ah))
+               max_streams = 2;
        else if (AR_SREV_9300_20_OR_LATER(ah))
                max_streams = 3;
        else
@@ -295,9 +297,22 @@ static int ath9k_reg_notifier(struct wiphy *wiphy,
 {
        struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
        struct ath_softc *sc = hw->priv;
-       struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
+       int ret;
+
+       ret = ath_reg_notifier_apply(wiphy, request, reg);
+
+       /* Set tx power */
+       if (ah->curchan) {
+               sc->config.txpowlimit = 2 * ah->curchan->chan->max_power;
+               ath9k_ps_wakeup(sc);
+               ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
+               sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
+               ath9k_ps_restore(sc);
+       }
 
-       return ath_reg_notifier_apply(wiphy, request, reg);
+       return ret;
 }
 
 /*
@@ -408,6 +423,7 @@ fail:
 static int ath9k_init_btcoex(struct ath_softc *sc)
 {
        struct ath_txq *txq;
+       struct ath_hw *ah = sc->sc_ah;
        int r;
 
        switch (sc->sc_ah->btcoex_hw.scheme) {
@@ -423,9 +439,38 @@ static int ath9k_init_btcoex(struct ath_softc *sc)
                        return -1;
                txq = sc->tx.txq_map[WME_AC_BE];
                ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
+               sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
+               break;
+       case ATH_BTCOEX_CFG_MCI:
                sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
                sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
                INIT_LIST_HEAD(&sc->btcoex.mci.info);
+
+               r = ath_mci_setup(sc);
+               if (r)
+                       return r;
+
+               if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
+                       ah->btcoex_hw.mci.ready = false;
+                       ah->btcoex_hw.mci.bt_state = 0;
+                       ah->btcoex_hw.mci.bt_ver_major = 3;
+                       ah->btcoex_hw.mci.bt_ver_minor = 0;
+                       ah->btcoex_hw.mci.bt_version_known = false;
+                       ah->btcoex_hw.mci.update_2g5g = true;
+                       ah->btcoex_hw.mci.is_2g = true;
+                       ah->btcoex_hw.mci.wlan_channels_update = false;
+                       ah->btcoex_hw.mci.wlan_channels[0] = 0x00000000;
+                       ah->btcoex_hw.mci.wlan_channels[1] = 0xffffffff;
+                       ah->btcoex_hw.mci.wlan_channels[2] = 0xffffffff;
+                       ah->btcoex_hw.mci.wlan_channels[3] = 0x7fffffff;
+                       ah->btcoex_hw.mci.query_bt = true;
+                       ah->btcoex_hw.mci.unhalt_bt_gpm = true;
+                       ah->btcoex_hw.mci.halted_bt_gpm = false;
+                       ah->btcoex_hw.mci.need_flush_btinfo = false;
+                       ah->btcoex_hw.mci.wlan_cal_seq = 0;
+                       ah->btcoex_hw.mci.wlan_cal_done = 0;
+                       ah->btcoex_hw.mci.config = 0x2201;
+               }
                break;
        default:
                WARN_ON(1);
@@ -839,6 +884,9 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
            sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
                ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
 
+       if (sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_MCI)
+               ath_mci_cleanup(sc);
+
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i))
                        ath_tx_cleanupq(sc, &sc->tx.txq[i]);
index ecdb6fd..0e4fbb3 100644 (file)
@@ -760,7 +760,10 @@ bool ath9k_hw_intrpend(struct ath_hw *ah)
                return true;
 
        host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
-       if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
+
+       if (((host_isr & AR_INTR_MAC_IRQ) ||
+            (host_isr & AR_INTR_ASYNC_MASK_MCI)) &&
+           (host_isr != AR_INTR_SPURIOUS))
                return true;
 
        host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
@@ -798,6 +801,7 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
 {
        struct ath_common *common = ath9k_hw_common(ah);
        u32 sync_default = AR_INTR_SYNC_DEFAULT;
+       u32 async_mask;
 
        if (!(ah->imask & ATH9K_INT_GLOBAL))
                return;
@@ -812,13 +816,16 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
        if (AR_SREV_9340(ah))
                sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
 
+       async_mask = AR_INTR_MAC_IRQ;
+
+       if (ah->imask & ATH9K_INT_MCI)
+               async_mask |= AR_INTR_ASYNC_MASK_MCI;
+
        ath_dbg(common, ATH_DBG_INTERRUPT, "enable IER\n");
        REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
        if (!AR_SREV_9100(ah)) {
-               REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
-                         AR_INTR_MAC_IRQ);
-               REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
-
+               REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, async_mask);
+               REG_WRITE(ah, AR_INTR_ASYNC_MASK, async_mask);
 
                REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
                REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default);
index e43c41c..7fbc4bd 100644 (file)
@@ -118,7 +118,7 @@ void ath9k_ps_restore(struct ath_softc *sc)
        if (--sc->ps_usecount != 0)
                goto unlock;
 
-       if (sc->ps_idle)
+       if (sc->ps_idle && (sc->ps_flags & PS_WAIT_FOR_TX_ACK))
                mode = ATH9K_PM_FULL_SLEEP;
        else if (sc->ps_enabled &&
                 !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
@@ -286,7 +286,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
                        ath_start_ani(common);
        }
 
-       if (ath9k_hw_ops(ah)->antdiv_comb_conf_get && sc->ant_rx != 3) {
+       if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3) {
                struct ath_hw_antcomb_conf div_ant_conf;
                u8 lna_conf;
 
@@ -332,7 +332,8 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
                hchan = ah->curchan;
        }
 
-       if (fastcc && !ath9k_hw_check_alive(ah))
+       if (fastcc && (ah->chip_fullsleep ||
+           !ath9k_hw_check_alive(ah)))
                fastcc = false;
 
        if (!ath_prepare_reset(sc, retry_tx, flush))
@@ -561,7 +562,6 @@ void ath_ani_calibrate(unsigned long data)
        /* Long calibration runs independently of short calibration. */
        if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) {
                longcal = true;
-               ath_dbg(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
                common->ani.longcal_timer = timestamp;
        }
 
@@ -569,8 +569,6 @@ void ath_ani_calibrate(unsigned long data)
        if (!common->ani.caldone) {
                if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) {
                        shortcal = true;
-                       ath_dbg(common, ATH_DBG_ANI,
-                               "shortcal @%lu\n", jiffies);
                        common->ani.shortcal_timer = timestamp;
                        common->ani.resetcal_timer = timestamp;
                }
@@ -584,8 +582,9 @@ void ath_ani_calibrate(unsigned long data)
        }
 
        /* Verify whether we must check ANI */
-       if ((timestamp - common->ani.checkani_timer) >=
-            ah->config.ani_poll_interval) {
+       if (ah->config.enable_ani &&
+           (timestamp - common->ani.checkani_timer) >=
+           ah->config.ani_poll_interval) {
                aniflag = true;
                common->ani.checkani_timer = timestamp;
        }
@@ -605,6 +604,11 @@ void ath_ani_calibrate(unsigned long data)
                                                ah->rxchainmask, longcal);
        }
 
+       ath_dbg(common, ATH_DBG_ANI,
+               "Calibration @%lu finished: %s %s %s, caldone: %s\n", jiffies,
+               longcal ? "long" : "", shortcal ? "short" : "",
+               aniflag ? "ani" : "", common->ani.caldone ? "true" : "false");
+
        ath9k_ps_restore(sc);
 
 set_timer:
@@ -640,9 +644,9 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
        spin_lock(&sc->nodes_lock);
        list_add(&an->list, &sc->nodes);
        spin_unlock(&sc->nodes_lock);
+#endif
        an->sta = sta;
        an->vif = vif;
-#endif
        if (sc->sc_flags & SC_OP_TXAGGR) {
                ath_tx_node_init(sc, an);
                an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
@@ -742,6 +746,9 @@ void ath9k_tasklet(unsigned long data)
                if (status & ATH9K_INT_GENTIMER)
                        ath_gen_timer_isr(sc->sc_ah);
 
+       if (status & ATH9K_INT_MCI)
+               ath_mci_intr(sc);
+
 out:
        /* re-enable hardware interrupt */
        ath9k_hw_enable_interrupts(ah);
@@ -764,7 +771,8 @@ irqreturn_t ath_isr(int irq, void *dev)
                ATH9K_INT_BMISS |               \
                ATH9K_INT_CST |                 \
                ATH9K_INT_TSFOOR |              \
-               ATH9K_INT_GENTIMER)
+               ATH9K_INT_GENTIMER |            \
+               ATH9K_INT_MCI)
 
        struct ath_softc *sc = dev;
        struct ath_hw *ah = sc->sc_ah;
@@ -882,82 +890,6 @@ chip_reset:
 #undef SCHED_INTR
 }
 
-static void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
-{
-       struct ath_hw *ah = sc->sc_ah;
-       struct ath_common *common = ath9k_hw_common(ah);
-       struct ieee80211_channel *channel = hw->conf.channel;
-       int r;
-
-       ath9k_ps_wakeup(sc);
-       spin_lock_bh(&sc->sc_pcu_lock);
-       atomic_set(&ah->intr_ref_cnt, -1);
-
-       ath9k_hw_configpcipowersave(ah, false);
-
-       if (!ah->curchan)
-               ah->curchan = ath9k_cmn_get_curchannel(sc->hw, ah);
-
-       r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
-       if (r) {
-               ath_err(common,
-                       "Unable to reset channel (%u MHz), reset status %d\n",
-                       channel->center_freq, r);
-       }
-
-       ath_complete_reset(sc, true);
-
-       /* Enable LED */
-       ath9k_hw_cfg_output(ah, ah->led_pin,
-                           AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
-       ath9k_hw_set_gpio(ah, ah->led_pin, 0);
-
-       spin_unlock_bh(&sc->sc_pcu_lock);
-
-       ath9k_ps_restore(sc);
-}
-
-void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
-{
-       struct ath_hw *ah = sc->sc_ah;
-       struct ieee80211_channel *channel = hw->conf.channel;
-       int r;
-
-       ath9k_ps_wakeup(sc);
-
-       ath_cancel_work(sc);
-
-       spin_lock_bh(&sc->sc_pcu_lock);
-
-       /*
-        * Keep the LED on when the radio is disabled
-        * during idle unassociated state.
-        */
-       if (!sc->ps_idle) {
-               ath9k_hw_set_gpio(ah, ah->led_pin, 1);
-               ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
-       }
-
-       ath_prepare_reset(sc, false, true);
-
-       if (!ah->curchan)
-               ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
-
-       r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
-       if (r) {
-               ath_err(ath9k_hw_common(sc->sc_ah),
-                       "Unable to reset channel (%u MHz), reset status %d\n",
-                       channel->center_freq, r);
-       }
-
-       ath9k_hw_phy_disable(ah);
-
-       ath9k_hw_configpcipowersave(ah, true);
-
-       spin_unlock_bh(&sc->sc_pcu_lock);
-       ath9k_ps_restore(sc);
-}
-
 static int ath_reset(struct ath_softc *sc, bool retry_tx)
 {
        int r;
@@ -1093,6 +1025,9 @@ static int ath9k_start(struct ieee80211_hw *hw)
         * and then setup of the interrupt mask.
         */
        spin_lock_bh(&sc->sc_pcu_lock);
+
+       atomic_set(&ah->intr_ref_cnt, -1);
+
        r = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
        if (r) {
                ath_err(common,
@@ -1119,6 +1054,9 @@ static int ath9k_start(struct ieee80211_hw *hw)
        if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
                ah->imask |= ATH9K_INT_CST;
 
+       if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+               ah->imask |= ATH9K_INT_MCI;
+
        sc->sc_flags &= ~SC_OP_INVALID;
        sc->sc_ah->is_monitoring = false;
 
@@ -1131,6 +1069,18 @@ static int ath9k_start(struct ieee80211_hw *hw)
                goto mutex_unlock;
        }
 
+       if (ah->led_pin >= 0) {
+               ath9k_hw_cfg_output(ah, ah->led_pin,
+                                   AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+               ath9k_hw_set_gpio(ah, ah->led_pin, 0);
+       }
+
+       /*
+        * Reset key cache to sane defaults (all entries cleared) instead of
+        * semi-random values after suspend/resume.
+        */
+       ath9k_cmn_init_crypto(sc->sc_ah);
+
        spin_unlock_bh(&sc->sc_pcu_lock);
 
        if ((ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) &&
@@ -1176,6 +1126,13 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
                }
        }
 
+       /*
+        * Cannot tx while the hardware is in full sleep; it first needs a
+        * full chip reset to recover from that state.
+        */
+       if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_FULL_SLEEP))
+               goto exit;
+
        if (unlikely(sc->sc_ah->power_mode != ATH9K_PM_AWAKE)) {
                /*
                 * We are using PS-Poll and mac80211 can request TX while in
@@ -1222,6 +1179,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
        struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
+       bool prev_idle;
 
        mutex_lock(&sc->mutex);
 
@@ -1252,35 +1210,45 @@ static void ath9k_stop(struct ieee80211_hw *hw)
         * before setting the invalid flag. */
        ath9k_hw_disable_interrupts(ah);
 
-       if (!(sc->sc_flags & SC_OP_INVALID)) {
-               ath_drain_all_txq(sc, false);
-               ath_stoprecv(sc);
-               ath9k_hw_phy_disable(ah);
-       } else
-               sc->rx.rxlink = NULL;
+       spin_unlock_bh(&sc->sc_pcu_lock);
+
+       /* we can now sync irq and kill any running tasklets, since we have
+        * already disabled interrupts and are not holding a spin lock */
+       synchronize_irq(sc->irq);
+       tasklet_kill(&sc->intr_tq);
+       tasklet_kill(&sc->bcon_tasklet);
+
+       prev_idle = sc->ps_idle;
+       sc->ps_idle = true;
+
+       spin_lock_bh(&sc->sc_pcu_lock);
+
+       if (ah->led_pin >= 0) {
+               ath9k_hw_set_gpio(ah, ah->led_pin, 1);
+               ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
+       }
+
+       ath_prepare_reset(sc, false, true);
 
        if (sc->rx.frag) {
                dev_kfree_skb_any(sc->rx.frag);
                sc->rx.frag = NULL;
        }
 
-       /* disable HAL and put h/w to sleep */
-       ath9k_hw_disable(ah);
+       if (!ah->curchan)
+               ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
+
+       ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
+       ath9k_hw_phy_disable(ah);
 
-       spin_unlock_bh(&sc->sc_pcu_lock);
+       ath9k_hw_configpcipowersave(ah, true);
 
-       /* we can now sync irq and kill any running tasklets, since we already
-        * disabled interrupts and not holding a spin lock */
-       synchronize_irq(sc->irq);
-       tasklet_kill(&sc->intr_tq);
-       tasklet_kill(&sc->bcon_tasklet);
+       spin_unlock_bh(&sc->sc_pcu_lock);
 
        ath9k_ps_restore(sc);
 
-       sc->ps_idle = true;
-       ath_radio_disable(sc, hw);
-
        sc->sc_flags |= SC_OP_INVALID;
+       sc->ps_idle = prev_idle;
 
        mutex_unlock(&sc->mutex);
 
@@ -1620,8 +1588,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ieee80211_conf *conf = &hw->conf;
-       bool disable_radio = false;
 
+       ath9k_ps_wakeup(sc);
        mutex_lock(&sc->mutex);
 
        /*
@@ -1632,13 +1600,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
         */
        if (changed & IEEE80211_CONF_CHANGE_IDLE) {
                sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
-               if (!sc->ps_idle) {
-                       ath_radio_enable(sc, hw);
-                       ath_dbg(common, ATH_DBG_CONFIG,
-                               "not-idle: enabling radio\n");
-               } else {
-                       disable_radio = true;
-               }
+               if (sc->ps_idle)
+                       ath_cancel_work(sc);
        }
 
        /*
@@ -1745,18 +1708,12 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
                ath_dbg(common, ATH_DBG_CONFIG,
                        "Set power: %d\n", conf->power_level);
                sc->config.txpowlimit = 2 * conf->power_level;
-               ath9k_ps_wakeup(sc);
                ath9k_cmn_update_txpow(ah, sc->curtxpow,
                                       sc->config.txpowlimit, &sc->curtxpow);
-               ath9k_ps_restore(sc);
-       }
-
-       if (disable_radio) {
-               ath_dbg(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
-               ath_radio_disable(sc, hw);
        }
 
        mutex_unlock(&sc->mutex);
+       ath9k_ps_restore(sc);
 
        return 0;
 }
@@ -1916,7 +1873,8 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
        if (ath9k_modparam_nohwcrypt)
                return -ENOSPC;
 
-       if (vif->type == NL80211_IFTYPE_ADHOC &&
+       if ((vif->type == NL80211_IFTYPE_ADHOC ||
+            vif->type == NL80211_IFTYPE_MESH_POINT) &&
            (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
             key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
            !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
@@ -2324,9 +2282,6 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
                return;
        }
 
-       if (drop)
-               timeout = 1;
-
        for (j = 0; j < timeout; j++) {
                bool npend = false;
 
@@ -2344,21 +2299,22 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
                }
 
                if (!npend)
-                   goto out;
+                   break;
        }
 
-       ath9k_ps_wakeup(sc);
-       spin_lock_bh(&sc->sc_pcu_lock);
-       drain_txq = ath_drain_all_txq(sc, false);
-       spin_unlock_bh(&sc->sc_pcu_lock);
+       if (drop) {
+               ath9k_ps_wakeup(sc);
+               spin_lock_bh(&sc->sc_pcu_lock);
+               drain_txq = ath_drain_all_txq(sc, false);
+               spin_unlock_bh(&sc->sc_pcu_lock);
 
-       if (!drain_txq)
-               ath_reset(sc, false);
+               if (!drain_txq)
+                       ath_reset(sc, false);
 
-       ath9k_ps_restore(sc);
-       ieee80211_wake_queues(hw);
+               ath9k_ps_restore(sc);
+               ieee80211_wake_queues(hw);
+       }
 
-out:
        ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
        mutex_unlock(&sc->mutex);
 }
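
The main.c hunks above retire the ath_radio_enable()/ath_radio_disable() pair: LED and key-cache setup move into ath9k_start(), the stop path performs the full shutdown itself, and mac80211 callbacks that touch the hardware simply bracket their work with the existing power-save reference counting. A minimal sketch of that bracketing pattern, assuming only ath9k_ps_wakeup()/ath9k_ps_restore() and sc->mutex from the driver (the callback body is illustrative, not part of the patch):

    static int ath9k_some_op_sketch(struct ieee80211_hw *hw)
    {
    	struct ath_softc *sc = hw->priv;

    	ath9k_ps_wakeup(sc);	/* take a wakeup reference; chip stays awake */
    	mutex_lock(&sc->mutex);

    	/* ... touch registers/queues while the chip is guaranteed awake ... */

    	mutex_unlock(&sc->mutex);
    	ath9k_ps_restore(sc);	/* drop the reference; chip may doze again */
    	return 0;
    }
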
index 0fbb141..691bf47 100644 (file)
@@ -14,6 +14,9 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
 #include "ath9k.h"
 #include "mci.h"
 
@@ -181,8 +184,58 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
        ath9k_btcoex_timer_resume(sc);
 }
 
-void ath_mci_process_profile(struct ath_softc *sc,
-                            struct ath_mci_profile_info *info)
+
+static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
+{
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
+       u32 payload[4] = {0, 0, 0, 0};
+
+       switch (opcode) {
+       case MCI_GPM_BT_CAL_REQ:
+
+               ath_dbg(common, ATH_DBG_MCI, "MCI received BT_CAL_REQ\n");
+
+               if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) {
+                       ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START, NULL);
+                       ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
+               } else
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI State mismatches: %d\n",
+                               ar9003_mci_state(ah, MCI_STATE_BT, NULL));
+
+               break;
+
+       case MCI_GPM_BT_CAL_DONE:
+
+               ath_dbg(common, ATH_DBG_MCI, "MCI received BT_CAL_DONE\n");
+
+               if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_CAL)
+                       ath_dbg(common, ATH_DBG_MCI, "MCI error illegal!\n");
+               else
+                       ath_dbg(common, ATH_DBG_MCI, "MCI BT not in CAL state\n");
+
+               break;
+
+       case MCI_GPM_BT_CAL_GRANT:
+
+               ath_dbg(common, ATH_DBG_MCI, "MCI received BT_CAL_GRANT\n");
+
+               /* Send WLAN_CAL_DONE for now */
+               ath_dbg(common, ATH_DBG_MCI, "MCI send WLAN_CAL_DONE\n");
+               MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE);
+               ar9003_mci_send_message(sc->sc_ah, MCI_GPM, 0, payload,
+                                       16, false, true);
+               break;
+
+       default:
+               ath_dbg(common, ATH_DBG_MCI, "MCI Unknown GPM CAL message\n");
+               break;
+       }
+}
+
+static void ath_mci_process_profile(struct ath_softc *sc,
+                                   struct ath_mci_profile_info *info)
 {
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_btcoex *btcoex = &sc->btcoex;
@@ -208,8 +261,8 @@ void ath_mci_process_profile(struct ath_softc *sc,
        ath_mci_update_scheme(sc);
 }
 
-void ath_mci_process_status(struct ath_softc *sc,
-                           struct ath_mci_profile_status *status)
+static void ath_mci_process_status(struct ath_softc *sc,
+                                  struct ath_mci_profile_status *status)
 {
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_btcoex *btcoex = &sc->btcoex;
@@ -252,3 +305,369 @@ void ath_mci_process_status(struct ath_softc *sc,
        if (old_num_mgmt != mci->num_mgmt)
                ath_mci_update_scheme(sc);
 }
+
+static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
+{
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_mci_profile_info profile_info;
+       struct ath_mci_profile_status profile_status;
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       u32 version;
+       u8 major;
+       u8 minor;
+       u32 seq_num;
+
+       switch (opcode) {
+
+       case MCI_GPM_COEX_VERSION_QUERY:
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Recv GPM COEX Version Query.\n");
+               version = ar9003_mci_state(ah,
+                               MCI_STATE_SEND_WLAN_COEX_VERSION, NULL);
+               break;
+
+       case MCI_GPM_COEX_VERSION_RESPONSE:
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Recv GPM COEX Version Response.\n");
+               major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION);
+               minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION);
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI BT Coex version: %d.%d\n", major, minor);
+               version = (major << 8) + minor;
+               version = ar9003_mci_state(ah,
+                         MCI_STATE_SET_BT_COEX_VERSION, &version);
+               break;
+
+       case MCI_GPM_COEX_STATUS_QUERY:
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Recv GPM COEX Status Query = 0x%02x.\n",
+                       *(rx_payload + MCI_GPM_COEX_B_WLAN_BITMAP));
+               ar9003_mci_state(ah,
+                                MCI_STATE_SEND_WLAN_CHANNELS, NULL);
+               break;
+
+       case MCI_GPM_COEX_BT_PROFILE_INFO:
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Recv GPM Coex BT profile info\n");
+               memcpy(&profile_info,
+                      (rx_payload + MCI_GPM_COEX_B_PROFILE_TYPE), 10);
+
+               if ((profile_info.type == MCI_GPM_COEX_PROFILE_UNKNOWN)
+                   || (profile_info.type >=
+                                           MCI_GPM_COEX_PROFILE_MAX)) {
+
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "illegal profile type = %d, "
+                               "state = %d\n", profile_info.type,
+                               profile_info.start);
+                       break;
+               }
+
+               ath_mci_process_profile(sc, &profile_info);
+               break;
+
+       case MCI_GPM_COEX_BT_STATUS_UPDATE:
+               profile_status.is_link = *(rx_payload +
+                                          MCI_GPM_COEX_B_STATUS_TYPE);
+               profile_status.conn_handle = *(rx_payload +
+                                              MCI_GPM_COEX_B_STATUS_LINKID);
+               profile_status.is_critical = *(rx_payload +
+                                              MCI_GPM_COEX_B_STATUS_STATE);
+
+               seq_num = *((u32 *)(rx_payload + 12));
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Recv GPM COEX BT_Status_Update: "
+                       "is_link=%d, linkId=%d, state=%d, SEQ=%d\n",
+                       profile_status.is_link, profile_status.conn_handle,
+                       profile_status.is_critical, seq_num);
+
+               ath_mci_process_status(sc, &profile_status);
+               break;
+
+       default:
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Unknown GPM COEX message = 0x%02x\n", opcode);
+               break;
+       }
+}
+
+static int ath_mci_buf_alloc(struct ath_softc *sc, struct ath_mci_buf *buf)
+{
+       int error = 0;
+
+       buf->bf_addr = dma_alloc_coherent(sc->dev, buf->bf_len,
+                                         &buf->bf_paddr, GFP_KERNEL);
+
+       if (buf->bf_addr == NULL) {
+               error = -ENOMEM;
+               goto fail;
+       }
+
+       return 0;
+
+fail:
+       memset(buf, 0, sizeof(*buf));
+       return error;
+}
+
+static void ath_mci_buf_free(struct ath_softc *sc, struct ath_mci_buf *buf)
+{
+       if (buf->bf_addr) {
+               dma_free_coherent(sc->dev, buf->bf_len, buf->bf_addr,
+                                                       buf->bf_paddr);
+               memset(buf, 0, sizeof(*buf));
+       }
+}
+
+int ath_mci_setup(struct ath_softc *sc)
+{
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       struct ath_mci_coex *mci = &sc->mci_coex;
+       int error = 0;
+
+       mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE;
+
+       if (ath_mci_buf_alloc(sc, &mci->sched_buf)) {
+               ath_dbg(common, ATH_DBG_FATAL, "MCI buffer alloc failed\n");
+               error = -ENOMEM;
+               goto fail;
+       }
+
+       mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE;
+
+       memset(mci->sched_buf.bf_addr, MCI_GPM_RSVD_PATTERN,
+                                               mci->sched_buf.bf_len);
+
+       mci->gpm_buf.bf_len = ATH_MCI_GPM_BUF_SIZE;
+       mci->gpm_buf.bf_addr = (u8 *)mci->sched_buf.bf_addr +
+                                                       mci->sched_buf.bf_len;
+       mci->gpm_buf.bf_paddr = mci->sched_buf.bf_paddr + mci->sched_buf.bf_len;
+
+       /* initialize the buffer */
+       memset(mci->gpm_buf.bf_addr, MCI_GPM_RSVD_PATTERN, mci->gpm_buf.bf_len);
+
+       ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr,
+                        mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
+                        mci->sched_buf.bf_paddr);
+fail:
+       return error;
+}
+
+void ath_mci_cleanup(struct ath_softc *sc)
+{
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_mci_coex *mci = &sc->mci_coex;
+
+       /*
+        * Freeing the schedule buffer releases both it and the GPM buffer.
+        */
+       ath_mci_buf_free(sc, &mci->sched_buf);
+       ar9003_mci_cleanup(ah);
+}
+
+void ath_mci_intr(struct ath_softc *sc)
+{
+       struct ath_mci_coex *mci = &sc->mci_coex;
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
+       u32 mci_int, mci_int_rxmsg;
+       u32 offset, subtype, opcode;
+       u32 *pgpm;
+       u32 more_data = MCI_GPM_MORE;
+       bool skip_gpm = false;
+
+       ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg);
+
+       if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) == 0) {
+
+               ar9003_mci_state(sc->sc_ah, MCI_STATE_INIT_GPM_OFFSET, NULL);
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI interrupt but MCI disabled\n");
+
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI interrupt: intr = 0x%x, intr_rxmsg = 0x%x\n",
+                       mci_int, mci_int_rxmsg);
+               return;
+       }
+
+       if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE) {
+               u32 payload[4] = { 0xffffffff, 0xffffffff,
+                                  0xffffffff, 0xffffff00};
+
+               /*
+                * The following REMOTE_RESET and SYS_WAKING used to be sent
+                * only when BT woke up. Now they are always sent, as a
+                * recovery method to reset BT MCI's RX alignment.
+                */
+               ath_dbg(common, ATH_DBG_MCI, "MCI interrupt send REMOTE_RESET\n");
+
+               ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0,
+                                       payload, 16, true, false);
+               ath_dbg(common, ATH_DBG_MCI, "MCI interrupt send SYS_WAKING\n");
+               ar9003_mci_send_message(ah, MCI_SYS_WAKING, 0,
+                                       NULL, 0, true, false);
+
+               mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE;
+               ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE, NULL);
+
+               /*
+                * always do this for recovery and 2G/5G toggling and LNA_TRANS
+                */
+               ath_dbg(common, ATH_DBG_MCI, "MCI Set BT state to AWAKE.\n");
+               ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE, NULL);
+       }
+
+       /* Processing SYS_WAKING/SYS_SLEEPING */
+       if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) {
+               mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING;
+
+               if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_SLEEP) {
+
+                       if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL)
+                                       == MCI_BT_SLEEP)
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI BT stays in sleep mode\n");
+                       else {
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI Set BT state to AWAKE.\n");
+                               ar9003_mci_state(ah,
+                                                MCI_STATE_SET_BT_AWAKE, NULL);
+                       }
+               } else
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI BT stays in AWAKE mode.\n");
+       }
+
+       if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) {
+
+               mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING;
+
+               if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) {
+
+                       if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL)
+                                       == MCI_BT_AWAKE)
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI BT stays in AWAKE mode.\n");
+                       else {
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI SetBT state to SLEEP\n");
+                               ar9003_mci_state(ah, MCI_STATE_SET_BT_SLEEP,
+                                                NULL);
+                       }
+               } else
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI BT stays in SLEEP mode\n");
+       }
+
+       if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
+           (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
+
+               ath_dbg(common, ATH_DBG_MCI, "MCI RX broken, skip GPM msgs\n");
+               ar9003_mci_state(ah, MCI_STATE_RECOVER_RX, NULL);
+               skip_gpm = true;
+       }
+
+       if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) {
+
+               mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO;
+               offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET,
+                                         NULL);
+       }
+
+       if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) {
+
+               mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM;
+
+               while (more_data == MCI_GPM_MORE) {
+
+                       pgpm = mci->gpm_buf.bf_addr;
+                       offset = ar9003_mci_state(ah,
+                                       MCI_STATE_NEXT_GPM_OFFSET, &more_data);
+
+                       if (offset == MCI_GPM_INVALID)
+                               break;
+
+                       pgpm += (offset >> 2);
+
+                       /*
+                        * The first dword is a timer value; the real data
+                        * starts from the 2nd dword.
+                        */
+
+                       subtype = MCI_GPM_TYPE(pgpm);
+                       opcode = MCI_GPM_OPCODE(pgpm);
+
+                       if (!skip_gpm) {
+
+                               if (MCI_GPM_IS_CAL_TYPE(subtype))
+                                       ath_mci_cal_msg(sc, subtype,
+                                                       (u8 *) pgpm);
+                               else {
+                                       switch (subtype) {
+                                       case MCI_GPM_COEX_AGENT:
+                                               ath_mci_msg(sc, opcode,
+                                                           (u8 *) pgpm);
+                                               break;
+                                       default:
+                                               break;
+                                       }
+                               }
+                       }
+                       MCI_GPM_RECYCLE(pgpm);
+               }
+       }
+
+       if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_HW_MSG_MASK) {
+
+               if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL)
+                       mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL;
+
+               if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_INFO) {
+                       mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO;
+                       ath_dbg(common, ATH_DBG_MCI, "MCI LNA_INFO\n");
+               }
+
+               if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) {
+
+                       int value_dbm = ar9003_mci_state(ah,
+                                       MCI_STATE_CONT_RSSI_POWER, NULL);
+
+                       mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO;
+
+                       if (ar9003_mci_state(ah, MCI_STATE_CONT_TXRX, NULL))
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI CONT_INFO: "
+                                       "(tx) pri = %d, pwr = %d dBm\n",
+                                       ar9003_mci_state(ah,
+                                               MCI_STATE_CONT_PRIORITY, NULL),
+                                       value_dbm);
+                       else
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI CONT_INFO: "
+                                       "(rx) pri = %d, pwr = %d dBm\n",
+                                       ar9003_mci_state(ah,
+                                               MCI_STATE_CONT_PRIORITY, NULL),
+                                       value_dbm);
+               }
+
+               if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK) {
+                       mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_NACK;
+                       ath_dbg(common, ATH_DBG_MCI, "MCI CONT_NACK\n");
+               }
+
+               if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_RST) {
+                       mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_RST;
+                       ath_dbg(common, ATH_DBG_MCI, "MCI CONT_RST\n");
+               }
+       }
+
+       if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
+           (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT))
+               mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR |
+                            AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT);
+
+       if (mci_int_rxmsg & 0xfffffffe)
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI not processed mci_int_rxmsg = 0x%x\n",
+                       mci_int_rxmsg);
+}
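
ath_mci_intr() above drains the GPM ring by repeatedly asking the hardware layer for the next valid offset and dispatching each 16-byte (4-dword) entry by type and opcode. A stripped-down sketch of that consumer loop, with the wake/sleep handling and calibration path omitted (names as in the patch; the helper itself is illustrative only):

    static void ath_mci_drain_gpm_sketch(struct ath_softc *sc)
    {
    	struct ath_mci_coex *mci = &sc->mci_coex;
    	u32 more_data = MCI_GPM_MORE;
    	u32 offset, *pgpm;

    	while (more_data == MCI_GPM_MORE) {
    		offset = ar9003_mci_state(sc->sc_ah,
    					  MCI_STATE_NEXT_GPM_OFFSET, &more_data);
    		if (offset == MCI_GPM_INVALID)
    			break;

    		/* offset is in bytes; step in dwords to the entry */
    		pgpm = (u32 *)mci->gpm_buf.bf_addr + (offset >> 2);

    		if (MCI_GPM_TYPE(pgpm) == MCI_GPM_COEX_AGENT)
    			ath_mci_msg(sc, MCI_GPM_OPCODE(pgpm), (u8 *)pgpm);

    		MCI_GPM_RECYCLE(pgpm);	/* hand the entry back to hardware */
    	}
    }
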
index 9590c61..29e3e51 100644 (file)
@@ -17,6 +17,9 @@
 #ifndef MCI_H
 #define MCI_H
 
+#define ATH_MCI_SCHED_BUF_SIZE         (16 * 16) /* 16 entries, 4 dwords each */
+#define ATH_MCI_GPM_MAX_ENTRY          16
+#define ATH_MCI_GPM_BUF_SIZE           (ATH_MCI_GPM_MAX_ENTRY * 16)
 #define ATH_MCI_DEF_BT_PERIOD          40
 #define ATH_MCI_BDR_DUTY_CYCLE         20
 #define ATH_MCI_MAX_DUTY_CYCLE         90
@@ -110,9 +113,22 @@ struct ath_mci_profile {
        u8 num_bdr;
 };
 
+
+struct ath_mci_buf {
+       void *bf_addr;          /* virtual addr of buffer */
+       dma_addr_t bf_paddr;    /* physical addr of buffer */
+       u32 bf_len;             /* len of data */
+};
+
+struct ath_mci_coex {
+       atomic_t mci_cal_flag;
+       struct ath_mci_buf sched_buf;
+       struct ath_mci_buf gpm_buf;
+       u32 bt_cal_start;
+};
+
 void ath_mci_flush_profile(struct ath_mci_profile *mci);
-void ath_mci_process_profile(struct ath_softc *sc,
-                            struct ath_mci_profile_info *info);
-void ath_mci_process_status(struct ath_softc *sc,
-                           struct ath_mci_profile_status *status);
+int ath_mci_setup(struct ath_softc *sc);
+void ath_mci_cleanup(struct ath_softc *sc);
+void ath_mci_intr(struct ath_softc *sc);
 #endif
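
The new ath_mci_buf/ath_mci_coex structures are filled by ath_mci_setup() from one coherent DMA allocation: the schedule area (ATH_MCI_SCHED_BUF_SIZE = 256 bytes, 16 entries of 4 dwords) comes first and the GPM ring (ATH_MCI_GPM_BUF_SIZE = 256 bytes) follows it, which is why ath_mci_cleanup() only has to free the schedule buffer. A minimal sketch of that split, assuming sched_buf was allocated with the combined length as in the patch (the helper is illustrative, not part of the patch):

    /* Sketch: carve the single DMA region into schedule and GPM sub-buffers. */
    static void ath_mci_split_sketch(struct ath_mci_coex *mci)
    {
    	mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE;

    	mci->gpm_buf.bf_len   = ATH_MCI_GPM_BUF_SIZE;
    	mci->gpm_buf.bf_addr  = (u8 *)mci->sched_buf.bf_addr +
    				mci->sched_buf.bf_len;
    	mci->gpm_buf.bf_paddr = mci->sched_buf.bf_paddr +
    				mci->sched_buf.bf_len;
    }
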
index 2dcdf63..a439edc 100644 (file)
@@ -307,12 +307,11 @@ static int ath_pci_suspend(struct device *device)
        struct ieee80211_hw *hw = pci_get_drvdata(pdev);
        struct ath_softc *sc = hw->priv;
 
-       ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
-
        /* The device has to be moved to FULLSLEEP forcibly.
         * Otherwise the chip never moved to full sleep,
         * when no interface is up.
         */
+       ath9k_hw_disable(sc->sc_ah);
        ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
 
        return 0;
@@ -321,8 +320,6 @@ static int ath_pci_suspend(struct device *device)
 static int ath_pci_resume(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
-       struct ieee80211_hw *hw = pci_get_drvdata(pdev);
-       struct ath_softc *sc = hw->priv;
        u32 val;
 
        /*
@@ -334,22 +331,6 @@ static int ath_pci_resume(struct device *device)
        if ((val & 0x0000ff00) != 0)
                pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
 
-       ath9k_ps_wakeup(sc);
-       /* Enable LED */
-       ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
-                           AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
-       ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
-
-         /*
-          * Reset key cache to sane defaults (all entries cleared) instead of
-          * semi-random values after suspend/resume.
-          */
-       ath9k_cmn_init_crypto(sc->sc_ah);
-       ath9k_ps_restore(sc);
-
-       sc->ps_idle = true;
-       ath_radio_disable(sc, hw);
-
        return 0;
 }
 
index 888abc2..528d5f3 100644 (file)
@@ -1271,7 +1271,9 @@ static void ath_rc_init(struct ath_softc *sc,
 
        ath_rc_priv->max_valid_rate = k;
        ath_rc_sort_validrates(rate_table, ath_rc_priv);
-       ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4];
+       ath_rc_priv->rate_max_phy = (k > 4) ?
+                                       ath_rc_priv->valid_rate_index[k-4] :
+                                       ath_rc_priv->valid_rate_index[k-1];
        ath_rc_priv->rate_table = rate_table;
 
        ath_dbg(common, ATH_DBG_CONFIG,
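
The rc.c change guards rate_max_phy against a short valid-rate list: with k valid rates, valid_rate_index[k - 4] only exists when k > 4, so the patch falls back to the highest valid index otherwise. A worked example with made-up numbers:

    /*
     * Illustrative only: if rate validation found k = 3 entries,
     * k - 4 = -1 would index before the array; the patched code picks
     * valid_rate_index[k - 1] = valid_rate_index[2] (the best valid rate)
     * instead. With k = 8 nothing changes: valid_rate_index[4] is used.
     */
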
index 4c8e296..ad5176d 100644 (file)
@@ -475,7 +475,6 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
 
        return rfilt;
 
-#undef RX_FILTER_PRESERVE
 }
 
 int ath_startrecv(struct ath_softc *sc)
@@ -1824,6 +1823,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
                hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
                rxs = IEEE80211_SKB_RXCB(hdr_skb);
                if (ieee80211_is_beacon(hdr->frame_control) &&
+                   !is_zero_ether_addr(common->curbssid) &&
                    !compare_ether_addr(hdr->addr3, common->curbssid))
                        rs.is_mybeacon = true;
                else
@@ -1923,15 +1923,20 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
                        skb = hdr_skb;
                }
 
-               /*
-                * change the default rx antenna if rx diversity chooses the
-                * other antenna 3 times in a row.
-                */
-               if (sc->rx.defant != rs.rs_antenna) {
-                       if (++sc->rx.rxotherant >= 3)
-                               ath_setdefantenna(sc, rs.rs_antenna);
-               } else {
-                       sc->rx.rxotherant = 0;
+
+               if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
+
+                       /*
+                        * change the default rx antenna if rx diversity
+                        * chooses the other antenna 3 times in a row.
+                        */
+                       if (sc->rx.defant != rs.rs_antenna) {
+                               if (++sc->rx.rxotherant >= 3)
+                                       ath_setdefantenna(sc, rs.rs_antenna);
+                       } else {
+                               sc->rx.rxotherant = 0;
+                       }
+
                }
 
                if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
index 4591097..6e2f188 100644 (file)
@@ -1006,6 +1006,8 @@ enum {
 #define AR_INTR_ASYNC_MASK                       (AR_SREV_9340(ah) ? 0x4018 : 0x4030)
 #define AR_INTR_ASYNC_MASK_GPIO                  0xFFFC0000
 #define AR_INTR_ASYNC_MASK_GPIO_S                18
+#define AR_INTR_ASYNC_MASK_MCI                   0x00000080
+#define AR_INTR_ASYNC_MASK_MCI_S                 7
 
 #define AR_INTR_SYNC_MASK                        (AR_SREV_9340(ah) ? 0x401c : 0x4034)
 #define AR_INTR_SYNC_MASK_GPIO                   0xFFFC0000
@@ -1013,6 +1015,14 @@ enum {
 
 #define AR_INTR_ASYNC_CAUSE_CLR                  (AR_SREV_9340(ah) ? 0x4020 : 0x4038)
 #define AR_INTR_ASYNC_CAUSE                      (AR_SREV_9340(ah) ? 0x4020 : 0x4038)
+#define AR_INTR_ASYNC_CAUSE_MCI                         0x00000080
+#define AR_INTR_ASYNC_USED                      (AR_INTR_MAC_IRQ | \
+                                                 AR_INTR_ASYNC_CAUSE_MCI)
+
+/* Asynchronous Interrupt Enable Register */
+#define AR_INTR_ASYNC_ENABLE_MCI         0x00000080
+#define AR_INTR_ASYNC_ENABLE_MCI_S       7
+
 
 #define AR_INTR_ASYNC_ENABLE                     (AR_SREV_9340(ah) ? 0x4024 : 0x403c)
 #define AR_INTR_ASYNC_ENABLE_GPIO                0xFFFC0000
@@ -1269,6 +1279,8 @@ enum {
 #define AR_RTC_INTR_MASK \
        ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0058) : 0x7058)
 
+#define AR_RTC_KEEP_AWAKE      0x7034
+
 /* RTC_DERIVED_* - only for AR9100 */
 
 #define AR_RTC_DERIVED_CLK \
@@ -1555,6 +1567,8 @@ enum {
 #define AR_DIAG_FRAME_NV0           0x00020000
 #define AR_DIAG_OBS_PT_SEL1         0x000C0000
 #define AR_DIAG_OBS_PT_SEL1_S       18
+#define AR_DIAG_OBS_PT_SEL2         0x08000000
+#define AR_DIAG_OBS_PT_SEL2_S       27
 #define AR_DIAG_FORCE_RX_CLEAR      0x00100000 /* force rx_clear high */
 #define AR_DIAG_IGNORE_VIRT_CS      0x00200000
 #define AR_DIAG_FORCE_CH_IDLE_HIGH  0x00400000
@@ -1929,37 +1943,277 @@ enum {
 #define AR_PHY_AGC_CONTROL_YCOK_MAX_S          6
 
 /* MCI Registers */
-#define AR_MCI_INTERRUPT_RX_MSG_EN             0x183c
-#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET    0x00000001
-#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S  0
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL     0x00000002
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S   1
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK       0x00000004
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S     2
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO       0x00000008
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S     3
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST        0x00000010
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S      4
-#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO       0x00000020
-#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S     5
-#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT         0x00000040
-#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S       6
-#define AR_MCI_INTERRUPT_RX_MSG_GPM             0x00000100
-#define AR_MCI_INTERRUPT_RX_MSG_GPM_S           8
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO        0x00000200
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S      9
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING    0x00000400
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S  10
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING      0x00000800
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S    11
-#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE        0x00001000
-#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S      12
-#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK        (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO  | \
+
+#define AR_MCI_COMMAND0                                0x1800
+#define AR_MCI_COMMAND0_HEADER                 0xFF
+#define AR_MCI_COMMAND0_HEADER_S               0
+#define AR_MCI_COMMAND0_LEN                    0x1f00
+#define AR_MCI_COMMAND0_LEN_S                  8
+#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP      0x2000
+#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP_S    13
+
+#define AR_MCI_COMMAND1                                0x1804
+
+#define AR_MCI_COMMAND2                                0x1808
+#define AR_MCI_COMMAND2_RESET_TX               0x01
+#define AR_MCI_COMMAND2_RESET_TX_S             0
+#define AR_MCI_COMMAND2_RESET_RX               0x02
+#define AR_MCI_COMMAND2_RESET_RX_S             1
+#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES     0x3FC
+#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES_S   2
+#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP        0x400
+#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP_S      10
+
+#define AR_MCI_RX_CTRL                         0x180c
+
+#define AR_MCI_TX_CTRL                         0x1810
+/* 0 = no division, 1 = divide by 2, 2 = divide by 4, 3 = divide by 8 */
+#define AR_MCI_TX_CTRL_CLK_DIV                 0x03
+#define AR_MCI_TX_CTRL_CLK_DIV_S               0
+#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE      0x04
+#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE_S    2
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ                0xFFFFF8
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ_S      3
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM         0xF000000
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM_S       24
+
+#define AR_MCI_MSG_ATTRIBUTES_TABLE                    0x1814
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM           0xFFFF
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM_S         0
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR                0xFFFF0000
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR_S      16
+
+#define AR_MCI_SCHD_TABLE_0                            0x1818
+#define AR_MCI_SCHD_TABLE_1                            0x181c
+#define AR_MCI_GPM_0                                   0x1820
+#define AR_MCI_GPM_1                                   0x1824
+#define AR_MCI_GPM_WRITE_PTR                           0xFFFF0000
+#define AR_MCI_GPM_WRITE_PTR_S                         16
+#define AR_MCI_GPM_BUF_LEN                             0x0000FFFF
+#define AR_MCI_GPM_BUF_LEN_S                           0
+
+#define AR_MCI_INTERRUPT_RAW                           0x1828
+#define AR_MCI_INTERRUPT_EN                            0x182c
+#define AR_MCI_INTERRUPT_SW_MSG_DONE                   0x00000001
+#define AR_MCI_INTERRUPT_SW_MSG_DONE_S                 0
+#define AR_MCI_INTERRUPT_CPU_INT_MSG                   0x00000002
+#define AR_MCI_INTERRUPT_CPU_INT_MSG_S                 1
+#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL                 0x00000004
+#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL_S               2
+#define AR_MCI_INTERRUPT_RX_INVALID_HDR                        0x00000008
+#define AR_MCI_INTERRUPT_RX_INVALID_HDR_S              3
+#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL                        0x00000010
+#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL_S              4
+#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL                        0x00000020
+#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL_S              5
+#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL                        0x00000080
+#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL_S              7
+#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL                        0x00000100
+#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL_S              8
+#define AR_MCI_INTERRUPT_RX_MSG                                0x00000200
+#define AR_MCI_INTERRUPT_RX_MSG_S                      9
+#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE           0x00000400
+#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE_S         10
+#define AR_MCI_INTERRUPT_BT_PRI                                0x07fff800
+#define AR_MCI_INTERRUPT_BT_PRI_S                      11
+#define AR_MCI_INTERRUPT_BT_PRI_THRESH                 0x08000000
+#define AR_MCI_INTERRUPT_BT_PRI_THRESH_S               27
+#define AR_MCI_INTERRUPT_BT_FREQ                       0x10000000
+#define AR_MCI_INTERRUPT_BT_FREQ_S                     28
+#define AR_MCI_INTERRUPT_BT_STOMP                      0x20000000
+#define AR_MCI_INTERRUPT_BT_STOMP_S                    29
+#define AR_MCI_INTERRUPT_BB_AIC_IRQ                    0x40000000
+#define AR_MCI_INTERRUPT_BB_AIC_IRQ_S                  30
+#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT             0x80000000
+#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT_S           31
+
+#define AR_MCI_INTERRUPT_DEFAULT    (AR_MCI_INTERRUPT_SW_MSG_DONE        | \
+                                    AR_MCI_INTERRUPT_RX_INVALID_HDR      | \
+                                    AR_MCI_INTERRUPT_RX_HW_MSG_FAIL      | \
+                                    AR_MCI_INTERRUPT_RX_SW_MSG_FAIL      | \
+                                    AR_MCI_INTERRUPT_TX_HW_MSG_FAIL      | \
+                                    AR_MCI_INTERRUPT_TX_SW_MSG_FAIL      | \
+                                    AR_MCI_INTERRUPT_RX_MSG              | \
+                                    AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE | \
+                                    AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)
+
+#define AR_MCI_INTERRUPT_MSG_FAIL_MASK (AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
+                                       AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
+                                       AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
+                                       AR_MCI_INTERRUPT_TX_SW_MSG_FAIL)
+
+#define AR_MCI_REMOTE_CPU_INT                          0x1830
+#define AR_MCI_REMOTE_CPU_INT_EN                       0x1834
+#define AR_MCI_INTERRUPT_RX_MSG_RAW                    0x1838
+#define AR_MCI_INTERRUPT_RX_MSG_EN                     0x183c
+#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET           0x00000001
+#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S         0
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL            0x00000002
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S          1
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK              0x00000004
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S            2
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO              0x00000008
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S            3
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST               0x00000010
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S             4
+#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO              0x00000020
+#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S            5
+#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT                        0x00000040
+#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S              6
+#define AR_MCI_INTERRUPT_RX_MSG_GPM                    0x00000100
+#define AR_MCI_INTERRUPT_RX_MSG_GPM_S                  8
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO               0x00000200
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S             9
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING           0x00000400
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S         10
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING             0x00000800
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S           11
+#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE               0x00001000
+#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S             12
+#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK         (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO  | \
                                          AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL| \
                                          AR_MCI_INTERRUPT_RX_MSG_LNA_INFO   | \
                                          AR_MCI_INTERRUPT_RX_MSG_CONT_NACK  | \
                                          AR_MCI_INTERRUPT_RX_MSG_CONT_INFO  | \
                                          AR_MCI_INTERRUPT_RX_MSG_CONT_RST)
 
+#define AR_MCI_INTERRUPT_RX_MSG_DEFAULT (AR_MCI_INTERRUPT_RX_MSG_GPM    | \
+                                        AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET| \
+                                        AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING  | \
+                                        AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING| \
+                                        AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO   | \
+                                        AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL | \
+                                        AR_MCI_INTERRUPT_RX_MSG_LNA_INFO    | \
+                                        AR_MCI_INTERRUPT_RX_MSG_CONT_NACK   | \
+                                        AR_MCI_INTERRUPT_RX_MSG_CONT_INFO   | \
+                                        AR_MCI_INTERRUPT_RX_MSG_CONT_RST    | \
+                                        AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
+
+#define AR_MCI_CPU_INT                                 0x1840
+
+#define AR_MCI_RX_STATUS                       0x1844
+#define AR_MCI_RX_LAST_SCHD_MSG_INDEX          0x00000F00
+#define AR_MCI_RX_LAST_SCHD_MSG_INDEX_S                8
+#define AR_MCI_RX_REMOTE_SLEEP                 0x00001000
+#define AR_MCI_RX_REMOTE_SLEEP_S               12
+#define AR_MCI_RX_MCI_CLK_REQ                  0x00002000
+#define AR_MCI_RX_MCI_CLK_REQ_S                        13
+
+#define AR_MCI_CONT_STATUS                     0x1848
+#define AR_MCI_CONT_RSSI_POWER                 0x000000FF
+#define AR_MCI_CONT_RSSI_POWER_S               0
+#define AR_MCI_CONT_RRIORITY                   0x0000FF00
+#define AR_MCI_CONT_RRIORITY_S                 8
+#define AR_MCI_CONT_TXRX                       0x00010000
+#define AR_MCI_CONT_TXRX_S                     16
+
+#define AR_MCI_BT_PRI0                         0x184c
+#define AR_MCI_BT_PRI1                         0x1850
+#define AR_MCI_BT_PRI2                         0x1854
+#define AR_MCI_BT_PRI3                         0x1858
+#define AR_MCI_BT_PRI                          0x185c
+#define AR_MCI_WL_FREQ0                                0x1860
+#define AR_MCI_WL_FREQ1                                0x1864
+#define AR_MCI_WL_FREQ2                                0x1868
+#define AR_MCI_GAIN                            0x186c
+#define AR_MCI_WBTIMER1                                0x1870
+#define AR_MCI_WBTIMER2                                0x1874
+#define AR_MCI_WBTIMER3                                0x1878
+#define AR_MCI_WBTIMER4                                0x187c
+#define AR_MCI_MAXGAIN                         0x1880
+#define AR_MCI_HW_SCHD_TBL_CTL                 0x1884
+#define AR_MCI_HW_SCHD_TBL_D0                  0x1888
+#define AR_MCI_HW_SCHD_TBL_D1                  0x188c
+#define AR_MCI_HW_SCHD_TBL_D2                  0x1890
+#define AR_MCI_HW_SCHD_TBL_D3                  0x1894
+#define AR_MCI_TX_PAYLOAD0                     0x1898
+#define AR_MCI_TX_PAYLOAD1                     0x189c
+#define AR_MCI_TX_PAYLOAD2                     0x18a0
+#define AR_MCI_TX_PAYLOAD3                     0x18a4
+#define AR_BTCOEX_WBTIMER                      0x18a8
+
+#define AR_BTCOEX_CTRL                                 0x18ac
+#define AR_BTCOEX_CTRL_AR9462_MODE                     0x00000001
+#define AR_BTCOEX_CTRL_AR9462_MODE_S                   0
+#define AR_BTCOEX_CTRL_WBTIMER_EN                      0x00000002
+#define AR_BTCOEX_CTRL_WBTIMER_EN_S                    1
+#define AR_BTCOEX_CTRL_MCI_MODE_EN                     0x00000004
+#define AR_BTCOEX_CTRL_MCI_MODE_EN_S                   2
+#define AR_BTCOEX_CTRL_LNA_SHARED                      0x00000008
+#define AR_BTCOEX_CTRL_LNA_SHARED_S                    3
+#define AR_BTCOEX_CTRL_PA_SHARED                       0x00000010
+#define AR_BTCOEX_CTRL_PA_SHARED_S                     4
+#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN          0x00000020
+#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN_S                5
+#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN       0x00000040
+#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN_S     6
+#define AR_BTCOEX_CTRL_NUM_ANTENNAS                    0x00000180
+#define AR_BTCOEX_CTRL_NUM_ANTENNAS_S                  7
+#define AR_BTCOEX_CTRL_RX_CHAIN_MASK                   0x00000E00
+#define AR_BTCOEX_CTRL_RX_CHAIN_MASK_S                 9
+#define AR_BTCOEX_CTRL_AGGR_THRESH                     0x00007000
+#define AR_BTCOEX_CTRL_AGGR_THRESH_S                   12
+#define AR_BTCOEX_CTRL_1_CHAIN_BCN                     0x00080000
+#define AR_BTCOEX_CTRL_1_CHAIN_BCN_S                   19
+#define AR_BTCOEX_CTRL_1_CHAIN_ACK                     0x00100000
+#define AR_BTCOEX_CTRL_1_CHAIN_ACK_S                   20
+#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN                  0x1FE00000
+#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN_S                        28
+#define AR_BTCOEX_CTRL_REDUCE_TXPWR                    0x20000000
+#define AR_BTCOEX_CTRL_REDUCE_TXPWR_S                  29
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_10                  0x40000000
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_10_S                        30
+#define AR_BTCOEX_CTRL_SPDT_POLARITY                   0x80000000
+#define AR_BTCOEX_CTRL_SPDT_POLARITY_S                 31
+
+#define AR_BTCOEX_WL_WEIGHTS0                          0x18b0
+#define AR_BTCOEX_WL_WEIGHTS1                          0x18b4
+#define AR_BTCOEX_WL_WEIGHTS2                          0x18b8
+#define AR_BTCOEX_WL_WEIGHTS3                          0x18bc
+#define AR_BTCOEX_MAX_TXPWR(_x)                                (0x18c0 + ((_x) << 2))
+#define AR_BTCOEX_WL_LNA                               0x1940
+#define AR_BTCOEX_RFGAIN_CTRL                          0x1944
+
+#define AR_BTCOEX_CTRL2                                        0x1948
+#define AR_BTCOEX_CTRL2_TXPWR_THRESH                   0x0007F800
+#define AR_BTCOEX_CTRL2_TXPWR_THRESH_S                 11
+#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK                  0x00380000
+#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK_S                        19
+#define AR_BTCOEX_CTRL2_RX_DEWEIGHT                    0x00400000
+#define AR_BTCOEX_CTRL2_RX_DEWEIGHT_S                  22
+#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL                   0x00800000
+#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL_S                 23
+#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL                 0x01000000
+#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL_S               24
+#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE                0x02000000
+#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE_S      25
+
+#define AR_BTCOEX_CTRL_SPDT_ENABLE          0x00000001
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_S        0
+#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL     0x00000002
+#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL_S   1
+#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT   0x00000004
+#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT_S 2
+#define AR_GLB_WLAN_UART_INTF_EN            0x00020000
+#define AR_GLB_WLAN_UART_INTF_EN_S          17
+#define AR_GLB_DS_JTAG_DISABLE              0x00040000
+#define AR_GLB_DS_JTAG_DISABLE_S            18
+
+#define AR_BTCOEX_RC                    0x194c
+#define AR_BTCOEX_MAX_RFGAIN(_x)        (0x1950 + ((_x) << 2))
+#define AR_BTCOEX_DBG                   0x1a50
+#define AR_MCI_LAST_HW_MSG_HDR          0x1a54
+#define AR_MCI_LAST_HW_MSG_BDY          0x1a58
+
+#define AR_MCI_SCHD_TABLE_2             0x1a5c
+#define AR_MCI_SCHD_TABLE_2_MEM_BASED   0x00000001
+#define AR_MCI_SCHD_TABLE_2_MEM_BASED_S 0
+#define AR_MCI_SCHD_TABLE_2_HW_BASED    0x00000002
+#define AR_MCI_SCHD_TABLE_2_HW_BASED_S  1
+
+#define AR_BTCOEX_CTRL3               0x1a60
+#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT      0x00000fff
+#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT_S    0
+
 
 #endif
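
The register additions above follow the driver's usual convention of pairing every field mask with a matching _S shift define (for example AR_MCI_GPM_WRITE_PTR / AR_MCI_GPM_WRITE_PTR_S). A hedged sketch of how such pairs are normally consumed; the MS()/SM() helpers shown are the common ath9k idiom, reproduced here only for illustration:

    #define MS(_v, _f)	(((_v) & _f) >> _f##_S)	/* extract a field */
    #define SM(_v, _f)	(((_v) << _f##_S) & _f)	/* place a value into a field */

    /* Illustrative: pull the GPM write pointer and buffer length out of a
     * raw AR_MCI_GPM_1 register value. */
    static inline void mci_gpm_fields_sketch(u32 raw, u32 *wr_ptr, u32 *len)
    {
    	*wr_ptr = MS(raw, AR_MCI_GPM_WRITE_PTR);
    	*len    = MS(raw, AR_MCI_GPM_BUF_LEN);
    }
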
index 55d077e..2622fce 100644 (file)
@@ -53,7 +53,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
                            int tx_flags, struct ath_txq *txq);
 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_txq *txq, struct list_head *bf_q,
-                               struct ath_tx_status *ts, int txok, int sendbar);
+                               struct ath_tx_status *ts, int txok);
 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head, bool internal);
 static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
@@ -150,6 +150,12 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
        return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
 }
 
+static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
+{
+       ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
+                          seqno << IEEE80211_SEQ_SEQ_SHIFT);
+}
+
 static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
        struct ath_txq *txq = tid->ac->txq;
@@ -158,28 +164,33 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
        struct list_head bf_head;
        struct ath_tx_status ts;
        struct ath_frame_info *fi;
+       bool sendbar = false;
 
        INIT_LIST_HEAD(&bf_head);
 
        memset(&ts, 0, sizeof(ts));
-       spin_lock_bh(&txq->axq_lock);
 
        while ((skb = __skb_dequeue(&tid->buf_q))) {
                fi = get_frame_info(skb);
                bf = fi->bf;
 
-               spin_unlock_bh(&txq->axq_lock);
                if (bf && fi->retries) {
                        list_add_tail(&bf->list, &bf_head);
                        ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
-                       ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
+                       ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+                       sendbar = true;
                } else {
                        ath_tx_send_normal(sc, txq, NULL, skb);
                }
-               spin_lock_bh(&txq->axq_lock);
        }
 
-       spin_unlock_bh(&txq->axq_lock);
+       if (tid->baw_head == tid->baw_tail) {
+               tid->state &= ~AGGR_ADDBA_COMPLETE;
+               tid->state &= ~AGGR_CLEANUP;
+       }
+
+       if (sendbar)
+               ath_send_bar(tid, tid->seq_start);
 }
 
 static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
@@ -195,6 +206,8 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
        while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
                INCR(tid->seq_start, IEEE80211_SEQ_MAX);
                INCR(tid->baw_head, ATH_TID_MAX_BUFS);
+               if (tid->bar_index >= 0)
+                       tid->bar_index--;
        }
 }
 
@@ -238,9 +251,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
                bf = fi->bf;
 
                if (!bf) {
-                       spin_unlock(&txq->axq_lock);
                        ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
-                       spin_lock(&txq->axq_lock);
                        continue;
                }
 
@@ -249,24 +260,26 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
                if (fi->retries)
                        ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
 
-               spin_unlock(&txq->axq_lock);
-               ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
-               spin_lock(&txq->axq_lock);
+               ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
        }
 
        tid->seq_next = tid->seq_start;
        tid->baw_tail = tid->baw_head;
+       tid->bar_index = -1;
 }
 
 static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
-                            struct sk_buff *skb)
+                            struct sk_buff *skb, int count)
 {
        struct ath_frame_info *fi = get_frame_info(skb);
        struct ath_buf *bf = fi->bf;
        struct ieee80211_hdr *hdr;
+       int prev = fi->retries;
 
        TX_STAT_INC(txq->axq_qnum, a_retries);
-       if (fi->retries++ > 0)
+       fi->retries += count;
+
+       if (prev > 0)
                return;
 
        hdr = (struct ieee80211_hdr *)skb->data;
@@ -365,7 +378,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
        struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
        struct list_head bf_head;
        struct sk_buff_head bf_pending;
-       u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
+       u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
        u32 ba[WME_BA_BMP_SIZE >> 5];
        int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
        bool rc_update = true;
@@ -374,6 +387,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
        int nframes;
        u8 tidno;
        bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
+       int i, retries;
+       int bar_index = -1;
 
        skb = bf->bf_mpdu;
        hdr = (struct ieee80211_hdr *)skb->data;
@@ -382,6 +397,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 
        memcpy(rates, tx_info->control.rates, sizeof(rates));
 
+       retries = ts->ts_longretry + 1;
+       for (i = 0; i < ts->ts_rateindex; i++)
+               retries += rates[i].count;
+
        rcu_read_lock();
 
        sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
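
The retry accounting introduced above charges a sub-frame for every hardware attempt in one update: ts_longretry + 1 attempts at the final rate, plus the configured count of each earlier rate in the series. A worked example with made-up numbers:

    /*
     * Illustrative only: rates[0].count = 2, rates[1].count = 2, and the
     * frame completed at ts_rateindex = 2 with ts_longretry = 1.
     * retries = (1 + 1) + 2 + 2 = 6, so fi->retries advances by 6 and the
     * ATH_MAX_SW_RETRIES limit now reflects real over-the-air attempts.
     */
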
@@ -395,8 +414,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                        if (!bf->bf_stale || bf_next != NULL)
                                list_move_tail(&bf->list, &bf_head);
 
-                       ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
-                               0, 0);
+                       ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);
 
                        bf = bf_next;
                }
@@ -406,6 +424,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
        an = (struct ath_node *)sta->drv_priv;
        tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
        tid = ATH_AN_2_TID(an, tidno);
+       seq_first = tid->seq_start;
 
        /*
         * The hardware occasionally sends a tx status for the wrong TID.
@@ -455,25 +474,25 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                } else if (!isaggr && txok) {
                        /* transmit completion */
                        acked_cnt++;
+               } else if ((tid->state & AGGR_CLEANUP) || !retry) {
+                       /*
+                        * cleanup in progress, just fail
+                        * the un-acked sub-frames
+                        */
+                       txfail = 1;
+               } else if (flush) {
+                       txpending = 1;
+               } else if (fi->retries < ATH_MAX_SW_RETRIES) {
+                       if (txok || !an->sleeping)
+                               ath_tx_set_retry(sc, txq, bf->bf_mpdu,
+                                                retries);
+
+                       txpending = 1;
                } else {
-                       if ((tid->state & AGGR_CLEANUP) || !retry) {
-                               /*
-                                * cleanup in progress, just fail
-                                * the un-acked sub-frames
-                                */
-                               txfail = 1;
-                       } else if (flush) {
-                               txpending = 1;
-                       } else if (fi->retries < ATH_MAX_SW_RETRIES) {
-                               if (txok || !an->sleeping)
-                                       ath_tx_set_retry(sc, txq, bf->bf_mpdu);
-
-                               txpending = 1;
-                       } else {
-                               txfail = 1;
-                               sendbar = 1;
-                               txfail_cnt++;
-                       }
+                       txfail = 1;
+                       txfail_cnt++;
+                       bar_index = max_t(int, bar_index,
+                               ATH_BA_INDEX(seq_first, seqno));
                }
 
                /*
@@ -490,9 +509,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                         * complete the acked-ones/xretried ones; update
                         * block-ack window
                         */
-                       spin_lock_bh(&txq->axq_lock);
                        ath_tx_update_baw(sc, tid, seqno);
-                       spin_unlock_bh(&txq->axq_lock);
 
                        if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
                                memcpy(tx_info->control.rates, rates, sizeof(rates));
@@ -501,33 +518,30 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                        }
 
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
-                               !txfail, sendbar);
+                               !txfail);
                } else {
                        /* retry the un-acked ones */
-                       if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
-                               if (bf->bf_next == NULL && bf_last->bf_stale) {
-                                       struct ath_buf *tbf;
-
-                                       tbf = ath_clone_txbuf(sc, bf_last);
-                                       /*
-                                        * Update tx baw and complete the
-                                        * frame with failed status if we
-                                        * run out of tx buf.
-                                        */
-                                       if (!tbf) {
-                                               spin_lock_bh(&txq->axq_lock);
-                                               ath_tx_update_baw(sc, tid, seqno);
-                                               spin_unlock_bh(&txq->axq_lock);
-
-                                               ath_tx_complete_buf(sc, bf, txq,
-                                                                   &bf_head,
-                                                                   ts, 0,
-                                                                   !flush);
-                                               break;
-                                       }
-
-                                       fi->bf = tbf;
+                       if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
+                           bf->bf_next == NULL && bf_last->bf_stale) {
+                               struct ath_buf *tbf;
+
+                               tbf = ath_clone_txbuf(sc, bf_last);
+                               /*
+                                * Update tx baw and complete the
+                                * frame with failed status if we
+                                * run out of tx buf.
+                                */
+                               if (!tbf) {
+                                       ath_tx_update_baw(sc, tid, seqno);
+
+                                       ath_tx_complete_buf(sc, bf, txq,
+                                                           &bf_head, ts, 0);
+                                       bar_index = max_t(int, bar_index,
+                                               ATH_BA_INDEX(seq_first, seqno));
+                                       break;
                                }
+
+                               fi->bf = tbf;
                        }
 
                        /*
@@ -540,12 +554,18 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                bf = bf_next;
        }
 
+       if (bar_index >= 0) {
+               u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);
+               ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
+               if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
+                       tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);
+       }
+
        /* prepend un-acked frames to the beginning of the pending frame queue */
        if (!skb_queue_empty(&bf_pending)) {
                if (an->sleeping)
                        ieee80211_sta_set_buffered(sta, tid->tidno, true);
 
-               spin_lock_bh(&txq->axq_lock);
                skb_queue_splice(&bf_pending, &tid->buf_q);
                if (!an->sleeping) {
                        ath_tx_queue_tid(txq, tid);
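
When a subframe is dropped after ATH_MAX_SW_RETRIES, its offset within the current block-ack window (relative to seq_first) is recorded in bar_index; after the loop, that offset is mapped back to a sequence number and a BAR is sent so the receiver's window can move past the dropped frame. The ATH_BA_INDEX()/ATH_BA_INDEX2SEQ() helpers do that mapping. The sketch below shows the assumed arithmetic over the 12-bit 802.11 sequence space, not the driver's actual macro definitions:

#define SEQ_MODULUS 4096        /* 12-bit 802.11 sequence numbers */

/* Assumed behaviour of ATH_BA_INDEX()/ATH_BA_INDEX2SEQ(); the real macros
 * live in the ath9k headers and may differ in detail. */
static unsigned int ba_index(unsigned int seq_first, unsigned int seqno)
{
        return (seqno - seq_first) & (SEQ_MODULUS - 1);
}

static unsigned int ba_index2seq(unsigned int seq_first, unsigned int index)
{
        return (seq_first + index) & (SEQ_MODULUS - 1);
}
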
@@ -553,18 +573,11 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                        if (ts->ts_status & ATH9K_TXERR_FILT)
                                tid->ac->clear_ps_filter = true;
                }
-               spin_unlock_bh(&txq->axq_lock);
        }
 
-       if (tid->state & AGGR_CLEANUP) {
+       if (tid->state & AGGR_CLEANUP)
                ath_tx_flush_tid(sc, tid);
 
-               if (tid->baw_head == tid->baw_tail) {
-                       tid->state &= ~AGGR_ADDBA_COMPLETE;
-                       tid->state &= ~AGGR_CLEANUP;
-               }
-       }
-
        rcu_read_unlock();
 
        if (needreset) {
@@ -618,24 +631,26 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
        max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
 
        for (i = 0; i < 4; i++) {
-               if (rates[i].count) {
-                       int modeidx;
-                       if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
-                               legacy = 1;
-                               break;
-                       }
+               int modeidx;
 
-                       if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
-                               modeidx = MCS_HT40;
-                       else
-                               modeidx = MCS_HT20;
-
-                       if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
-                               modeidx++;
+               if (!rates[i].count)
+                       continue;
 
-                       frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
-                       max_4ms_framelen = min(max_4ms_framelen, frmlen);
+               if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
+                       legacy = 1;
+                       break;
                }
+
+               if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+                       modeidx = MCS_HT40;
+               else
+                       modeidx = MCS_HT20;
+
+               if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
+                       modeidx++;
+
+               frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
+               max_4ms_framelen = min(max_4ms_framelen, frmlen);
        }
 
        /*
@@ -771,8 +786,6 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
 
                bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
                seqno = bf->bf_state.seqno;
-               if (!bf_first)
-                       bf_first = bf;
 
                /* do not step over block-ack window */
                if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
@@ -780,6 +793,21 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
                        break;
                }
 
+               if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
+                       struct ath_tx_status ts = {};
+                       struct list_head bf_head;
+
+                       INIT_LIST_HEAD(&bf_head);
+                       list_add(&bf->list, &bf_head);
+                       __skb_unlink(skb, &tid->buf_q);
+                       ath_tx_update_baw(sc, tid, seqno);
+                       ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+                       continue;
+               }
+
+               if (!bf_first)
+                       bf_first = bf;
+
                if (!rl) {
                        aggr_limit = ath_lookup_rate(sc, bf, tid);
                        rl = 1;
@@ -1122,6 +1150,7 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
        txtid->state |= AGGR_ADDBA_PROGRESS;
        txtid->paused = true;
        *ssn = txtid->seq_start = txtid->seq_next;
+       txtid->bar_index = -1;
 
        memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
        txtid->baw_head = txtid->baw_tail = 0;
@@ -1156,9 +1185,9 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
                txtid->state |= AGGR_CLEANUP;
        else
                txtid->state &= ~AGGR_ADDBA_COMPLETE;
-       spin_unlock_bh(&txq->axq_lock);
 
        ath_tx_flush_tid(sc, txtid);
+       spin_unlock_bh(&txq->axq_lock);
 }
 
 void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
@@ -1400,8 +1429,6 @@ static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
 
 static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
                               struct list_head *list, bool retry_tx)
-       __releases(txq->axq_lock)
-       __acquires(txq->axq_lock)
 {
        struct ath_buf *bf, *lastbf;
        struct list_head bf_head;
@@ -1428,13 +1455,11 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
                if (bf_is_ampdu_not_probing(bf))
                        txq->axq_ampdu_depth--;
 
-               spin_unlock_bh(&txq->axq_lock);
                if (bf_isampdu(bf))
                        ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
                                             retry_tx);
                else
-                       ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
-               spin_lock_bh(&txq->axq_lock);
+                       ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
        }
 }
 
@@ -1561,11 +1586,9 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
                                break;
                }
 
-               if (!list_empty(&ac->tid_q)) {
-                       if (!ac->sched) {
-                               ac->sched = true;
-                               list_add_tail(&ac->list, &txq->axq_acq);
-                       }
+               if (!list_empty(&ac->tid_q) && !ac->sched) {
+                       ac->sched = true;
+                       list_add_tail(&ac->list, &txq->axq_acq);
                }
 
                if (ac == last_ac ||
@@ -1708,10 +1731,6 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
        list_add_tail(&bf->list, &bf_head);
        bf->bf_state.bf_type = 0;
 
-       /* update starting sequence number for subsequent ADDBA request */
-       if (tid)
-               INCR(tid->seq_start, IEEE80211_SEQ_MAX);
-
        bf->bf_lastbf = bf;
        ath_tx_fill_desc(sc, bf, txq, fi->framelen);
        ath_tx_txqaddbuf(sc, txq, &bf_head, false);
@@ -1819,7 +1838,6 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
        struct ath_buf *bf;
        u8 tidno;
 
-       spin_lock_bh(&txctl->txq->axq_lock);
        if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
                ieee80211_is_data_qos(hdr->frame_control)) {
                tidno = ieee80211_get_qos_ctl(hdr)[0] &
@@ -1838,7 +1856,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
        } else {
                bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
                if (!bf)
-                       goto out;
+                       return;
 
                bf->bf_state.bfs_paprd = txctl->paprd;
 
@@ -1847,9 +1865,6 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
 
                ath_tx_send_normal(sc, txctl->txq, tid, skb);
        }
-
-out:
-       spin_unlock_bh(&txctl->txq->axq_lock);
 }
 
 /* Upon failure caller should free skb */
@@ -1914,11 +1929,13 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
        if (txq == sc->tx.txq_map[q] &&
            ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
                ieee80211_stop_queue(sc->hw, q);
-               txq->stopped = 1;
+               txq->stopped = true;
        }
-       spin_unlock_bh(&txq->axq_lock);
 
        ath_tx_start_dma(sc, skb, txctl);
+
+       spin_unlock_bh(&txq->axq_lock);
+
        return 0;
 }
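
Most of the spin_lock_bh()/spin_unlock_bh() removals in this file belong to a single change: txq->axq_lock is now taken once at the entry points, for example around ath_tx_start_dma() in ath_tx_start() above, and the helpers underneath run with it already held instead of locking it themselves. A toy schematic of that discipline, with generic types rather than the driver's structures:

#include <pthread.h>

struct toy_txq {
        pthread_mutex_t lock;
        int pending_frames;
};

/* Runs with q->lock already held; must not take the lock again. */
static void toy_queue_frame_locked(struct toy_txq *q)
{
        q->pending_frames++;
}

static void toy_tx_start(struct toy_txq *q)
{
        pthread_mutex_lock(&q->lock);
        toy_queue_frame_locked(q);      /* no nested axq_lock-style locking */
        pthread_mutex_unlock(&q->lock);
}
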
 
@@ -1937,9 +1954,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
 
        ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
 
-       if (tx_flags & ATH_TX_BAR)
-               tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
-
        if (!(tx_flags & ATH_TX_ERROR))
                /* Frame was ACKed */
                tx_info->flags |= IEEE80211_TX_STAT_ACK;
@@ -1955,7 +1969,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
                skb_pull(skb, padsize);
        }
 
-       if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
+       if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
                sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
                ath_dbg(common, ATH_DBG_PS,
                        "Going back to sleep after having received TX status (0x%lx)\n",
@@ -1967,15 +1981,13 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
 
        q = skb_get_queue_mapping(skb);
        if (txq == sc->tx.txq_map[q]) {
-               spin_lock_bh(&txq->axq_lock);
                if (WARN_ON(--txq->pending_frames < 0))
                        txq->pending_frames = 0;
 
                if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
                        ieee80211_wake_queue(sc->hw, q);
-                       txq->stopped = 0;
+                       txq->stopped = false;
                }
-               spin_unlock_bh(&txq->axq_lock);
        }
 
        ieee80211_tx_status(hw, skb);
@@ -1983,16 +1995,13 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
 
 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_txq *txq, struct list_head *bf_q,
-                               struct ath_tx_status *ts, int txok, int sendbar)
+                               struct ath_tx_status *ts, int txok)
 {
        struct sk_buff *skb = bf->bf_mpdu;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        unsigned long flags;
        int tx_flags = 0;
 
-       if (sendbar)
-               tx_flags = ATH_TX_BAR;
-
        if (!txok)
                tx_flags |= ATH_TX_ERROR;
 
@@ -2084,8 +2093,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
 static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
                                  struct ath_tx_status *ts, struct ath_buf *bf,
                                  struct list_head *bf_head)
-       __releases(txq->axq_lock)
-       __acquires(txq->axq_lock)
 {
        int txok;
 
@@ -2095,16 +2102,12 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
        if (bf_is_ampdu_not_probing(bf))
                txq->axq_ampdu_depth--;
 
-       spin_unlock_bh(&txq->axq_lock);
-
        if (!bf_isampdu(bf)) {
                ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
-               ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
+               ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
        } else
                ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
 
-       spin_lock_bh(&txq->axq_lock);
-
        if (sc->sc_flags & SC_OP_TXAGGR)
                ath_txq_schedule(sc, txq);
 }
index 5518592..db77421 100644 (file)
@@ -48,7 +48,7 @@
 #include "carl9170.h"
 #include "cmd.h"
 
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");
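
modparam_nohwcrypt is registered with module_param_named(..., bool, ...), and kernels of this period require the backing variable of a bool parameter to actually be of type bool; the same motivation drives the int-to-bool and 0/1-to-false/true conversions in the b43 hunks further down. A minimal sketch of the pattern, using a made-up parameter name:

#include <linux/module.h>

/* Was "static int ..."; a parameter registered as bool must now be a bool. */
static bool modparam_example;
module_param_named(example, modparam_example, bool, S_IRUGO);
MODULE_PARM_DESC(example, "Illustrative bool parameter, not a real option.");
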
 
index 65ecb5b..10dea37 100644 (file)
@@ -21,6 +21,8 @@
 #include "regd.h"
 #include "regd_common.h"
 
+static int __ath_regd_init(struct ath_regulatory *reg);
+
 /*
  * This is a set of common rules used by our world regulatory domains.
  * We have 12 world regulatory domains. To save space we consolidate
@@ -347,10 +349,26 @@ static void ath_reg_apply_world_flags(struct wiphy *wiphy,
        }
 }
 
+static u16 ath_regd_find_country_by_name(char *alpha2)
+{
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
+               if (!memcmp(allCountries[i].isoName, alpha2, 2))
+                       return allCountries[i].countryCode;
+       }
+
+       return -1;
+}
+
 int ath_reg_notifier_apply(struct wiphy *wiphy,
                           struct regulatory_request *request,
                           struct ath_regulatory *reg)
 {
+       struct ath_common *common = container_of(reg, struct ath_common,
+                                                regulatory);
+       u16 country_code;
+
        /* We always apply this */
        ath_reg_apply_radar_flags(wiphy);
 
@@ -363,14 +381,37 @@ int ath_reg_notifier_apply(struct wiphy *wiphy,
                return 0;
 
        switch (request->initiator) {
-       case NL80211_REGDOM_SET_BY_DRIVER:
        case NL80211_REGDOM_SET_BY_CORE:
+               /*
+                * If common->reg_world_copy is world roaming it means we *were*
+                * world roaming... so we now have to restore that data.
+                */
+               if (!ath_is_world_regd(&common->reg_world_copy))
+                       break;
+
+               memcpy(reg, &common->reg_world_copy,
+                      sizeof(struct ath_regulatory));
+               break;
+       case NL80211_REGDOM_SET_BY_DRIVER:
        case NL80211_REGDOM_SET_BY_USER:
                break;
        case NL80211_REGDOM_SET_BY_COUNTRY_IE:
-               if (ath_is_world_regd(reg))
-                       ath_reg_apply_world_flags(wiphy, request->initiator,
-                                                 reg);
+               if (!ath_is_world_regd(reg))
+                       break;
+
+               country_code = ath_regd_find_country_by_name(request->alpha2);
+               if (country_code == (u16) -1)
+                       break;
+
+               reg->current_rd = COUNTRY_ERD_FLAG;
+               reg->current_rd |= country_code;
+
+               printk(KERN_DEBUG "ath: regdomain 0x%0x updated by CountryIE\n",
+                       reg->current_rd);
+               __ath_regd_init(reg);
+
+               ath_reg_apply_world_flags(wiphy, request->initiator, reg);
+
                break;
        }
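
Note the sentinel convention in the hunk above: ath_regd_find_country_by_name() returns u16, so its "return -1" wraps to 0xFFFF, and the caller therefore tests country_code == (u16) -1 before building current_rd from it. A small standalone illustration of that convention, with a hypothetical country-code value:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical lookup: returns (uint16_t)-1, i.e. 0xFFFF, when not found. */
static uint16_t toy_find_country(int found)
{
        return found ? 0x0348 : (uint16_t)-1;
}

int main(void)
{
        uint16_t code = toy_find_country(0);

        if (code == (uint16_t)-1)
                printf("not found, sentinel is 0x%04x\n", code); /* 0xffff */
        return 0;
}
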
 
@@ -508,11 +549,7 @@ static void ath_regd_sanitize(struct ath_regulatory *reg)
        reg->current_rd = 0x64;
 }
 
-int
-ath_regd_init(struct ath_regulatory *reg,
-             struct wiphy *wiphy,
-             int (*reg_notifier)(struct wiphy *wiphy,
-                                 struct regulatory_request *request))
+static int __ath_regd_init(struct ath_regulatory *reg)
 {
        struct country_code_to_enum_rd *country = NULL;
        u16 regdmn;
@@ -583,7 +620,29 @@ ath_regd_init(struct ath_regulatory *reg,
        printk(KERN_DEBUG "ath: Regpair used: 0x%0x\n",
                reg->regpair->regDmnEnum);
 
+       return 0;
+}
+
+int
+ath_regd_init(struct ath_regulatory *reg,
+             struct wiphy *wiphy,
+             int (*reg_notifier)(struct wiphy *wiphy,
+                                 struct regulatory_request *request))
+{
+       struct ath_common *common = container_of(reg, struct ath_common,
+                                                regulatory);
+       int r;
+
+       r = __ath_regd_init(reg);
+       if (r)
+               return r;
+
+       if (ath_is_world_regd(reg))
+               memcpy(&common->reg_world_copy, reg,
+                      sizeof(struct ath_regulatory));
+
        ath_regd_init_wiphy(reg, wiphy, reg_notifier);
+
        return 0;
 }
 EXPORT_SYMBOL(ath_regd_init);
index 5e45604..af23968 100644 (file)
@@ -890,7 +890,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
        else
                ring->ops = &dma32_ops;
        if (for_tx) {
-               ring->tx = 1;
+               ring->tx = true;
                ring->current_slot = -1;
        } else {
                if (ring->index == 0) {
@@ -1061,7 +1061,7 @@ void b43_dma_free(struct b43_wldev *dev)
 static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
 {
        u64 orig_mask = mask;
-       bool fallback = 0;
+       bool fallback = false;
        int err;
 
        /* Try to set the DMA mask. If it fails, try falling back to a
@@ -1075,12 +1075,12 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
                }
                if (mask == DMA_BIT_MASK(64)) {
                        mask = DMA_BIT_MASK(32);
-                       fallback = 1;
+                       fallback = true;
                        continue;
                }
                if (mask == DMA_BIT_MASK(32)) {
                        mask = DMA_BIT_MASK(30);
-                       fallback = 1;
+                       fallback = true;
                        continue;
                }
                b43err(dev->wl, "The machine/kernel does not support "
@@ -1307,7 +1307,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
        memset(meta, 0, sizeof(*meta));
 
        meta->skb = skb;
-       meta->is_last_fragment = 1;
+       meta->is_last_fragment = true;
        priv_info->bouncebuffer = NULL;
 
        meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
@@ -1466,7 +1466,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
            should_inject_overflow(ring)) {
                /* This TX ring is full. */
                ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
-               ring->stopped = 1;
+               ring->stopped = true;
                if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
                        b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
                }
@@ -1585,7 +1585,7 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
        if (ring->stopped) {
                B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
                ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
-               ring->stopped = 0;
+               ring->stopped = false;
                if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
                        b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
                }
index a38c1c6..d79ab2a 100644 (file)
@@ -74,7 +74,7 @@ static void b43_led_update(struct b43_wldev *dev,
        if (radio_enabled)
                turn_on = atomic_read(&led->state) != LED_OFF;
        else
-               turn_on = 0;
+               turn_on = false;
        if (turn_on == led->hw_state)
                return;
        led->hw_state = turn_on;
@@ -225,11 +225,11 @@ static void b43_led_get_sprominfo(struct b43_wldev *dev,
        if (sprom[led_index] == 0xFF) {
                /* There is no LED information in the SPROM
                 * for this LED. Hardcode it here. */
-               *activelow = 0;
+               *activelow = false;
                switch (led_index) {
                case 0:
                        *behaviour = B43_LED_ACTIVITY;
-                       *activelow = 1;
+                       *activelow = true;
                        if (dev->dev->board_vendor == PCI_VENDOR_ID_COMPAQ)
                                *behaviour = B43_LED_RADIO_ALL;
                        break;
@@ -267,11 +267,11 @@ void b43_leds_init(struct b43_wldev *dev)
        if (led->wl) {
                if (dev->phy.radio_on && b43_is_hw_radio_enabled(dev)) {
                        b43_led_turn_on(dev, led->index, led->activelow);
-                       led->hw_state = 1;
+                       led->hw_state = true;
                        atomic_set(&led->state, 1);
                } else {
                        b43_led_turn_off(dev, led->index, led->activelow);
-                       led->hw_state = 0;
+                       led->hw_state = false;
                        atomic_set(&led->state, 0);
                }
        }
@@ -280,19 +280,19 @@ void b43_leds_init(struct b43_wldev *dev)
        led = &dev->wl->leds.led_tx;
        if (led->wl) {
                b43_led_turn_off(dev, led->index, led->activelow);
-               led->hw_state = 0;
+               led->hw_state = false;
                atomic_set(&led->state, 0);
        }
        led = &dev->wl->leds.led_rx;
        if (led->wl) {
                b43_led_turn_off(dev, led->index, led->activelow);
-               led->hw_state = 0;
+               led->hw_state = false;
                atomic_set(&led->state, 0);
        }
        led = &dev->wl->leds.led_assoc;
        if (led->wl) {
                b43_led_turn_off(dev, led->index, led->activelow);
-               led->hw_state = 0;
+               led->hw_state = false;
                atomic_set(&led->state, 0);
        }
 
index 4c82d58..916123a 100644 (file)
@@ -826,7 +826,7 @@ void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all)
        const struct b43_rfatt *rfatt;
        const struct b43_bbatt *bbatt;
        u64 power_vector;
-       bool table_changed = 0;
+       bool table_changed = false;
 
        BUILD_BUG_ON(B43_DC_LT_SIZE != 32);
        B43_WARN_ON(lo->rfatt_list.len * lo->bbatt_list.len > 64);
@@ -876,7 +876,7 @@ void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all)
                        lo->dc_lt[idx] = (lo->dc_lt[idx] & 0xFF00)
                                         | (val & 0x00FF);
                }
-               table_changed = 1;
+               table_changed = true;
        }
        if (table_changed) {
                /* The table changed in memory. Update the hardware table. */
@@ -938,7 +938,7 @@ void b43_lo_g_maintanance_work(struct b43_wldev *dev)
        unsigned long now;
        unsigned long expire;
        struct b43_lo_calib *cal, *tmp;
-       bool current_item_expired = 0;
+       bool current_item_expired = false;
        bool hwpctl;
 
        if (!lo)
@@ -968,7 +968,7 @@ void b43_lo_g_maintanance_work(struct b43_wldev *dev)
                if (b43_compare_bbatt(&cal->bbatt, &gphy->bbatt) &&
                    b43_compare_rfatt(&cal->rfatt, &gphy->rfatt)) {
                        B43_WARN_ON(current_item_expired);
-                       current_item_expired = 1;
+                       current_item_expired = true;
                }
                if (b43_debug(dev, B43_DBG_LO)) {
                        b43dbg(dev->wl, "LO: Item BB(%u), RF(%u,%u), "
index 5634d9a..c74f36f 100644 (file)
@@ -1122,17 +1122,17 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags)
        B43_WARN_ON((ps_flags & B43_PS_AWAKE) && (ps_flags & B43_PS_ASLEEP));
 
        if (ps_flags & B43_PS_ENABLED) {
-               hwps = 1;
+               hwps = true;
        } else if (ps_flags & B43_PS_DISABLED) {
-               hwps = 0;
+               hwps = false;
        } else {
                //TODO: If powersave is not off and FIXME is not set and we are not in adhoc
                //      and thus is not an AP and we are associated, set bit 25
        }
        if (ps_flags & B43_PS_AWAKE) {
-               awake = 1;
+               awake = true;
        } else if (ps_flags & B43_PS_ASLEEP) {
-               awake = 0;
+               awake = false;
        } else {
                //TODO: If the device is awake or this is an AP, or we are scanning, or FIXME,
                //      or we are associated, or FIXME, or the latest PS-Poll packet sent was
@@ -1140,8 +1140,8 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags)
        }
 
 /* FIXME: For now we force awake-on and hwps-off */
-       hwps = 0;
-       awake = 1;
+       hwps = false;
+       awake = true;
 
        macctl = b43_read32(dev, B43_MMIO_MACCTL);
        if (hwps)
@@ -1339,7 +1339,7 @@ static void b43_calculate_link_quality(struct b43_wldev *dev)
                return;
        if (dev->noisecalc.calculation_running)
                return;
-       dev->noisecalc.calculation_running = 1;
+       dev->noisecalc.calculation_running = true;
        dev->noisecalc.nr_samples = 0;
 
        b43_generate_noise_sample(dev);
@@ -1408,7 +1408,7 @@ static void handle_irq_noise(struct b43_wldev *dev)
                        average -= 48;
 
                dev->stats.link_noise = average;
-               dev->noisecalc.calculation_running = 0;
+               dev->noisecalc.calculation_running = false;
                return;
        }
 generate_new:
@@ -1424,7 +1424,7 @@ static void handle_irq_tbtt_indication(struct b43_wldev *dev)
                        b43_power_saving_ctl_bits(dev, 0);
        }
        if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC))
-               dev->dfq_valid = 1;
+               dev->dfq_valid = true;
 }
 
 static void handle_irq_atim_end(struct b43_wldev *dev)
@@ -1433,7 +1433,7 @@ static void handle_irq_atim_end(struct b43_wldev *dev)
                b43_write32(dev, B43_MMIO_MACCMD,
                            b43_read32(dev, B43_MMIO_MACCMD)
                            | B43_MACCMD_DFQ_VALID);
-               dev->dfq_valid = 0;
+               dev->dfq_valid = false;
        }
 }
 
@@ -1539,7 +1539,7 @@ static void b43_write_beacon_template(struct b43_wldev *dev,
        unsigned int i, len, variable_len;
        const struct ieee80211_mgmt *bcn;
        const u8 *ie;
-       bool tim_found = 0;
+       bool tim_found = false;
        unsigned int rate;
        u16 ctl;
        int antenna;
@@ -1588,7 +1588,7 @@ static void b43_write_beacon_template(struct b43_wldev *dev,
                        /* A valid TIM is at least 4 bytes long. */
                        if (ie_len < 4)
                                break;
-                       tim_found = 1;
+                       tim_found = true;
 
                        tim_position = sizeof(struct b43_plcp_hdr6);
                        tim_position += offsetof(struct ieee80211_mgmt, u.beacon.variable);
@@ -1625,7 +1625,7 @@ static void b43_upload_beacon0(struct b43_wldev *dev)
        if (wl->beacon0_uploaded)
                return;
        b43_write_beacon_template(dev, 0x68, 0x18);
-       wl->beacon0_uploaded = 1;
+       wl->beacon0_uploaded = true;
 }
 
 static void b43_upload_beacon1(struct b43_wldev *dev)
@@ -1635,7 +1635,7 @@ static void b43_upload_beacon1(struct b43_wldev *dev)
        if (wl->beacon1_uploaded)
                return;
        b43_write_beacon_template(dev, 0x468, 0x1A);
-       wl->beacon1_uploaded = 1;
+       wl->beacon1_uploaded = true;
 }
 
 static void handle_irq_beacon(struct b43_wldev *dev)
@@ -1667,7 +1667,7 @@ static void handle_irq_beacon(struct b43_wldev *dev)
        if (unlikely(wl->beacon_templates_virgin)) {
                /* We never uploaded a beacon before.
                 * Upload both templates now, but only mark one valid. */
-               wl->beacon_templates_virgin = 0;
+               wl->beacon_templates_virgin = false;
                b43_upload_beacon0(dev);
                b43_upload_beacon1(dev);
                cmd = b43_read32(dev, B43_MMIO_MACCMD);
@@ -1755,8 +1755,8 @@ static void b43_update_templates(struct b43_wl *wl)
        if (wl->current_beacon)
                dev_kfree_skb_any(wl->current_beacon);
        wl->current_beacon = beacon;
-       wl->beacon0_uploaded = 0;
-       wl->beacon1_uploaded = 0;
+       wl->beacon0_uploaded = false;
+       wl->beacon1_uploaded = false;
        ieee80211_queue_work(wl->hw, &wl->beacon_update_trigger);
 }
 
@@ -1913,7 +1913,7 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
                        b43err(dev->wl, "This device does not support DMA "
                               "on your system. It will now be switched to PIO.\n");
                        /* Fall back to PIO transfers if we get fatal DMA errors! */
-                       dev->use_pio = 1;
+                       dev->use_pio = true;
                        b43_controller_restart(dev, "DMA error");
                        return;
                }
@@ -2240,12 +2240,12 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
                filename = NULL;
        else
                goto err_no_pcm;
-       fw->pcm_request_failed = 0;
+       fw->pcm_request_failed = false;
        err = b43_do_request_fw(ctx, filename, &fw->pcm);
        if (err == -ENOENT) {
                /* We did not find a PCM file? Not fatal, but
                 * core rev <= 10 must do without hwcrypto then. */
-               fw->pcm_request_failed = 1;
+               fw->pcm_request_failed = true;
        } else if (err)
                goto err_load;
 
@@ -2535,7 +2535,7 @@ static int b43_upload_microcode(struct b43_wldev *dev)
        dev->wl->hw->queues = dev->wl->mac80211_initially_registered_queues;
        dev->qos_enabled = !!modparam_qos;
        /* Default to firmware/hardware crypto acceleration. */
-       dev->hwcrypto_enabled = 1;
+       dev->hwcrypto_enabled = true;
 
        if (dev->fw.opensource) {
                u16 fwcapa;
@@ -2549,7 +2549,7 @@ static int b43_upload_microcode(struct b43_wldev *dev)
                if (!(fwcapa & B43_FWCAPA_HWCRYPTO) || dev->fw.pcm_request_failed) {
                        b43info(dev->wl, "Hardware crypto acceleration not supported by firmware\n");
                        /* Disable hardware crypto and fall back to software crypto. */
-                       dev->hwcrypto_enabled = 0;
+                       dev->hwcrypto_enabled = false;
                }
                if (!(fwcapa & B43_FWCAPA_QOS)) {
                        b43info(dev->wl, "QoS not supported by firmware\n");
@@ -2557,7 +2557,7 @@ static int b43_upload_microcode(struct b43_wldev *dev)
                         * ieee80211_unregister to make sure the networking core can
                         * properly free possible resources. */
                        dev->wl->hw->queues = 1;
-                       dev->qos_enabled = 0;
+                       dev->qos_enabled = false;
                }
        } else {
                b43info(dev->wl, "Loading firmware version %u.%u "
@@ -3361,10 +3361,10 @@ static int b43_rng_init(struct b43_wl *wl)
        wl->rng.name = wl->rng_name;
        wl->rng.data_read = b43_rng_read;
        wl->rng.priv = (unsigned long)wl;
-       wl->rng_initialized = 1;
+       wl->rng_initialized = true;
        err = hwrng_register(&wl->rng);
        if (err) {
-               wl->rng_initialized = 0;
+               wl->rng_initialized = false;
                b43err(wl, "Failed to register the random "
                       "number generator (%d)\n", err);
        }
@@ -3702,13 +3702,13 @@ static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan)
                case IEEE80211_BAND_5GHZ:
                        if (d->phy.supports_5ghz) {
                                up_dev = d;
-                               gmode = 0;
+                               gmode = false;
                        }
                        break;
                case IEEE80211_BAND_2GHZ:
                        if (d->phy.supports_2ghz) {
                                up_dev = d;
-                               gmode = 1;
+                               gmode = true;
                        }
                        break;
                default:
@@ -4425,18 +4425,18 @@ static void setup_struct_phy_for_init(struct b43_wldev *dev,
        atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT);
 
 #if B43_DEBUG
-       phy->phy_locked = 0;
-       phy->radio_locked = 0;
+       phy->phy_locked = false;
+       phy->radio_locked = false;
 #endif
 }
 
 static void setup_struct_wldev_for_init(struct b43_wldev *dev)
 {
-       dev->dfq_valid = 0;
+       dev->dfq_valid = false;
 
        /* Assume the radio is enabled. If it's not enabled, the state will
         * immediately get fixed on the first periodic work run. */
-       dev->radio_hw_enable = 1;
+       dev->radio_hw_enable = true;
 
        /* Stats */
        memset(&dev->stats, 0, sizeof(dev->stats));
@@ -4670,16 +4670,16 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
 
        if (b43_bus_host_is_pcmcia(dev->dev) ||
            b43_bus_host_is_sdio(dev->dev)) {
-               dev->__using_pio_transfers = 1;
+               dev->__using_pio_transfers = true;
                err = b43_pio_init(dev);
        } else if (dev->use_pio) {
                b43warn(dev->wl, "Forced PIO by use_pio module parameter. "
                        "This should not be needed and will result in lower "
                        "performance.\n");
-               dev->__using_pio_transfers = 1;
+               dev->__using_pio_transfers = true;
                err = b43_pio_init(dev);
        } else {
-               dev->__using_pio_transfers = 0;
+               dev->__using_pio_transfers = false;
                err = b43_dma_init(dev);
        }
        if (err)
@@ -4733,7 +4733,7 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
        b43dbg(wl, "Adding Interface type %d\n", vif->type);
 
        dev = wl->current_dev;
-       wl->operating = 1;
+       wl->operating = true;
        wl->vif = vif;
        wl->if_type = vif->type;
        memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
@@ -4767,7 +4767,7 @@ static void b43_op_remove_interface(struct ieee80211_hw *hw,
        B43_WARN_ON(wl->vif != vif);
        wl->vif = NULL;
 
-       wl->operating = 0;
+       wl->operating = false;
 
        b43_adjust_opmode(dev);
        memset(wl->mac_addr, 0, ETH_ALEN);
@@ -4789,12 +4789,12 @@ static int b43_op_start(struct ieee80211_hw *hw)
        memset(wl->bssid, 0, ETH_ALEN);
        memset(wl->mac_addr, 0, ETH_ALEN);
        wl->filter_flags = 0;
-       wl->radiotap_enabled = 0;
+       wl->radiotap_enabled = false;
        b43_qos_clear(wl);
-       wl->beacon0_uploaded = 0;
-       wl->beacon1_uploaded = 0;
-       wl->beacon_templates_virgin = 1;
-       wl->radio_enabled = 1;
+       wl->beacon0_uploaded = false;
+       wl->beacon1_uploaded = false;
+       wl->beacon_templates_virgin = true;
+       wl->radio_enabled = true;
 
        mutex_lock(&wl->mutex);
 
@@ -4840,7 +4840,7 @@ static void b43_op_stop(struct ieee80211_hw *hw)
                        goto out_unlock;
        }
        b43_wireless_core_exit(dev);
-       wl->radio_enabled = 0;
+       wl->radio_enabled = false;
 
 out_unlock:
        mutex_unlock(&wl->mutex);
@@ -5028,7 +5028,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
        struct pci_dev *pdev = NULL;
        int err;
        u32 tmp;
-       bool have_2ghz_phy = 0, have_5ghz_phy = 0;
+       bool have_2ghz_phy = false, have_5ghz_phy = false;
 
        /* Do NOT do any device initialization here.
         * Do it in wireless_core_init() instead.
@@ -5071,7 +5071,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
        }
 
        dev->phy.gmode = have_2ghz_phy;
-       dev->phy.radio_on = 1;
+       dev->phy.radio_on = true;
        b43_wireless_core_reset(dev, dev->phy.gmode);
 
        err = b43_phy_versioning(dev);
@@ -5082,11 +5082,11 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
            (pdev->device != 0x4312 &&
             pdev->device != 0x4319 && pdev->device != 0x4324)) {
                /* No multiband support. */
-               have_2ghz_phy = 0;
-               have_5ghz_phy = 0;
+               have_2ghz_phy = false;
+               have_5ghz_phy = false;
                switch (dev->phy.type) {
                case B43_PHYTYPE_A:
-                       have_5ghz_phy = 1;
+                       have_5ghz_phy = true;
                        break;
                case B43_PHYTYPE_LP: //FIXME not always!
 #if 0 //FIXME enabling 5GHz causes a NULL pointer dereference
@@ -5096,7 +5096,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
                case B43_PHYTYPE_N:
                case B43_PHYTYPE_HT:
                case B43_PHYTYPE_LCN:
-                       have_2ghz_phy = 1;
+                       have_2ghz_phy = true;
                        break;
                default:
                        B43_WARN_ON(1);
@@ -5112,8 +5112,8 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
                /* FIXME: For now we disable the A-PHY on multi-PHY devices. */
                if (dev->phy.type != B43_PHYTYPE_N &&
                    dev->phy.type != B43_PHYTYPE_LP) {
-                       have_2ghz_phy = 1;
-                       have_5ghz_phy = 0;
+                       have_2ghz_phy = true;
+                       have_5ghz_phy = false;
                }
        }
 
index 3ea44bb..3f8883b 100644 (file)
@@ -145,7 +145,7 @@ void b43_radio_lock(struct b43_wldev *dev)
 
 #if B43_DEBUG
        B43_WARN_ON(dev->phy.radio_locked);
-       dev->phy.radio_locked = 1;
+       dev->phy.radio_locked = true;
 #endif
 
        macctl = b43_read32(dev, B43_MMIO_MACCTL);
@@ -163,7 +163,7 @@ void b43_radio_unlock(struct b43_wldev *dev)
 
 #if B43_DEBUG
        B43_WARN_ON(!dev->phy.radio_locked);
-       dev->phy.radio_locked = 0;
+       dev->phy.radio_locked = false;
 #endif
 
        /* Commit any write */
@@ -178,7 +178,7 @@ void b43_phy_lock(struct b43_wldev *dev)
 {
 #if B43_DEBUG
        B43_WARN_ON(dev->phy.phy_locked);
-       dev->phy.phy_locked = 1;
+       dev->phy.phy_locked = true;
 #endif
        B43_WARN_ON(dev->dev->core_rev < 3);
 
@@ -190,7 +190,7 @@ void b43_phy_unlock(struct b43_wldev *dev)
 {
 #if B43_DEBUG
        B43_WARN_ON(!dev->phy.phy_locked);
-       dev->phy.phy_locked = 0;
+       dev->phy.phy_locked = false;
 #endif
        B43_WARN_ON(dev->dev->core_rev < 3);
 
index 8e157bc..12f467b 100644 (file)
@@ -897,7 +897,7 @@ b43_radio_interference_mitigation_enable(struct b43_wldev *dev, int mode)
                if (b43_phy_read(dev, 0x0033) & 0x0800)
                        break;
 
-               gphy->aci_enable = 1;
+               gphy->aci_enable = true;
 
                phy_stacksave(B43_PHY_RADIO_BITFIELD);
                phy_stacksave(B43_PHY_G_CRS);
@@ -1038,7 +1038,7 @@ b43_radio_interference_mitigation_disable(struct b43_wldev *dev, int mode)
                if (!(b43_phy_read(dev, 0x0033) & 0x0800))
                        break;
 
-               gphy->aci_enable = 0;
+               gphy->aci_enable = false;
 
                phy_stackrestore(B43_PHY_RADIO_BITFIELD);
                phy_stackrestore(B43_PHY_G_CRS);
@@ -1956,10 +1956,10 @@ static void b43_phy_init_pctl(struct b43_wldev *dev)
                        bbatt.att = 11;
                        if (phy->radio_rev == 8) {
                                rfatt.att = 15;
-                               rfatt.with_padmix = 1;
+                               rfatt.with_padmix = true;
                        } else {
                                rfatt.att = 9;
-                               rfatt.with_padmix = 0;
+                               rfatt.with_padmix = false;
                        }
                        b43_set_txpower_g(dev, &bbatt, &rfatt, 0);
                }
@@ -2137,7 +2137,7 @@ static void default_radio_attenuation(struct b43_wldev *dev,
        struct b43_bus_dev *bdev = dev->dev;
        struct b43_phy *phy = &dev->phy;
 
-       rf->with_padmix = 0;
+       rf->with_padmix = false;
 
        if (dev->dev->board_vendor == SSB_BOARDVENDOR_BCM &&
            dev->dev->board_type == SSB_BOARD_BCM4309G) {
@@ -2221,7 +2221,7 @@ static void default_radio_attenuation(struct b43_wldev *dev,
                        return;
                case 8:
                        rf->att = 0xA;
-                       rf->with_padmix = 1;
+                       rf->with_padmix = true;
                        return;
                case 9:
                default:
@@ -2389,7 +2389,7 @@ static int b43_gphy_init_tssi2dbm_table(struct b43_wldev *dev)
        B43_WARN_ON((dev->dev->chip_id == 0x4301) &&
                    (phy->radio_ver != 0x2050)); /* Not supported anymore */
 
-       gphy->dyn_tssi_tbl = 0;
+       gphy->dyn_tssi_tbl = false;
 
        if (pab0 != 0 && pab1 != 0 && pab2 != 0 &&
            pab0 != -1 && pab1 != -1 && pab2 != -1) {
@@ -2404,7 +2404,7 @@ static int b43_gphy_init_tssi2dbm_table(struct b43_wldev *dev)
                                                               pab1, pab2);
                if (!gphy->tssi2dbm)
                        return -ENOMEM;
-               gphy->dyn_tssi_tbl = 1;
+               gphy->dyn_tssi_tbl = true;
        } else {
                /* pabX values not set in SPROM. */
                gphy->tgt_idle_tssi = 52;
@@ -2504,7 +2504,7 @@ static void b43_gphy_op_free(struct b43_wldev *dev)
 
        if (gphy->dyn_tssi_tbl)
                kfree(gphy->tssi2dbm);
-       gphy->dyn_tssi_tbl = 0;
+       gphy->dyn_tssi_tbl = false;
        gphy->tssi2dbm = NULL;
 
        kfree(gphy);
@@ -2531,10 +2531,10 @@ static int b43_gphy_op_prepare_hardware(struct b43_wldev *dev)
        if (phy->rev == 1) {
                /* Workaround: Temporarily disable gmode through the early init
                 * phase, as the gmode stuff is not needed for phy rev 1 */
-               phy->gmode = 0;
+               phy->gmode = false;
                b43_wireless_core_reset(dev, 0);
                b43_phy_initg(dev);
-               phy->gmode = 1;
+               phy->gmode = true;
                b43_wireless_core_reset(dev, 1);
        }
 
@@ -2613,7 +2613,7 @@ static void b43_gphy_op_software_rfkill(struct b43_wldev *dev,
                                      gphy->radio_off_context.rfover);
                        b43_phy_write(dev, B43_PHY_RFOVERVAL,
                                      gphy->radio_off_context.rfoverval);
-                       gphy->radio_off_context.valid = 0;
+                       gphy->radio_off_context.valid = false;
                }
                channel = phy->channel;
                b43_gphy_channel_switch(dev, 6, 1);
@@ -2626,7 +2626,7 @@ static void b43_gphy_op_software_rfkill(struct b43_wldev *dev,
                rfoverval = b43_phy_read(dev, B43_PHY_RFOVERVAL);
                gphy->radio_off_context.rfover = rfover;
                gphy->radio_off_context.rfoverval = rfoverval;
-               gphy->radio_off_context.valid = 1;
+               gphy->radio_off_context.valid = true;
                b43_phy_write(dev, B43_PHY_RFOVER, rfover | 0x008C);
                b43_phy_write(dev, B43_PHY_RFOVERVAL, rfoverval & 0xFF73);
        }
@@ -2711,10 +2711,10 @@ static int b43_gphy_op_interf_mitigation(struct b43_wldev *dev,
        if ((phy->rev == 0) || (!phy->gmode))
                return -ENODEV;
 
-       gphy->aci_wlan_automatic = 0;
+       gphy->aci_wlan_automatic = false;
        switch (mode) {
        case B43_INTERFMODE_AUTOWLAN:
-               gphy->aci_wlan_automatic = 1;
+               gphy->aci_wlan_automatic = true;
                if (gphy->aci_enable)
                        mode = B43_INTERFMODE_MANUALWLAN;
                else
@@ -2735,8 +2735,8 @@ static int b43_gphy_op_interf_mitigation(struct b43_wldev *dev,
                b43_radio_interference_mitigation_disable(dev, currentmode);
 
        if (mode == B43_INTERFMODE_NONE) {
-               gphy->aci_enable = 0;
-               gphy->aci_hw_rssi = 0;
+               gphy->aci_enable = false;
+               gphy->aci_hw_rssi = false;
        } else
                b43_radio_interference_mitigation_enable(dev, mode);
        gphy->interfmode = mode;
index f93d66b..3ae2856 100644 (file)
@@ -736,9 +736,9 @@ static void lpphy_set_deaf(struct b43_wldev *dev, bool user)
        struct b43_phy_lp *lpphy = dev->phy.lp;
 
        if (user)
-               lpphy->crs_usr_disable = 1;
+               lpphy->crs_usr_disable = true;
        else
-               lpphy->crs_sys_disable = 1;
+               lpphy->crs_sys_disable = true;
        b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFF1F, 0x80);
 }
 
@@ -747,9 +747,9 @@ static void lpphy_clear_deaf(struct b43_wldev *dev, bool user)
        struct b43_phy_lp *lpphy = dev->phy.lp;
 
        if (user)
-               lpphy->crs_usr_disable = 0;
+               lpphy->crs_usr_disable = false;
        else
-               lpphy->crs_sys_disable = 0;
+               lpphy->crs_sys_disable = false;
 
        if (!lpphy->crs_usr_disable && !lpphy->crs_sys_disable) {
                if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
index b17d9b6..f1a7e58 100644 (file)
@@ -228,10 +228,98 @@ static void b43_chantab_radio_2056_upload(struct b43_wldev *dev,
 static void b43_radio_2056_setup(struct b43_wldev *dev,
                                const struct b43_nphy_channeltab_entry_rev3 *e)
 {
+       struct ssb_sprom *sprom = dev->dev->bus_sprom;
+       enum ieee80211_band band = b43_current_band(dev->wl);
+       u16 offset;
+       u8 i;
+       u16 bias, cbias, pag_boost, pgag_boost, mixg_boost, padg_boost;
+
        B43_WARN_ON(dev->phy.rev < 3);
 
        b43_chantab_radio_2056_upload(dev, e);
-       /* TODO */
+       b2056_upload_syn_pll_cp2(dev, band == IEEE80211_BAND_5GHZ);
+
+       if (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
+           b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+               b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
+               b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
+               if (dev->dev->chip_id == 0x4716) {
+                       b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x14);
+                       b43_radio_write(dev, B2056_SYN_PLL_CP2, 0);
+               } else {
+                       b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x0B);
+                       b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x14);
+               }
+       }
+       if (sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
+           b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+               b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
+               b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
+               b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x05);
+               b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x0C);
+       }
+
+       if (dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) {
+               for (i = 0; i < 2; i++) {
+                       offset = i ? B2056_TX1 : B2056_TX0;
+                       if (dev->phy.rev >= 5) {
+                               b43_radio_write(dev,
+                                       offset | B2056_TX_PADG_IDAC, 0xcc);
+
+                               if (dev->dev->chip_id == 0x4716) {
+                                       bias = 0x40;
+                                       cbias = 0x45;
+                                       pag_boost = 0x5;
+                                       pgag_boost = 0x33;
+                                       mixg_boost = 0x55;
+                               } else {
+                                       bias = 0x25;
+                                       cbias = 0x20;
+                                       pag_boost = 0x4;
+                                       pgag_boost = 0x03;
+                                       mixg_boost = 0x65;
+                               }
+                               padg_boost = 0x77;
+
+                               b43_radio_write(dev,
+                                       offset | B2056_TX_INTPAG_IMAIN_STAT,
+                                       bias);
+                               b43_radio_write(dev,
+                                       offset | B2056_TX_INTPAG_IAUX_STAT,
+                                       bias);
+                               b43_radio_write(dev,
+                                       offset | B2056_TX_INTPAG_CASCBIAS,
+                                       cbias);
+                               b43_radio_write(dev,
+                                       offset | B2056_TX_INTPAG_BOOST_TUNE,
+                                       pag_boost);
+                               b43_radio_write(dev,
+                                       offset | B2056_TX_PGAG_BOOST_TUNE,
+                                       pgag_boost);
+                               b43_radio_write(dev,
+                                       offset | B2056_TX_PADG_BOOST_TUNE,
+                                       padg_boost);
+                               b43_radio_write(dev,
+                                       offset | B2056_TX_MIXG_BOOST_TUNE,
+                                       mixg_boost);
+                       } else {
+                               bias = dev->phy.is_40mhz ? 0x40 : 0x20;
+                               b43_radio_write(dev,
+                                       offset | B2056_TX_INTPAG_IMAIN_STAT,
+                                       bias);
+                               b43_radio_write(dev,
+                                       offset | B2056_TX_INTPAG_IAUX_STAT,
+                                       bias);
+                               b43_radio_write(dev,
+                                       offset | B2056_TX_INTPAG_CASCBIAS,
+                                       0x30);
+                       }
+                       b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee);
+               }
+       } else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) {
+               /* TODO */
+       }
+
        udelay(50);
        /* VCO calibration */
        b43_radio_write(dev, B2056_SYN_PLL_VCOCAL12, 0x00);
@@ -387,7 +475,9 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
        if (nphy->hang_avoid)
                b43_nphy_stay_in_carrier_search(dev, 1);
 
-       if (dev->phy.rev >= 3) {
+       if (dev->phy.rev >= 7) {
+               txpi[0] = txpi[1] = 30;
+       } else if (dev->phy.rev >= 3) {
                txpi[0] = 40;
                txpi[1] = 40;
        } else if (sprom->revision < 4) {
@@ -411,6 +501,9 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
                        txpi[1] = 91;
                }
        }
+       if (dev->phy.rev < 7 &&
+           (txpi[0] < 40 || txpi[0] > 100 || txpi[1] < 40 || txpi[1] > 100))
+               txpi[0] = txpi[1] = 91;
 
        /*
        for (i = 0; i < 2; i++) {
@@ -421,15 +514,31 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
 
        for (i = 0; i < 2; i++) {
                if (dev->phy.rev >= 3) {
-                       /* FIXME: support 5GHz */
-                       txgain = b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]];
+                       if (b43_nphy_ipa(dev)) {
+                               txgain = *(b43_nphy_get_ipa_gain_table(dev) +
+                                               txpi[i]);
+                       } else if (b43_current_band(dev->wl) ==
+                                  IEEE80211_BAND_5GHZ) {
+                               /* FIXME: use 5GHz tables */
+                               txgain =
+                                       b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]];
+                       } else {
+                               if (dev->phy.rev >= 5 &&
+                                   sprom->fem.ghz5.extpa_gain == 3)
+                                       ; /* FIXME: 5GHz_txgain_HiPwrEPA */
+                               txgain =
+                                       b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]];
+                       }
                        radio_gain = (txgain >> 16) & 0x1FFFF;
                } else {
                        txgain = b43_ntab_tx_gain_rev0_1_2[txpi[i]];
                        radio_gain = (txgain >> 16) & 0x1FFF;
                }
 
-               dac_gain = (txgain >> 8) & 0x3F;
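+               /* rev7+ gain table entries carry a narrower (3-bit) DAC gain field */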
+               if (dev->phy.rev >= 7)
+                       dac_gain = (txgain >> 8) & 0x7;
+               else
+                       dac_gain = (txgain >> 8) & 0x3F;
                bbmult = txgain & 0xFF;
 
                if (dev->phy.rev >= 3) {
@@ -459,7 +568,8 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
                        u32 tmp32;
                        u16 reg = (i == 0) ?
                                B43_NPHY_PAPD_EN0 : B43_NPHY_PAPD_EN1;
-                       tmp32 = b43_ntab_read(dev, B43_NTAB32(26 + i, txpi[i]));
+                       tmp32 = b43_ntab_read(dev, B43_NTAB32(26 + i,
+                                                             576 + txpi[i]));
                        b43_phy_maskset(dev, reg, 0xE00F, (u32) tmp32 << 4);
                        b43_phy_set(dev, reg, 0x4);
                }
@@ -1493,8 +1603,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
        struct ssb_sprom *sprom = dev->dev->bus_sprom;
 
        /* TX to RX */
-       u8 tx2rx_events[9] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F };
-       u8 tx2rx_delays[9] = { 8, 4, 2, 2, 4, 4, 6, 1 };
+       u8 tx2rx_events[8] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F };
+       u8 tx2rx_delays[8] = { 8, 4, 2, 2, 4, 4, 6, 1 };
        /* RX to TX */
        u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
                                        0x1F };
@@ -1505,6 +1615,9 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
        u16 tmp16;
        u32 tmp32;
 
+       b43_phy_write(dev, 0x23f, 0x1f8);
+       b43_phy_write(dev, 0x240, 0x1f8);
+
        tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
        tmp32 &= 0xffffff;
        b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);
@@ -1520,12 +1633,13 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
        b43_phy_write(dev, 0x2AE, 0x000C);
 
        /* TX to RX */
-       b43_nphy_set_rf_sequence(dev, 1, tx2rx_events, tx2rx_delays, 9);
+       b43_nphy_set_rf_sequence(dev, 1, tx2rx_events, tx2rx_delays,
+                                ARRAY_SIZE(tx2rx_events));
 
        /* RX to TX */
        if (b43_nphy_ipa(dev))
-               b43_nphy_set_rf_sequence(dev, 1, rx2tx_events_ipa,
-                                        rx2tx_delays_ipa, 9);
+               b43_nphy_set_rf_sequence(dev, 0, rx2tx_events_ipa,
+                               rx2tx_delays_ipa, ARRAY_SIZE(rx2tx_events_ipa));
        if (nphy->hw_phyrxchain != 3 &&
            nphy->hw_phyrxchain != nphy->hw_phytxchain) {
                if (b43_nphy_ipa(dev)) {
@@ -1533,7 +1647,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
                        rx2tx_delays[6] = 1;
                        rx2tx_events[7] = 0x1F;
                }
-               b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays, 9);
+               b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays,
+                                        ARRAY_SIZE(rx2tx_events));
        }
 
        tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ?
@@ -1547,8 +1662,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
 
        b43_nphy_gain_ctrl_workarounds(dev);
 
-       b43_ntab_write(dev, B43_NTAB32(8, 0), 2);
-       b43_ntab_write(dev, B43_NTAB32(8, 16), 2);
+       b43_ntab_write(dev, B43_NTAB16(8, 0), 2);
+       b43_ntab_write(dev, B43_NTAB16(8, 16), 2);
 
        /* TODO */
 
@@ -1560,6 +1675,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
        b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07);
        b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88);
        b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88);
+       b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_CMFB_IDAC, 0x00);
+       b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_CMFB_IDAC, 0x00);
        b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
        b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
 
@@ -1584,18 +1701,18 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
                                0x70);
        }
 
-       b43_phy_write(dev, 0x224, 0x039C);
-       b43_phy_write(dev, 0x225, 0x0357);
-       b43_phy_write(dev, 0x226, 0x0317);
-       b43_phy_write(dev, 0x227, 0x02D7);
-       b43_phy_write(dev, 0x228, 0x039C);
-       b43_phy_write(dev, 0x229, 0x0357);
-       b43_phy_write(dev, 0x22A, 0x0317);
-       b43_phy_write(dev, 0x22B, 0x02D7);
-       b43_phy_write(dev, 0x22C, 0x039C);
-       b43_phy_write(dev, 0x22D, 0x0357);
-       b43_phy_write(dev, 0x22E, 0x0317);
-       b43_phy_write(dev, 0x22F, 0x02D7);
+       b43_phy_write(dev, 0x224, 0x03eb);
+       b43_phy_write(dev, 0x225, 0x03eb);
+       b43_phy_write(dev, 0x226, 0x0341);
+       b43_phy_write(dev, 0x227, 0x0341);
+       b43_phy_write(dev, 0x228, 0x042b);
+       b43_phy_write(dev, 0x229, 0x042b);
+       b43_phy_write(dev, 0x22a, 0x0381);
+       b43_phy_write(dev, 0x22b, 0x0381);
+       b43_phy_write(dev, 0x22c, 0x042b);
+       b43_phy_write(dev, 0x22d, 0x042b);
+       b43_phy_write(dev, 0x22e, 0x0381);
+       b43_phy_write(dev, 0x22f, 0x0381);
 }
 
 static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
@@ -3258,7 +3375,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
 
        if (dev->phy.rev >= 4) {
                avoid = nphy->hang_avoid;
-               nphy->hang_avoid = 0;
+               nphy->hang_avoid = false;
        }
 
        b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, save);
@@ -3368,7 +3485,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
 
                        if (phy6or5x && updated[core] == 0) {
                                b43_nphy_update_tx_cal_ladder(dev, core);
-                               updated[core] = 1;
+                               updated[core] = true;
                        }
 
                        tmp = (params[core].ncorr[type] << 8) | 0x66;
@@ -3928,6 +4045,76 @@ int b43_phy_initn(struct b43_wldev *dev)
        return 0;
 }
 
+/* http://bcm-v4.sipsolutions.net/802.11/PmuSpurAvoid */
+static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid)
+{
+       struct bcma_drv_cc *cc;
+       u32 pmu_ctl;
+
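+       /*
+        * Switch the chipcommon PLL between its default and spur-avoidance
+        * settings for the chip at hand, then ask the PMU to latch the new
+        * PLL configuration.
+        */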
+       switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+       case B43_BUS_BCMA:
+               cc = &dev->dev->bdev->bus->drv_cc;
+               if (dev->dev->chip_id == 43224 || dev->dev->chip_id == 43225) {
+                       if (avoid) {
+                               bcma_chipco_pll_write(cc, 0x0, 0x11500010);
+                               bcma_chipco_pll_write(cc, 0x1, 0x000C0C06);
+                               bcma_chipco_pll_write(cc, 0x2, 0x0F600a08);
+                               bcma_chipco_pll_write(cc, 0x3, 0x00000000);
+                               bcma_chipco_pll_write(cc, 0x4, 0x2001E920);
+                               bcma_chipco_pll_write(cc, 0x5, 0x88888815);
+                       } else {
+                               bcma_chipco_pll_write(cc, 0x0, 0x11100010);
+                               bcma_chipco_pll_write(cc, 0x1, 0x000c0c06);
+                               bcma_chipco_pll_write(cc, 0x2, 0x03000a08);
+                               bcma_chipco_pll_write(cc, 0x3, 0x00000000);
+                               bcma_chipco_pll_write(cc, 0x4, 0x200005c0);
+                               bcma_chipco_pll_write(cc, 0x5, 0x88888815);
+                       }
+                       pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD;
+               } else if (dev->dev->chip_id == 0x4716) {
+                       if (avoid) {
+                               bcma_chipco_pll_write(cc, 0x0, 0x11500060);
+                               bcma_chipco_pll_write(cc, 0x1, 0x080C0C06);
+                               bcma_chipco_pll_write(cc, 0x2, 0x0F600000);
+                               bcma_chipco_pll_write(cc, 0x3, 0x00000000);
+                               bcma_chipco_pll_write(cc, 0x4, 0x2001E924);
+                               bcma_chipco_pll_write(cc, 0x5, 0x88888815);
+                       } else {
+                               bcma_chipco_pll_write(cc, 0x0, 0x11100060);
+                               bcma_chipco_pll_write(cc, 0x1, 0x080c0c06);
+                               bcma_chipco_pll_write(cc, 0x2, 0x03000000);
+                               bcma_chipco_pll_write(cc, 0x3, 0x00000000);
+                               bcma_chipco_pll_write(cc, 0x4, 0x200005c0);
+                               bcma_chipco_pll_write(cc, 0x5, 0x88888815);
+                       }
+                       pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD |
+                                 BCMA_CC_PMU_CTL_NOILPONW;
+               } else if (dev->dev->chip_id == 0x4322 ||
+                          dev->dev->chip_id == 0x4340 ||
+                          dev->dev->chip_id == 0x4341) {
+                       bcma_chipco_pll_write(cc, 0x0, 0x11100070);
+                       bcma_chipco_pll_write(cc, 0x1, 0x1014140a);
+                       bcma_chipco_pll_write(cc, 0x5, 0x88888854);
+                       if (avoid)
+                               bcma_chipco_pll_write(cc, 0x2, 0x05201828);
+                       else
+                               bcma_chipco_pll_write(cc, 0x2, 0x05001828);
+                       pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD;
+               } else {
+                       return;
+               }
+               bcma_cc_set32(cc, BCMA_CC_PMU_CTL, pmu_ctl);
+               break;
+#endif
+#ifdef CONFIG_B43_SSB
+       case B43_BUS_SSB:
+               /* FIXME */
+               break;
+#endif
+       }
+}
+
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */
 static void b43_nphy_channel_setup(struct b43_wldev *dev,
                                const struct b43_phy_n_sfo_cfg *e,
@@ -3935,6 +4122,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
 {
        struct b43_phy *phy = &dev->phy;
        struct b43_phy_n *nphy = dev->phy.n;
+       int ch = new_channel->hw_value;
 
        u16 old_band_5ghz;
        u32 tmp32;
@@ -3974,8 +4162,41 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
 
        b43_nphy_tx_lp_fbw(dev);
 
-       if (dev->phy.rev >= 3 && 0) {
-               /* TODO */
+       if (dev->phy.rev >= 3 &&
+           dev->phy.n->spur_avoid != B43_SPUR_AVOID_DISABLE) {
+               bool avoid = false;
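+               /* In AUTO mode only selected (spur-prone) channels get the workaround */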
+               if (dev->phy.n->spur_avoid == B43_SPUR_AVOID_FORCE) {
+                       avoid = true;
+               } else if (!b43_channel_type_is_40mhz(phy->channel_type)) {
+                       if ((ch >= 5 && ch <= 8) || ch == 13 || ch == 14)
+                               avoid = true;
+               } else { /* 40MHz */
+                       if (nphy->aband_spurwar_en &&
+                           (ch == 38 || ch == 102 || ch == 118))
+                               avoid = dev->dev->chip_id == 0x4716;
+               }
+
+               b43_nphy_pmu_spur_avoid(dev, avoid);
+
+               if (dev->dev->chip_id == 43222 || dev->dev->chip_id == 43224 ||
+                   dev->dev->chip_id == 43225) {
+                       b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW,
+                                   avoid ? 0x5341 : 0x8889);
+                       b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x8);
+               }
+
+               if (dev->phy.rev == 3 || dev->phy.rev == 4)
+                       ; /* TODO: reset PLL */
+
+               if (avoid)
+                       b43_phy_set(dev, B43_NPHY_BBCFG, B43_NPHY_BBCFG_RSTRX);
+               else
+                       b43_phy_mask(dev, B43_NPHY_BBCFG,
+                                    ~B43_NPHY_BBCFG_RSTRX & 0xFFFF);
+
+               b43_nphy_reset_cca(dev);
+
+               /* wl sets useless phy_isspuravoid here */
        }
 
        b43_phy_write(dev, B43_NPHY_NDATAT_DUP40, 0x3830);
@@ -4055,10 +4276,13 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
 {
        struct b43_phy *phy = &dev->phy;
        struct b43_phy_n *nphy = phy->n;
+       struct ssb_sprom *sprom = dev->dev->bus_sprom;
 
        memset(nphy, 0, sizeof(*nphy));
 
        nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4);
+       nphy->spur_avoid = (phy->rev >= 3) ?
+                               B43_SPUR_AVOID_AUTO : B43_SPUR_AVOID_DISABLE;
        nphy->gain_boost = true; /* this way we follow wl, assume it is true */
        nphy->txrx_chain = 2; /* something other than 0 and 1 for now */
        nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */
@@ -4067,6 +4291,38 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
         * 0x7f == 127 and we check for 128 when restoring TX pwr ctl. */
        nphy->tx_pwr_idx[0] = 128;
        nphy->tx_pwr_idx[1] = 128;
+
+       /* Hardware TX power control and 5GHz power gain */
+       nphy->txpwrctrl = false;
+       nphy->pwg_gain_5ghz = false;
+       if (dev->phy.rev >= 3 ||
+           (dev->dev->board_vendor == PCI_VENDOR_ID_APPLE &&
+            (dev->dev->core_rev == 11 || dev->dev->core_rev == 12))) {
+               nphy->txpwrctrl = true;
+               nphy->pwg_gain_5ghz = true;
+       } else if (sprom->revision >= 4) {
+               if (dev->phy.rev >= 2 &&
+                   (sprom->boardflags2_lo & B43_BFL2_TXPWRCTRL_EN)) {
+                       nphy->txpwrctrl = true;
+#ifdef CONFIG_B43_SSB
+                       if (dev->dev->bus_type == B43_BUS_SSB &&
+                           dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI) {
+                               struct pci_dev *pdev =
+                                       dev->dev->sdev->bus->host_pci;
+                               if (pdev->device == 0x4328 ||
+                                   pdev->device == 0x432a)
+                                       nphy->pwg_gain_5ghz = true;
+                       }
+#endif
+               } else if (sprom->boardflags2_lo & B43_BFL2_5G_PWRGAIN) {
+                       nphy->pwg_gain_5ghz = true;
+               }
+       }
+
+       if (dev->phy.rev >= 3) {
+               nphy->ipa2g_on = sprom->fem.ghz2.extpa_gain == 2;
+               nphy->ipa5g_on = sprom->fem.ghz5.extpa_gain == 2;
+       }
 }
 
 static void b43_nphy_op_free(struct b43_wldev *dev)
index fbf5202..56ef97b 100644 (file)
 
 struct b43_wldev;
 
+enum b43_nphy_spur_avoid {
+       B43_SPUR_AVOID_DISABLE,
+       B43_SPUR_AVOID_AUTO,
+       B43_SPUR_AVOID_FORCE,
+};
+
 struct b43_chanspec {
        u16 center_freq;
        enum nl80211_channel_type channel_type;
@@ -785,6 +791,7 @@ struct b43_phy_n {
        u16 mphase_txcal_bestcoeffs[11];
 
        bool txpwrctrl;
+       bool pwg_gain_5ghz;
        u8 tx_pwr_idx[2];
        u16 adj_pwr_tbl[84];
        u16 txcal_bbmult;
@@ -803,6 +810,7 @@ struct b43_phy_n {
        u16 classifier_state;
        u16 clip_state[2];
 
+       enum b43_nphy_spur_avoid spur_avoid;
        bool aband_spurwar_en;
        bool gband_spurwar_en;
 
index fcff923..d07b412 100644 (file)
@@ -539,7 +539,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
                /* Not enough memory on the queue. */
                err = -EBUSY;
                ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
-               q->stopped = 1;
+               q->stopped = true;
                goto out;
        }
 
@@ -566,7 +566,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
            (q->free_packet_slots == 0)) {
                /* The queue is full. */
                ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
-               q->stopped = 1;
+               q->stopped = true;
        }
 
 out:
@@ -601,7 +601,7 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
 
        if (q->stopped) {
                ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
-               q->stopped = 0;
+               q->stopped = false;
        }
 }
 
index a01f776..ce037fb 100644 (file)
@@ -1572,14 +1572,14 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_syn[] = {
        [B2056_SYN_PLL_XTAL5]           = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
        [B2056_SYN_PLL_XTAL6]           = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
        [B2056_SYN_PLL_REFDIV]          = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
-       [B2056_SYN_PLL_PFD]             = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+       [B2056_SYN_PLL_PFD]             = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
        [B2056_SYN_PLL_CP1]             = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
-       [B2056_SYN_PLL_CP2]             = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
+       [B2056_SYN_PLL_CP2]             = { .ghz5 = 0x003f, .ghz2 = 0x003f, UPLOAD, },
        [B2056_SYN_PLL_CP3]             = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
-       [B2056_SYN_PLL_LOOPFILTER1]     = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
-       [B2056_SYN_PLL_LOOPFILTER2]     = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
+       [B2056_SYN_PLL_LOOPFILTER1]     = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
+       [B2056_SYN_PLL_LOOPFILTER2]     = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
        [B2056_SYN_PLL_LOOPFILTER3]     = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
-       [B2056_SYN_PLL_LOOPFILTER4]     = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+       [B2056_SYN_PLL_LOOPFILTER4]     = { .ghz5 = 0x002b, .ghz2 = 0x002b, UPLOAD, },
        [B2056_SYN_PLL_LOOPFILTER5]     = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
        [B2056_SYN_PLL_MMD1]            = { .ghz5 = 0x001c, .ghz2 = 0x001c, NOUPLOAD, },
        [B2056_SYN_PLL_MMD2]            = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
@@ -9055,6 +9055,21 @@ void b2056_upload_inittabs(struct b43_wldev *dev,
                                B2056_RX1, pts->rx, pts->rx_length);
 }
 
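+/* Re-upload the SYN PLL CP2 init table value for the current band */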
+void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5)
+{
+       struct b2056_inittabs_pts *pts;
+       const struct b2056_inittab_entry *e;
+
+       if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) {
+               B43_WARN_ON(1);
+               return;
+       }
+       pts = &b2056_inittabs[dev->phy.rev];
+       e = &pts->syn[B2056_SYN_PLL_CP2];
+
+       b43_radio_write(dev, B2056_SYN_PLL_CP2, ghz5 ? e->ghz5 : e->ghz2);
+}
+
 const struct b43_nphy_channeltab_entry_rev3 *
 b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq)
 {
index a7159d8..5b86673 100644 (file)
@@ -1090,6 +1090,7 @@ struct b43_nphy_channeltab_entry_rev3 {
 
 void b2056_upload_inittabs(struct b43_wldev *dev,
                           bool ghz5, bool ignore_uploadflag);
+void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5);
 
 /* Get the NPHY Channel Switch Table entry for a channel.
  * Returns NULL on failure to find an entry. */
index 7b326f2..3252560 100644 (file)
@@ -2171,6 +2171,48 @@ static const u16 b43_ntab_loftlt1_r3[] = {
        0x0000, 0x0000,
 };
 
+/* Volatile tables, PHY revision >= 3 */
+
+/* indexed by antswctl2g */
+static const u16 b43_ntab_antswctl2g_r3[4][32] = {
+       {
+               0x0082, 0x0082, 0x0211, 0x0222, 0x0328,
+               0x0000, 0x0000, 0x0000, 0x0144, 0x0000,
+               0x0000, 0x0000, 0x0188, 0x0000, 0x0000,
+               0x0000, 0x0082, 0x0082, 0x0211, 0x0222,
+               0x0328, 0x0000, 0x0000, 0x0000, 0x0144,
+               0x0000, 0x0000, 0x0000, 0x0188, 0x0000,
+               0x0000, 0x0000,
+       },
+       {
+               0x0022, 0x0022, 0x0011, 0x0022, 0x0022,
+               0x0000, 0x0000, 0x0000, 0x0011, 0x0000,
+               0x0000, 0x0000, 0x0022, 0x0000, 0x0000,
+               0x0000, 0x0022, 0x0022, 0x0011, 0x0022,
+               0x0022, 0x0000, 0x0000, 0x0000, 0x0011,
+               0x0000, 0x0000, 0x0000, 0x0022, 0x0000,
+               0x0000, 0x0000,
+       },
+       {
+               0x0088, 0x0088, 0x0044, 0x0088, 0x0088,
+               0x0000, 0x0000, 0x0000, 0x0044, 0x0000,
+               0x0000, 0x0000, 0x0088, 0x0000, 0x0000,
+               0x0000, 0x0088, 0x0088, 0x0044, 0x0088,
+               0x0088, 0x0000, 0x0000, 0x0000, 0x0044,
+               0x0000, 0x0000, 0x0000, 0x0088, 0x0000,
+               0x0000, 0x0000,
+       },
+       {
+               0x0022, 0x0022, 0x0011, 0x0022, 0x0000,
+               0x0000, 0x0000, 0x0000, 0x0011, 0x0000,
+               0x0000, 0x0000, 0x0022, 0x0000, 0x0000,
+               0x03cc, 0x0022, 0x0022, 0x0011, 0x0022,
+               0x0000, 0x0000, 0x0000, 0x0000, 0x0011,
+               0x0000, 0x0000, 0x0000, 0x0022, 0x0000,
+               0x0000, 0x03cc,
+       }
+};
+
 /* TX gain tables */
 const u32 b43_ntab_tx_gain_rev0_1_2[] = {
        0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42,
@@ -2652,7 +2694,7 @@ const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
 const s16 tbl_tx_filter_coef_rev4[7][15] = {
        {  -377,   137,  -407,   208, -1527,
            956,    93,   186,    93,   230,
-           -44,   230,    20,  -191,   201 },
+           -44,   230,   201,  -191,   201 },
        {   -77,    20,   -98,    49,   -93,
             60,    56,   111,    56,    26,
             -5,    26,    34,   -32,    34 },
@@ -2838,9 +2880,8 @@ u32 b43_ntab_read(struct b43_wldev *dev, u32 offset)
                break;
        case B43_NTAB_32BIT:
                b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
-               value = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI);
-               value <<= 16;
-               value |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+               value = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+               value |= b43_phy_read(dev, B43_NPHY_TABLE_DATAHI) << 16;
                break;
        default:
                B43_WARN_ON(1);
@@ -2864,6 +2905,12 @@ void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset,
        b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
 
        for (i = 0; i < nr_elements; i++) {
+               /* Auto increment broken + caching issue on BCM43224? */
+               if (dev->dev->chip_id == 43224 && dev->dev->chip_rev == 1) {
+                       b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+                       b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset + i);
+               }
+
                switch (type) {
                case B43_NTAB_8BIT:
                        *data = b43_phy_read(dev, B43_NPHY_TABLE_DATALO) & 0xFF;
@@ -2874,9 +2921,10 @@ void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset,
                        data += 2;
                        break;
                case B43_NTAB_32BIT:
-                       *((u32 *)data) = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI);
-                       *((u32 *)data) <<= 16;
-                       *((u32 *)data) |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+                       *((u32 *)data) =
+                               b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+                       *((u32 *)data) |=
+                               b43_phy_read(dev, B43_NPHY_TABLE_DATAHI) << 16;
                        data += 4;
                        break;
                default:
@@ -2932,6 +2980,13 @@ void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
        b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
 
        for (i = 0; i < nr_elements; i++) {
+               /* Auto increment broken + caching issue on BCM43224? */
+               if ((offset >> 10) == 9 && dev->dev->chip_id == 43224 &&
+                   dev->dev->chip_rev == 1) {
+                       b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+                       b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset + i);
+               }
+
                switch (type) {
                case B43_NTAB_8BIT:
                        value = *data;
@@ -2999,6 +3054,8 @@ void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
        } while (0)
 void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
 {
+       struct ssb_sprom *sprom = dev->dev->bus_sprom;
+
        /* Static tables */
        ntab_upload_r3(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3);
        ntab_upload_r3(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3);
@@ -3029,7 +3086,11 @@ void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
        ntab_upload_r3(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3);
 
        /* Volatile tables */
-       /* TODO */
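+       /* Upload the 2 GHz antenna software control table selected by the SPROM */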
+       if (sprom->fem.ghz2.antswlut < ARRAY_SIZE(b43_ntab_antswctl2g_r3))
+               ntab_upload_r3(dev, B43_NTAB_ANT_SW_CTL_R3,
+                              b43_ntab_antswctl2g_r3[sprom->fem.ghz2.antswlut]);
+       else
+               B43_WARN_ON(1);
 }
 
 struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
index a81696b..97038c4 100644 (file)
@@ -126,26 +126,29 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
 #define B43_NTAB_C1_LOFEEDTH           B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */
 #define B43_NTAB_C1_LOFEEDTH_SIZE      128
 
+/* Volatile N-PHY tables, PHY revision >= 3 */
+#define B43_NTAB_ANT_SW_CTL_R3         B43_NTAB16( 9,   0) /* antenna software control */
+
 /* Static N-PHY tables, PHY revision >= 3 */
-#define B43_NTAB_FRAMESTRUCT_R3                B43_NTAB32(10, 000) /* frame struct  */
-#define B43_NTAB_PILOT_R3              B43_NTAB16(11, 000) /* pilot  */
-#define B43_NTAB_TMAP_R3               B43_NTAB32(12, 000) /* TM AP  */
-#define B43_NTAB_INTLEVEL_R3           B43_NTAB32(13, 000) /* INT LV  */
-#define B43_NTAB_TDTRN_R3              B43_NTAB32(14, 000) /* TD TRN  */
-#define B43_NTAB_NOISEVAR0_R3          B43_NTAB32(16, 000) /* noise variance 0  */
+#define B43_NTAB_FRAMESTRUCT_R3                B43_NTAB32(10,   0) /* frame struct  */
+#define B43_NTAB_PILOT_R3              B43_NTAB16(11,   0) /* pilot  */
+#define B43_NTAB_TMAP_R3               B43_NTAB32(12,   0) /* TM AP  */
+#define B43_NTAB_INTLEVEL_R3           B43_NTAB32(13,   0) /* INT LV  */
+#define B43_NTAB_TDTRN_R3              B43_NTAB32(14,   0) /* TD TRN  */
+#define B43_NTAB_NOISEVAR0_R3          B43_NTAB32(16,   0) /* noise variance 0  */
 #define B43_NTAB_NOISEVAR1_R3          B43_NTAB32(16, 128) /* noise variance 1  */
-#define B43_NTAB_MCS_R3                        B43_NTAB16(18, 000) /* MCS  */
+#define B43_NTAB_MCS_R3                        B43_NTAB16(18,   0) /* MCS  */
 #define B43_NTAB_TDI20A0_R3            B43_NTAB32(19, 128) /* TDI 20/0  */
 #define B43_NTAB_TDI20A1_R3            B43_NTAB32(19, 256) /* TDI 20/1  */
 #define B43_NTAB_TDI40A0_R3            B43_NTAB32(19, 640) /* TDI 40/0  */
 #define B43_NTAB_TDI40A1_R3            B43_NTAB32(19, 768) /* TDI 40/1  */
-#define B43_NTAB_PILOTLT_R3            B43_NTAB32(20, 000) /* PLT lookup  */
-#define B43_NTAB_CHANEST_R3            B43_NTAB32(22, 000) /* channel estimate  */
-#define B43_NTAB_FRAMELT_R3            B43_NTAB8 (24, 000) /* frame lookup  */
-#define B43_NTAB_C0_ESTPLT_R3          B43_NTAB8 (26, 000) /* estimated power lookup 0  */
-#define B43_NTAB_C1_ESTPLT_R3          B43_NTAB8 (27, 000) /* estimated power lookup 1  */
-#define B43_NTAB_C0_ADJPLT_R3          B43_NTAB8 (26, 064) /* adjusted power lookup 0  */
-#define B43_NTAB_C1_ADJPLT_R3          B43_NTAB8 (27, 064) /* adjusted power lookup 1  */
+#define B43_NTAB_PILOTLT_R3            B43_NTAB32(20,   0) /* PLT lookup  */
+#define B43_NTAB_CHANEST_R3            B43_NTAB32(22,   0) /* channel estimate  */
+#define B43_NTAB_FRAMELT_R3             B43_NTAB8(24,   0) /* frame lookup  */
+#define B43_NTAB_C0_ESTPLT_R3           B43_NTAB8(26,   0) /* estimated power lookup 0  */
+#define B43_NTAB_C1_ESTPLT_R3           B43_NTAB8(27,   0) /* estimated power lookup 1  */
+#define B43_NTAB_C0_ADJPLT_R3           B43_NTAB8(26,  64) /* adjusted power lookup 0  */
+#define B43_NTAB_C1_ADJPLT_R3           B43_NTAB8(27,  64) /* adjusted power lookup 1  */
 #define B43_NTAB_C0_GAINCTL_R3         B43_NTAB32(26, 192) /* gain control lookup 0  */
 #define B43_NTAB_C1_GAINCTL_R3         B43_NTAB32(27, 192) /* gain control lookup 1  */
 #define B43_NTAB_C0_IQLT_R3            B43_NTAB32(26, 320) /* I/Q lookup 0  */
index 5f77cbe..2c53678 100644 (file)
@@ -874,7 +874,7 @@ bool b43_fill_txstatus_report(struct b43_wldev *dev,
                              struct ieee80211_tx_info *report,
                              const struct b43_txstatus *status)
 {
-       bool frame_success = 1;
+       bool frame_success = true;
        int retry_limit;
 
        /* preserve the configured retry limit before clearing the status
@@ -890,7 +890,7 @@ bool b43_fill_txstatus_report(struct b43_wldev *dev,
                /* The frame was not ACKed... */
                if (!(report->flags & IEEE80211_TX_CTL_NO_ACK)) {
                        /* ...but we expected an ACK. */
-                       frame_success = 0;
+                       frame_success = false;
                }
        }
        if (status->frame_count == 0) {
index c5535ad..1ee31c5 100644 (file)
@@ -715,7 +715,7 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
        ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
        ring->index = controller_index;
        if (for_tx) {
-               ring->tx = 1;
+               ring->tx = true;
                ring->current_slot = -1;
        } else {
                if (ring->index == 0) {
@@ -806,7 +806,7 @@ void b43legacy_dma_free(struct b43legacy_wldev *dev)
 static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
 {
        u64 orig_mask = mask;
-       bool fallback = 0;
+       bool fallback = false;
        int err;
 
        /* Try to set the DMA mask. If it fails, try falling back to a
@@ -820,12 +820,12 @@ static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
                }
                if (mask == DMA_BIT_MASK(64)) {
                        mask = DMA_BIT_MASK(32);
-                       fallback = 1;
+                       fallback = true;
                        continue;
                }
                if (mask == DMA_BIT_MASK(32)) {
                        mask = DMA_BIT_MASK(30);
-                       fallback = 1;
+                       fallback = true;
                        continue;
                }
                b43legacyerr(dev->wl, "The machine/kernel does not support "
@@ -858,7 +858,7 @@ int b43legacy_dma_init(struct b43legacy_wldev *dev)
 #ifdef CONFIG_B43LEGACY_PIO
                b43legacywarn(dev->wl, "DMA for this device not supported. "
                        "Falling back to PIO\n");
-               dev->__using_pio = 1;
+               dev->__using_pio = true;
                return -EAGAIN;
 #else
                b43legacyerr(dev->wl, "DMA for this device not supported and "
@@ -1068,7 +1068,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
        memset(meta, 0, sizeof(*meta));
 
        meta->skb = skb;
-       meta->is_last_fragment = 1;
+       meta->is_last_fragment = true;
 
        meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
        /* create a bounce buffer in zone_dma on mapping failure. */
@@ -1187,7 +1187,7 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
            should_inject_overflow(ring)) {
                /* This TX ring is full. */
                ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring));
-               ring->stopped = 1;
+               ring->stopped = true;
                if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
                        b43legacydbg(dev->wl, "Stopped TX ring %d\n",
                               ring->index);
@@ -1286,7 +1286,7 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
        if (ring->stopped) {
                B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
                ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring));
-               ring->stopped = 0;
+               ring->stopped = false;
                if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
                        b43legacydbg(dev->wl, "Woke up TX ring %d\n",
                               ring->index);
index 2f1bfdc..fd45653 100644 (file)
@@ -203,11 +203,11 @@ void b43legacy_leds_init(struct b43legacy_wldev *dev)
                if (sprom[i] == 0xFF) {
                        /* There is no LED information in the SPROM
                         * for this LED. Hardcode it here. */
-                       activelow = 0;
+                       activelow = false;
                        switch (i) {
                        case 0:
                                behaviour = B43legacy_LED_ACTIVITY;
-                               activelow = 1;
+                               activelow = true;
                                if (bus->boardinfo.vendor == PCI_VENDOR_ID_COMPAQ)
                                        behaviour = B43legacy_LED_RADIO_ALL;
                                break;
index 20f0243..200138c 100644 (file)
@@ -722,9 +722,9 @@ void b43legacy_wireless_core_reset(struct b43legacy_wldev *dev, u32 flags)
        macctl &= ~B43legacy_MACCTL_GMODE;
        if (flags & B43legacy_TMSLOW_GMODE) {
                macctl |= B43legacy_MACCTL_GMODE;
-               dev->phy.gmode = 1;
+               dev->phy.gmode = true;
        } else
-               dev->phy.gmode = 0;
+               dev->phy.gmode = false;
        macctl |= B43legacy_MACCTL_IHR_ENABLED;
        b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl);
 }
@@ -811,7 +811,7 @@ static void b43legacy_calculate_link_quality(struct b43legacy_wldev *dev)
        if (dev->noisecalc.calculation_running)
                return;
        dev->noisecalc.channel_at_start = dev->phy.channel;
-       dev->noisecalc.calculation_running = 1;
+       dev->noisecalc.calculation_running = true;
        dev->noisecalc.nr_samples = 0;
 
        b43legacy_generate_noise_sample(dev);
@@ -873,7 +873,7 @@ static void handle_irq_noise(struct b43legacy_wldev *dev)
 
                dev->stats.link_noise = average;
 drop_calculation:
-               dev->noisecalc.calculation_running = 0;
+               dev->noisecalc.calculation_running = false;
                return;
        }
 generate_new:
@@ -889,7 +889,7 @@ static void handle_irq_tbtt_indication(struct b43legacy_wldev *dev)
                        b43legacy_power_saving_ctl_bits(dev, -1, -1);
        }
        if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_ADHOC))
-               dev->dfq_valid = 1;
+               dev->dfq_valid = true;
 }
 
 static void handle_irq_atim_end(struct b43legacy_wldev *dev)
@@ -898,7 +898,7 @@ static void handle_irq_atim_end(struct b43legacy_wldev *dev)
                b43legacy_write32(dev, B43legacy_MMIO_MACCMD,
                                  b43legacy_read32(dev, B43legacy_MMIO_MACCMD)
                                  | B43legacy_MACCMD_DFQ_VALID);
-               dev->dfq_valid = 0;
+               dev->dfq_valid = false;
        }
 }
 
@@ -971,7 +971,7 @@ static void b43legacy_write_beacon_template(struct b43legacy_wldev *dev,
        unsigned int i, len, variable_len;
        const struct ieee80211_mgmt *bcn;
        const u8 *ie;
-       bool tim_found = 0;
+       bool tim_found = false;
        unsigned int rate;
        u16 ctl;
        int antenna;
@@ -1019,7 +1019,7 @@ static void b43legacy_write_beacon_template(struct b43legacy_wldev *dev,
                        /* A valid TIM is at least 4 bytes long. */
                        if (ie_len < 4)
                                break;
-                       tim_found = 1;
+                       tim_found = true;
 
                        tim_position = sizeof(struct b43legacy_plcp_hdr6);
                        tim_position += offsetof(struct ieee80211_mgmt,
@@ -1172,7 +1172,7 @@ static void b43legacy_upload_beacon0(struct b43legacy_wldev *dev)
         *        but we don't use that feature anyway. */
        b43legacy_write_probe_resp_template(dev, 0x268, 0x4A,
                                      &__b43legacy_ratetable[3]);
-       wl->beacon0_uploaded = 1;
+       wl->beacon0_uploaded = true;
 }
 
 static void b43legacy_upload_beacon1(struct b43legacy_wldev *dev)
@@ -1182,7 +1182,7 @@ static void b43legacy_upload_beacon1(struct b43legacy_wldev *dev)
        if (wl->beacon1_uploaded)
                return;
        b43legacy_write_beacon_template(dev, 0x468, 0x1A);
-       wl->beacon1_uploaded = 1;
+       wl->beacon1_uploaded = true;
 }
 
 static void handle_irq_beacon(struct b43legacy_wldev *dev)
@@ -1212,7 +1212,7 @@ static void handle_irq_beacon(struct b43legacy_wldev *dev)
        if (unlikely(wl->beacon_templates_virgin)) {
                /* We never uploaded a beacon before.
                 * Upload both templates now, but only mark one valid. */
-               wl->beacon_templates_virgin = 0;
+               wl->beacon_templates_virgin = false;
                b43legacy_upload_beacon0(dev);
                b43legacy_upload_beacon1(dev);
                cmd = b43legacy_read32(dev, B43legacy_MMIO_MACCMD);
@@ -1275,8 +1275,8 @@ static void b43legacy_update_templates(struct b43legacy_wl *wl)
        if (wl->current_beacon)
                dev_kfree_skb_any(wl->current_beacon);
        wl->current_beacon = beacon;
-       wl->beacon0_uploaded = 0;
-       wl->beacon1_uploaded = 0;
+       wl->beacon0_uploaded = false;
+       wl->beacon1_uploaded = false;
        ieee80211_queue_work(wl->hw, &wl->beacon_update_trigger);
 }
 
@@ -2510,7 +2510,7 @@ static int find_wldev_for_phymode(struct b43legacy_wl *wl,
                if (d->phy.possible_phymodes & phymode) {
                        /* Ok, this device supports the PHY-mode.
                         * Set the gmode bit. */
-                       *gmode = 1;
+                       *gmode = true;
                        *dev = d;
 
                        return 0;
@@ -2546,7 +2546,7 @@ static int b43legacy_switch_phymode(struct b43legacy_wl *wl,
        struct b43legacy_wldev *uninitialized_var(up_dev);
        struct b43legacy_wldev *down_dev;
        int err;
-       bool gmode = 0;
+       bool gmode = false;
        int prev_status;
 
        err = find_wldev_for_phymode(wl, new_mode, &up_dev, &gmode);
@@ -3044,12 +3044,12 @@ static void setup_struct_phy_for_init(struct b43legacy_wldev *dev,
 
        /* Assume the radio is enabled. If it's not enabled, the state will
         * immediately get fixed on the first periodic work run. */
-       dev->radio_hw_enable = 1;
+       dev->radio_hw_enable = true;
 
        phy->savedpctlreg = 0xFFFF;
-       phy->aci_enable = 0;
-       phy->aci_wlan_automatic = 0;
-       phy->aci_hw_rssi = 0;
+       phy->aci_enable = false;
+       phy->aci_wlan_automatic = false;
+       phy->aci_hw_rssi = false;
 
        lo = phy->_lo_pairs;
        if (lo)
@@ -3081,7 +3081,7 @@ static void setup_struct_phy_for_init(struct b43legacy_wldev *dev,
 static void setup_struct_wldev_for_init(struct b43legacy_wldev *dev)
 {
        /* Flags */
-       dev->dfq_valid = 0;
+       dev->dfq_valid = false;
 
        /* Stats */
        memset(&dev->stats, 0, sizeof(dev->stats));
@@ -3187,9 +3187,9 @@ static void prepare_phy_data_for_init(struct b43legacy_wldev *dev)
        phy->lofcal = 0xFFFF;
        phy->initval = 0xFFFF;
 
-       phy->aci_enable = 0;
-       phy->aci_wlan_automatic = 0;
-       phy->aci_hw_rssi = 0;
+       phy->aci_enable = false;
+       phy->aci_wlan_automatic = false;
+       phy->aci_hw_rssi = false;
 
        phy->antenna_diversity = 0xFFFF;
        memset(phy->minlowsig, 0xFF, sizeof(phy->minlowsig));
@@ -3355,7 +3355,7 @@ static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
        b43legacydbg(wl, "Adding Interface type %d\n", vif->type);
 
        dev = wl->current_dev;
-       wl->operating = 1;
+       wl->operating = true;
        wl->vif = vif;
        wl->if_type = vif->type;
        memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
@@ -3389,7 +3389,7 @@ static void b43legacy_op_remove_interface(struct ieee80211_hw *hw,
        B43legacy_WARN_ON(wl->vif != vif);
        wl->vif = NULL;
 
-       wl->operating = 0;
+       wl->operating = false;
 
        spin_lock_irqsave(&wl->irq_lock, flags);
        b43legacy_adjust_opmode(dev);
@@ -3413,10 +3413,10 @@ static int b43legacy_op_start(struct ieee80211_hw *hw)
        memset(wl->bssid, 0, ETH_ALEN);
        memset(wl->mac_addr, 0, ETH_ALEN);
        wl->filter_flags = 0;
-       wl->beacon0_uploaded = 0;
-       wl->beacon1_uploaded = 0;
-       wl->beacon_templates_virgin = 1;
-       wl->radio_enabled = 1;
+       wl->beacon0_uploaded = false;
+       wl->beacon1_uploaded = false;
+       wl->beacon_templates_virgin = true;
+       wl->radio_enabled = true;
 
        mutex_lock(&wl->mutex);
 
@@ -3455,7 +3455,7 @@ static void b43legacy_op_stop(struct ieee80211_hw *hw)
        if (b43legacy_status(dev) >= B43legacy_STAT_STARTED)
                b43legacy_wireless_core_stop(dev);
        b43legacy_wireless_core_exit(dev);
-       wl->radio_enabled = 0;
+       wl->radio_enabled = false;
        mutex_unlock(&wl->mutex);
 }
 
@@ -3614,7 +3614,7 @@ static int b43legacy_wireless_core_attach(struct b43legacy_wldev *dev)
                have_bphy = 1;
 
        dev->phy.gmode = (have_gphy || have_bphy);
-       dev->phy.radio_on = 1;
+       dev->phy.radio_on = true;
        tmp = dev->phy.gmode ? B43legacy_TMSLOW_GMODE : 0;
        b43legacy_wireless_core_reset(dev, tmp);
 
@@ -3705,7 +3705,7 @@ static int b43legacy_one_core_attach(struct ssb_device *dev,
                     (void (*)(unsigned long))b43legacy_interrupt_tasklet,
                     (unsigned long)wldev);
        if (modparam_pio)
-               wldev->__using_pio = 1;
+               wldev->__using_pio = true;
        INIT_LIST_HEAD(&wldev->list);
 
        err = b43legacy_wireless_core_attach(wldev);
index 475eb14..fcbafcd 100644 (file)
@@ -1067,7 +1067,7 @@ b43legacy_radio_interference_mitigation_enable(struct b43legacy_wldev *dev,
                if (b43legacy_phy_read(dev, 0x0033) & 0x0800)
                        break;
 
-               phy->aci_enable = 1;
+               phy->aci_enable = true;
 
                phy_stacksave(B43legacy_PHY_RADIO_BITFIELD);
                phy_stacksave(B43legacy_PHY_G_CRS);
@@ -1279,7 +1279,7 @@ b43legacy_radio_interference_mitigation_disable(struct b43legacy_wldev *dev,
                if (!(b43legacy_phy_read(dev, 0x0033) & 0x0800))
                        break;
 
-               phy->aci_enable = 0;
+               phy->aci_enable = false;
 
                phy_stackrestore(B43legacy_PHY_RADIO_BITFIELD);
                phy_stackrestore(B43legacy_PHY_G_CRS);
@@ -1346,10 +1346,10 @@ int b43legacy_radio_set_interference_mitigation(struct b43legacy_wldev *dev,
            (phy->rev == 0) || (!phy->gmode))
                return -ENODEV;
 
-       phy->aci_wlan_automatic = 0;
+       phy->aci_wlan_automatic = false;
        switch (mode) {
        case B43legacy_RADIO_INTERFMODE_AUTOWLAN:
-               phy->aci_wlan_automatic = 1;
+               phy->aci_wlan_automatic = true;
                if (phy->aci_enable)
                        mode = B43legacy_RADIO_INTERFMODE_MANUALWLAN;
                else
@@ -1371,8 +1371,8 @@ int b43legacy_radio_set_interference_mitigation(struct b43legacy_wldev *dev,
                                                                currentmode);
 
        if (mode == B43legacy_RADIO_INTERFMODE_NONE) {
-               phy->aci_enable = 0;
-               phy->aci_hw_rssi = 0;
+               phy->aci_enable = false;
+               phy->aci_hw_rssi = false;
        } else
                b43legacy_radio_interference_mitigation_enable(dev, mode);
        phy->interfmode = mode;
@@ -2102,7 +2102,7 @@ void b43legacy_radio_turn_on(struct b43legacy_wldev *dev)
                                            phy->radio_off_context.rfover);
                        b43legacy_phy_write(dev, B43legacy_PHY_RFOVERVAL,
                                            phy->radio_off_context.rfoverval);
-                       phy->radio_off_context.valid = 0;
+                       phy->radio_off_context.valid = false;
                }
                channel = phy->channel;
                err = b43legacy_radio_selectchannel(dev,
@@ -2113,7 +2113,7 @@ void b43legacy_radio_turn_on(struct b43legacy_wldev *dev)
        default:
                B43legacy_BUG_ON(1);
        }
-       phy->radio_on = 1;
+       phy->radio_on = true;
 }
 
 void b43legacy_radio_turn_off(struct b43legacy_wldev *dev, bool force)
@@ -2131,14 +2131,14 @@ void b43legacy_radio_turn_off(struct b43legacy_wldev *dev, bool force)
                if (!force) {
                        phy->radio_off_context.rfover = rfover;
                        phy->radio_off_context.rfoverval = rfoverval;
-                       phy->radio_off_context.valid = 1;
+                       phy->radio_off_context.valid = true;
                }
                b43legacy_phy_write(dev, B43legacy_PHY_RFOVER, rfover | 0x008C);
                b43legacy_phy_write(dev, B43legacy_PHY_RFOVERVAL,
                                    rfoverval & 0xFF73);
        } else
                b43legacy_phy_write(dev, 0x0015, 0xAA00);
-       phy->radio_on = 0;
+       phy->radio_on = false;
        b43legacydbg(dev->wl, "Radio initialized\n");
 }
 
index 2069fc8..8f54c2e 100644 (file)
@@ -3,9 +3,8 @@ config BRCMUTIL
 
 config BRCMSMAC
        tristate "Broadcom IEEE802.11n PCIe SoftMAC WLAN driver"
-       depends on PCI
        depends on MAC80211
-       depends on BCMA=n
+       depends on BCMA
        select BRCMUTIL
        select FW_LOADER
        select CRC_CCITT
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h b/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h
deleted file mode 100644 (file)
index cecb5e5..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 2011 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _bcmchip_h_
-#define _bcmchip_h_
-
-/* bcm4329 */
-/* firmware name */
-#define BCM4329_FW_NAME                        "brcm/bcm4329-fullmac-4.bin"
-#define BCM4329_NV_NAME                        "brcm/bcm4329-fullmac-4.txt"
-
-#endif                         /* _bcmchip_h_ */
index 89ff94d..6c85d66 100644 (file)
@@ -222,19 +222,12 @@ bool brcmf_sdcard_regfail(struct brcmf_sdio_dev *sdiodev)
        return sdiodev->regfail;
 }
 
-int
-brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-                     uint flags,
-                     u8 *buf, uint nbytes, struct sk_buff *pkt)
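+/* Shared receive setup: select the backplane window for the target address
+ * and flag 4-byte accesses where needed.
+ */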
+static int brcmf_sdcard_recv_prepare(struct brcmf_sdio_dev *sdiodev, uint fn,
+                                    uint flags, uint width, u32 *addr)
 {
-       int status;
-       uint incr_fix;
-       uint width;
-       uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+       uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
        int err = 0;
 
-       brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", fn, addr, nbytes);
-
        /* Async not implemented yet */
        if (flags & SDIO_REQ_ASYNC)
                return -ENOTSUPP;
@@ -247,29 +240,114 @@ brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
                sdiodev->sbwad = bar0;
        }
 
-       addr &= SBSDIO_SB_OFT_ADDR_MASK;
+       *addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+       if (width == 4)
+               *addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+       return 0;
+}
+
+int
+brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                     uint flags, u8 *buf, uint nbytes)
+{
+       struct sk_buff *mypkt;
+       int err;
+
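+       /* Receive into a temporary packet, then copy into the caller's buffer */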
+       mypkt = brcmu_pkt_buf_get_skb(nbytes);
+       if (!mypkt) {
+               brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
+                         nbytes);
+               return -EIO;
+       }
+
+       err = brcmf_sdcard_recv_pkt(sdiodev, addr, fn, flags, mypkt);
+       if (!err)
+               memcpy(buf, mypkt->data, nbytes);
+
+       brcmu_pkt_buf_free_skb(mypkt);
+       return err;
+}
+
+int
+brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                     uint flags, struct sk_buff *pkt)
+{
+       uint incr_fix;
+       uint width;
+       int err = 0;
+
+       brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
+                 fn, addr, pkt->len);
+
+       width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+       err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
+       if (err)
+               return err;
 
        incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+       err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ,
+                                        fn, addr, pkt);
+
+       return err;
+}
+
+int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                           uint flags, struct sk_buff_head *pktq)
+{
+       uint incr_fix;
+       uint width;
+       int err = 0;
+
+       brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
+                 fn, addr, pktq->qlen);
+
        width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
-       if (width == 4)
-               addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+       err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
+       if (err)
+               return err;
 
-       status = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ,
-                                           fn, addr, width, nbytes, buf, pkt);
+       incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+       err = brcmf_sdioh_request_chain(sdiodev, incr_fix, SDIOH_READ, fn, addr,
+                                       pktq);
 
-       return status;
+       return err;
 }
 
 int
 brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-                     uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt)
+                     uint flags, u8 *buf, uint nbytes)
+{
+       struct sk_buff *mypkt;
+       int err;
+
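+       /* Wrap the flat buffer in a temporary packet so the packet path can be reused */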
+       mypkt = brcmu_pkt_buf_get_skb(nbytes);
+       if (!mypkt) {
+               brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
+                         nbytes);
+               return -EIO;
+       }
+
+       memcpy(mypkt->data, buf, nbytes);
+       err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, mypkt);
+
+       brcmu_pkt_buf_free_skb(mypkt);
+       return err;
+
+}
+
+int
+brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                     uint flags, struct sk_buff *pkt)
 {
        uint incr_fix;
        uint width;
        uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
        int err = 0;
 
-       brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", fn, addr, nbytes);
+       brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
+                 fn, addr, pkt->len);
 
        /* Async not implemented yet */
        if (flags & SDIO_REQ_ASYNC)
@@ -291,18 +369,39 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
                addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
 
        return brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn,
-                                         addr, width, nbytes, buf, pkt);
+                                         addr, pkt);
 }
 
 int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
                        u8 *buf, uint nbytes)
 {
+       struct sk_buff *mypkt;
+       bool write = rw ? SDIOH_WRITE : SDIOH_READ;
+       int err;
+
        addr &= SBSDIO_SB_OFT_ADDR_MASK;
        addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
 
-       return brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC,
-               (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1,
-               addr, 4, nbytes, buf, NULL);
+       mypkt = brcmu_pkt_buf_get_skb(nbytes);
+       if (!mypkt) {
+               brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
+                         nbytes);
+               return -EIO;
+       }
+
+       /* For a write, copy the buffer data into the packet. */
+       if (write)
+               memcpy(mypkt->data, buf, nbytes);
+
+       err = brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC, write,
+                                        SDIO_FUNC_1, addr, mypkt);
+
+       /* For a read, copy the packet data back to the buffer. */
+       if (!err && !write)
+               memcpy(buf, mypkt->data, nbytes);
+
+       brcmu_pkt_buf_free_skb(mypkt);
+       return err;
 }
 
 int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
@@ -333,7 +432,7 @@ int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
        sdiodev->sbwad = SI_ENUM_BASE;
 
        /* try to attach to the target device */
-       sdiodev->bus = brcmf_sdbrcm_probe(0, 0, 0, 0, regs, sdiodev);
+       sdiodev->bus = brcmf_sdbrcm_probe(regs, sdiodev);
        if (!sdiodev->bus) {
                brcmf_dbg(ERROR, "device attach failed\n");
                ret = -ENODEV;
index bbaeb2d..b895f19 100644 (file)
@@ -40,6 +40,7 @@
 #define DMA_ALIGN_MASK 0x03
 
 #define SDIO_DEVICE_ID_BROADCOM_4329   0x4329
+#define SDIO_DEVICE_ID_BROADCOM_4330   0x4330
 
 #define SDIO_FUNC1_BLOCKSIZE           64
 #define SDIO_FUNC2_BLOCKSIZE           512
@@ -47,6 +48,7 @@
 /* devices we support, null terminated */
 static const struct sdio_device_id brcmf_sdmmc_ids[] = {
        {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
+       {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
        { /* end: all zeroes */ },
 };
 MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
@@ -204,62 +206,75 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
        return err_ret;
 }
 
+/* precondition: host controller is claimed */
 static int
-brcmf_sdioh_request_packet(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
-                          uint write, uint func, uint addr,
-                          struct sk_buff *pkt)
+brcmf_sdioh_request_data(struct brcmf_sdio_dev *sdiodev, uint write, bool fifo,
+                        uint func, uint addr, struct sk_buff *pkt, uint pktlen)
+{
+       int err_ret = 0;
+
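+       /* Writes use sdio_memcpy_toio() regardless of FIFO vs. incrementing addressing */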
+       if ((write) && (!fifo)) {
+               err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
+                                          ((u8 *) (pkt->data)), pktlen);
+       } else if (write) {
+               err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
+                                          ((u8 *) (pkt->data)), pktlen);
+       } else if (fifo) {
+               err_ret = sdio_readsb(sdiodev->func[func],
+                                     ((u8 *) (pkt->data)), addr, pktlen);
+       } else {
+               err_ret = sdio_memcpy_fromio(sdiodev->func[func],
+                                            ((u8 *) (pkt->data)),
+                                            addr, pktlen);
+       }
+
+       return err_ret;
+}
+
+/*
+ * This function takes a queue of packets. The packets on the queue
+ * are assumed to be properly aligned by the caller.
+ */
+int
+brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
+                         uint write, uint func, uint addr,
+                         struct sk_buff_head *pktq)
 {
        bool fifo = (fix_inc == SDIOH_DATA_FIX);
        u32 SGCount = 0;
        int err_ret = 0;
 
-       struct sk_buff *pnext;
+       struct sk_buff *pkt;
 
        brcmf_dbg(TRACE, "Enter\n");
 
-       brcmf_pm_resume_wait(sdiodev, &sdiodev->request_packet_wait);
+       brcmf_pm_resume_wait(sdiodev, &sdiodev->request_chain_wait);
        if (brcmf_pm_resume_error(sdiodev))
                return -EIO;
 
        /* Claim host controller */
        sdio_claim_host(sdiodev->func[func]);
-       for (pnext = pkt; pnext; pnext = pnext->next) {
-               uint pkt_len = pnext->len;
+
+       skb_queue_walk(pktq, pkt) {
+               uint pkt_len = pkt->len;
                pkt_len += 3;
                pkt_len &= 0xFFFFFFFC;
 
-               if ((write) && (!fifo)) {
-                       err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
-                                                  ((u8 *) (pnext->data)),
-                                                  pkt_len);
-               } else if (write) {
-                       err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
-                                                  ((u8 *) (pnext->data)),
-                                                  pkt_len);
-               } else if (fifo) {
-                       err_ret = sdio_readsb(sdiodev->func[func],
-                                             ((u8 *) (pnext->data)),
-                                             addr, pkt_len);
-               } else {
-                       err_ret = sdio_memcpy_fromio(sdiodev->func[func],
-                                                    ((u8 *) (pnext->data)),
-                                                    addr, pkt_len);
-               }
-
+               err_ret = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
+                                                  addr, pkt, pkt_len);
                if (err_ret) {
                        brcmf_dbg(ERROR, "%s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
-                                 write ? "TX" : "RX", pnext, SGCount, addr,
+                                 write ? "TX" : "RX", pkt, SGCount, addr,
                                  pkt_len, err_ret);
                } else {
                        brcmf_dbg(TRACE, "%s xfr'd %p[%d], addr=0x%05x, len=%d\n",
-                                 write ? "TX" : "RX", pnext, SGCount, addr,
+                                 write ? "TX" : "RX", pkt, SGCount, addr,
                                  pkt_len);
                }
-
                if (!fifo)
                        addr += pkt_len;
-               SGCount++;
 
+               SGCount++;
        }
 
        /* Release host controller */
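
As a point of reference, a minimal caller-side sketch of the new chain
interface (the sdiodev handle, the target address "addr" and the 512-byte
read size are assumed placeholders, not taken from this patch); every packet
queued must already meet the alignment requirement stated in the comment
above brcmf_sdioh_request_chain():

        struct sk_buff_head pktq;
        struct sk_buff *skb;
        int err;

        skb_queue_head_init(&pktq);
        skb = brcmu_pkt_buf_get_skb(512);       /* hypothetical buffer size */
        if (skb)
                skb_queue_tail(&pktq, skb);

        /* read (write == 0) with incrementing addresses on SDIO function 2 */
        err = brcmf_sdioh_request_chain(sdiodev, SDIOH_DATA_INC, 0,
                                        SDIO_FUNC_2, addr, &pktq);
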
@@ -270,91 +285,45 @@ brcmf_sdioh_request_packet(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
 }
 
 /*
- * This function takes a buffer or packet, and fixes everything up
- * so that in the end, a DMA-able packet is created.
- *
- * A buffer does not have an associated packet pointer,
- * and may or may not be aligned.
- * A packet may consist of a single packet, or a packet chain.
- * If it is a packet chain, then all the packets in the chain
- * must be properly aligned.
- *
- * If the packet data is not aligned, then there may only be
- * one packet, and in this case,  it is copied to a new
- * aligned packet.
- *
+ * This function takes a single DMA-able packet.
  */
 int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
                               uint fix_inc, uint write, uint func, uint addr,
-                              uint reg_width, uint buflen_u, u8 *buffer,
                               struct sk_buff *pkt)
 {
-       int Status;
-       struct sk_buff *mypkt = NULL;
+       int status;
+       uint pkt_len;
+       bool fifo = (fix_inc == SDIOH_DATA_FIX);
 
        brcmf_dbg(TRACE, "Enter\n");
 
+       if (pkt == NULL)
+               return -EINVAL;
+
        brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
        if (brcmf_pm_resume_error(sdiodev))
                return -EIO;
-       /* Case 1: we don't have a packet. */
-       if (pkt == NULL) {
-               brcmf_dbg(DATA, "Creating new %s Packet, len=%d\n",
-                         write ? "TX" : "RX", buflen_u);
-               mypkt = brcmu_pkt_buf_get_skb(buflen_u);
-               if (!mypkt) {
-                       brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
-                                 buflen_u);
-                       return -EIO;
-               }
-
-               /* For a write, copy the buffer data into the packet. */
-               if (write)
-                       memcpy(mypkt->data, buffer, buflen_u);
-
-               Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
-                                                   func, addr, mypkt);
-
-               /* For a read, copy the packet data back to the buffer. */
-               if (!write)
-                       memcpy(buffer, mypkt->data, buflen_u);
-
-               brcmu_pkt_buf_free_skb(mypkt);
-       } else if (((ulong) (pkt->data) & DMA_ALIGN_MASK) != 0) {
-               /*
-                * Case 2: We have a packet, but it is unaligned.
-                * In this case, we cannot have a chain (pkt->next == NULL)
-                */
-               brcmf_dbg(DATA, "Creating aligned %s Packet, len=%d\n",
-                         write ? "TX" : "RX", pkt->len);
-               mypkt = brcmu_pkt_buf_get_skb(pkt->len);
-               if (!mypkt) {
-                       brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
-                                 pkt->len);
-                       return -EIO;
-               }
 
-               /* For a write, copy the buffer data into the packet. */
-               if (write)
-                       memcpy(mypkt->data, pkt->data, pkt->len);
-
-               Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
-                                                   func, addr, mypkt);
+       /* Claim host controller */
+       sdio_claim_host(sdiodev->func[func]);
 
-               /* For a read, copy the packet data back to the buffer. */
-               if (!write)
-                       memcpy(pkt->data, mypkt->data, mypkt->len);
+       pkt_len = pkt->len;
+       pkt_len += 3;
+       pkt_len &= (uint)~3;
 
-               brcmu_pkt_buf_free_skb(mypkt);
-       } else {                /* case 3: We have a packet and
-                                it is aligned. */
-               brcmf_dbg(DATA, "Aligned %s Packet, direct DMA\n",
-                         write ? "Tx" : "Rx");
-               Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
-                                                   func, addr, pkt);
+       status = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
+                                          addr, pkt, pkt_len);
+       if (status) {
+               brcmf_dbg(ERROR, "%s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
+                         write ? "TX" : "RX", pkt, addr, pkt_len, status);
+       } else {
+               brcmf_dbg(TRACE, "%s xfr'd %p, addr=0x%05x, len=%d\n",
+                         write ? "TX" : "RX", pkt, addr, pkt_len);
        }
 
-       return Status;
+       /* Release host controller */
+       sdio_release_host(sdiodev->func[func]);
+
+       return status;
 }
 
 /* Read client card reg */
@@ -494,6 +463,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
 {
        int ret = 0;
        struct brcmf_sdio_dev *sdiodev;
+       struct brcmf_bus *bus_if;
        brcmf_dbg(TRACE, "Enter\n");
        brcmf_dbg(TRACE, "func->class=%x\n", func->class);
        brcmf_dbg(TRACE, "sdio_vendor: 0x%04x\n", func->vendor);
@@ -505,17 +475,25 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
                        brcmf_dbg(ERROR, "card private drvdata occupied\n");
                        return -ENXIO;
                }
+               bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
+               if (!bus_if)
+                       return -ENOMEM;
                sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
-               if (!sdiodev)
+               if (!sdiodev) {
+                       kfree(bus_if);
                        return -ENOMEM;
+               }
                sdiodev->func[0] = func->card->sdio_func[0];
                sdiodev->func[1] = func;
+               sdiodev->bus_if = bus_if;
+               bus_if->bus_priv = sdiodev;
+               bus_if->type = SDIO_BUS;
                dev_set_drvdata(&func->card->dev, sdiodev);
 
                atomic_set(&sdiodev->suspend, false);
                init_waitqueue_head(&sdiodev->request_byte_wait);
                init_waitqueue_head(&sdiodev->request_word_wait);
-               init_waitqueue_head(&sdiodev->request_packet_wait);
+               init_waitqueue_head(&sdiodev->request_chain_wait);
                init_waitqueue_head(&sdiodev->request_buffer_wait);
        }
 
@@ -525,6 +503,10 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
                        return -ENODEV;
                sdiodev->func[2] = func;
 
+               bus_if = sdiodev->bus_if;
+               sdiodev->dev = &func->dev;
+               dev_set_drvdata(&func->dev, bus_if);
+
                brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_probe...\n");
                ret = brcmf_sdio_probe(sdiodev);
        }
@@ -534,6 +516,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
 
 static void brcmf_ops_sdio_remove(struct sdio_func *func)
 {
+       struct brcmf_bus *bus_if;
        struct brcmf_sdio_dev *sdiodev;
        brcmf_dbg(TRACE, "Enter\n");
        brcmf_dbg(INFO, "func->class=%x\n", func->class);
@@ -542,10 +525,13 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
        brcmf_dbg(INFO, "Function#: 0x%04x\n", func->num);
 
        if (func->num == 2) {
-               sdiodev = dev_get_drvdata(&func->card->dev);
+               bus_if = dev_get_drvdata(&func->dev);
+               sdiodev = bus_if->bus_priv;
                brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_remove...\n");
                brcmf_sdio_remove(sdiodev);
                dev_set_drvdata(&func->card->dev, NULL);
+               dev_set_drvdata(&func->dev, NULL);
+               kfree(bus_if);
                kfree(sdiodev);
        }
 }
@@ -554,14 +540,12 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
 static int brcmf_sdio_suspend(struct device *dev)
 {
        mmc_pm_flag_t sdio_flags;
-       struct brcmf_sdio_dev *sdiodev;
        struct sdio_func *func = dev_to_sdio_func(dev);
+       struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
        int ret = 0;
 
        brcmf_dbg(TRACE, "\n");
 
-       sdiodev = dev_get_drvdata(&func->card->dev);
-
        atomic_set(&sdiodev->suspend, true);
 
        sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
@@ -583,10 +567,9 @@ static int brcmf_sdio_suspend(struct device *dev)
 
 static int brcmf_sdio_resume(struct device *dev)
 {
-       struct brcmf_sdio_dev *sdiodev;
        struct sdio_func *func = dev_to_sdio_func(dev);
+       struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
 
-       sdiodev = dev_get_drvdata(&func->card->dev);
        brcmf_sdio_wdtmr_enable(sdiodev, true);
        atomic_set(&sdiodev->suspend, false);
        return 0;
@@ -610,17 +593,26 @@ static struct sdio_driver brcmf_sdmmc_driver = {
 #endif /* CONFIG_PM_SLEEP */
 };
 
-/* bus register interface */
-int brcmf_bus_register(void)
+static void __exit brcmf_sdio_exit(void)
 {
        brcmf_dbg(TRACE, "Enter\n");
 
-       return sdio_register_driver(&brcmf_sdmmc_driver);
+       sdio_unregister_driver(&brcmf_sdmmc_driver);
 }
 
-void brcmf_bus_unregister(void)
+static int __init brcmf_sdio_init(void)
 {
+       int ret;
+
        brcmf_dbg(TRACE, "Enter\n");
 
-       sdio_unregister_driver(&brcmf_sdmmc_driver);
+       ret = sdio_register_driver(&brcmf_sdmmc_driver);
+
+       if (ret)
+               brcmf_dbg(ERROR, "sdio_register_driver failed: %d\n", ret);
+
+       return ret;
 }
+
+module_init(brcmf_sdio_init);
+module_exit(brcmf_sdio_exit);
index 6da519e..ed60f4d 100644 (file)
@@ -87,7 +87,7 @@
 #define TOE_TX_CSUM_OL         0x00000001
 #define TOE_RX_CSUM_OL         0x00000002
 
-#define        BRCMF_BSS_INFO_VERSION  108 /* curr ver of brcmf_bss_info_le struct */
+#define        BRCMF_BSS_INFO_VERSION  109 /* curr ver of brcmf_bss_info_le struct */
 
 /* size of brcmf_scan_params not including variable length array */
 #define BRCMF_SCAN_PARAMS_FIXED_SIZE 64
@@ -571,8 +571,14 @@ struct brcmf_dcmd {
        uint needed;            /* bytes needed (optional) */
 };
 
+struct brcmf_bus {
+       u8 type;                /* bus type */
+       void *bus_priv;         /* pointer to bus private structure */
+       enum brcmf_bus_state state;
+};
+
 /* Forward decls for struct brcmf_pub (see below) */
-struct brcmf_bus;              /* device bus info */
+struct brcmf_sdio;             /* device bus info */
 struct brcmf_proto;    /* device communication protocol info */
 struct brcmf_info;     /* device driver info */
 struct brcmf_cfg80211_dev; /* cfg80211 device info */
@@ -580,15 +586,16 @@ struct brcmf_cfg80211_dev; /* cfg80211 device info */
 /* Common structure for module and instance linkage */
 struct brcmf_pub {
        /* Linkage pointers */
-       struct brcmf_bus *bus;
+       struct brcmf_sdio *bus;
+       struct brcmf_bus *bus_if;
        struct brcmf_proto *prot;
        struct brcmf_info *info;
        struct brcmf_cfg80211_dev *config;
+       struct device *dev;             /* fullmac dongle device pointer */
 
        /* Internal brcmf items */
        bool up;                /* Driver up/down (to OS) */
        bool txoff;             /* Transmit flow-controlled */
-       enum brcmf_bus_state busstate;
        uint hdrlen;            /* Total BRCMF header length (proto + bus) */
        uint maxctl;            /* Max size rxctl request from proto to bus */
        uint rxsz;              /* Rx buffer size bus module should use */
@@ -656,7 +663,6 @@ struct brcmf_pub {
 
        u8 country_code[BRCM_CNTRY_BUF_SZ];
        char eventmask[BRCMF_EVENTING_MASK_LEN];
-
 };
 
 struct brcmf_if_event {
@@ -681,8 +687,8 @@ extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen,
  * Returned structure should have bus and prot pointers filled in.
  * bus_hdrlen specifies required headroom for bus module header.
  */
-extern struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus,
-                                     uint bus_hdrlen);
+extern struct brcmf_pub *brcmf_attach(struct brcmf_sdio *bus,
+                                     uint bus_hdrlen, struct device *dev);
 extern int brcmf_net_attach(struct brcmf_pub *drvr, int idx);
 extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
 
@@ -699,7 +705,16 @@ extern bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q,
 
 /* Receive frame for delivery to OS.  Callee disposes of rxp. */
 extern void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx,
-                        struct sk_buff *rxp, int numpkt);
+                          struct sk_buff_head *rxlist);
+static inline void brcmf_rx_packet(struct brcmf_pub *drvr, int ifidx,
+                                  struct sk_buff *pkt)
+{
+       struct sk_buff_head q;
+
+       skb_queue_head_init(&q);
+       skb_queue_tail(&q, pkt);
+       brcmf_rx_frame(drvr, ifidx, &q);
+}
 
 /* Return pointer to interface name */
 extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
@@ -724,8 +739,6 @@ extern int brcmf_c_host_event(struct brcmf_info *drvr_priv, int *idx,
                              void *pktdata, struct brcmf_event_msg *,
                              void **data_ptr);
 
-extern void brcmf_c_init(void);
-
 extern int brcmf_add_if(struct brcmf_info *drvr_priv, int ifidx,
                        char *name, u8 *mac_addr);
 extern void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx);
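
A short sketch of the reworked receive hand-off from a bus driver's point of
view (drvr, ifidx and the skb pointers are assumed placeholders): a chain of
frames is collected on an sk_buff_head and delivered in a single call, while
a lone frame can go through the brcmf_rx_packet() wrapper defined above.

        struct sk_buff_head rxlist;

        skb_queue_head_init(&rxlist);
        skb_queue_tail(&rxlist, skb_a);
        skb_queue_tail(&rxlist, skb_b);
        brcmf_rx_frame(drvr, ifidx, &rxlist);   /* callee disposes of the skbs */

        brcmf_rx_packet(drvr, ifidx, single_skb);       /* single-frame shortcut */
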
index a249407..1841f99 100644 (file)
  * Exported from brcmf bus module (brcmf_usb, brcmf_sdio)
  */
 
-/* Indicate (dis)interest in finding dongles. */
-extern int brcmf_bus_register(void);
-extern void brcmf_bus_unregister(void);
-
-/* obtain linux device object providing bus function */
-extern struct device *brcmf_bus_get_device(struct brcmf_bus *bus);
-
 /* Stop bus module: clear pending frames, disable data flow */
-extern void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus);
+extern void brcmf_sdbrcm_bus_stop(struct device *dev);
 
 /* Initialize bus module: prepare for communication w/dongle */
-extern int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr);
+extern int brcmf_sdbrcm_bus_init(struct device *dev);
 
 /* Send a data frame to the dongle.  Callee disposes of txp. */
-extern int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *txp);
+extern int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *txp);
 
 /* Send/receive a control message to/from the dongle.
  * Expects caller to enforce a single outstanding transaction.
  */
 extern int
-brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen);
+brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen);
 
 extern int
-brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen);
+brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen);
 
-extern void brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick);
+extern void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
 
 #endif                         /* _BRCMF_BUS_H_ */
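
With the prototypes above now taking a struct device, each bus entry point is
expected to recover its private state from the driver data attached at probe
time; a sketch of that lookup, mirroring the pattern the SDIO implementation
in this series uses:

        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
        struct brcmf_sdio *bus = sdiodev->bus;
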
index e34c5c3..ebd53aa 100644 (file)
@@ -58,7 +58,7 @@ struct brcmf_proto_cdc_dcmd {
  * Used on data packets to convey priority across USB.
  */
 #define        BDC_HEADER_LEN          4
-#define BDC_PROTO_VER          1       /* Protocol version */
+#define BDC_PROTO_VER          2       /* Protocol version */
 #define BDC_FLAG_VER_MASK      0xf0    /* Protocol version mask */
 #define BDC_FLAG_VER_SHIFT     4       /* Protocol version shift */
 #define BDC_FLAG_SUM_GOOD      0x04    /* Good RX checksums */
@@ -77,7 +77,7 @@ struct brcmf_proto_bdc_header {
        u8 flags;
        u8 priority;    /* 802.1d Priority, 4:7 flow control info for usb */
        u8 flags2;
-       u8 rssi;
+       u8 data_offset;
 };
 
 
@@ -116,7 +116,7 @@ static int brcmf_proto_cdc_msg(struct brcmf_pub *drvr)
                len = CDC_MAX_MSG_SIZE;
 
        /* Send request */
-       return brcmf_sdbrcm_bus_txctl(drvr->bus, (unsigned char *)&prot->msg,
+       return brcmf_sdbrcm_bus_txctl(drvr->dev, (unsigned char *)&prot->msg,
                                      len);
 }
 
@@ -128,7 +128,7 @@ static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
        brcmf_dbg(TRACE, "Enter\n");
 
        do {
-               ret = brcmf_sdbrcm_bus_rxctl(drvr->bus,
+               ret = brcmf_sdbrcm_bus_rxctl(drvr->dev,
                                (unsigned char *)&prot->msg,
                                len + sizeof(struct brcmf_proto_cdc_dcmd));
                if (ret < 0)
@@ -280,7 +280,7 @@ brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx, struct brcmf_dcmd *dcmd,
        struct brcmf_proto *prot = drvr->prot;
        int ret = -1;
 
-       if (drvr->busstate == BRCMF_BUS_DOWN) {
+       if (drvr->bus_if->state == BRCMF_BUS_DOWN) {
                brcmf_dbg(ERROR, "bus is down. we have nothing to do.\n");
                return ret;
        }
@@ -372,7 +372,7 @@ void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx,
 
        h->priority = (pktbuf->priority & BDC_PRIORITY_MASK);
        h->flags2 = 0;
-       h->rssi = 0;
+       h->data_offset = 0;
        BDC_SET_IF_IDX(h, ifidx);
 }
 
index 40928e5..69f335a 100644 (file)
@@ -32,8 +32,6 @@
 #define PKTFILTER_BUF_SIZE             2048
 #define BRCMF_ARPOL_MODE               0xb     /* agent|snoop|peer_autoreply */
 
-int brcmf_msg_level;
-
 #define MSGTRACE_VERSION       1
 
 #define BRCMF_PKT_FILTER_FIXED_LEN     offsetof(struct brcmf_pkt_filter_le, u)
@@ -85,19 +83,6 @@ brcmf_c_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
        return len;
 }
 
-void brcmf_c_init(void)
-{
-       /* Init global variables at run-time, not as part of the declaration.
-        * This is required to support init/de-init of the driver.
-        * Initialization
-        * of globals as part of the declaration results in non-deterministic
-        * behaviour since the value of the globals may be different on the
-        * first time that the driver is initialized vs subsequent
-        * initializations.
-        */
-       brcmf_msg_level = BRCMF_ERROR_VAL;
-}
-
 bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q,
                      struct sk_buff *pkt, int prec)
 {
index 719fd93..2c3a99d 100644 (file)
@@ -43,7 +43,6 @@
 #include "dhd_proto.h"
 #include "dhd_dbg.h"
 #include "wl_cfg80211.h"
-#include "bcmchip.h"
 
 MODULE_AUTHOR("Broadcom Corporation");
 MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN fullmac driver.");
@@ -77,6 +76,7 @@ struct brcmf_info {
 };
 
 /* Error bits */
+int brcmf_msg_level = BRCMF_ERROR_VAL;
 module_param(brcmf_msg_level, int, 0);
 
 int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name)
@@ -292,7 +292,7 @@ int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf)
        struct brcmf_info *drvr_priv = drvr->info;
 
        /* Reject if down */
-       if (!drvr->up || (drvr->busstate == BRCMF_BUS_DOWN))
+       if (!drvr->up || (drvr->bus_if->state == BRCMF_BUS_DOWN))
                return -ENODEV;
 
        /* Update multicast statistic */
@@ -310,7 +310,7 @@ int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf)
        brcmf_proto_hdrpush(drvr, ifidx, pktbuf);
 
        /* Use bus module to send data frame */
-       return brcmf_sdbrcm_bus_txdata(drvr->bus, pktbuf);
+       return brcmf_sdbrcm_bus_txdata(drvr->dev, pktbuf);
 }
 
 static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
@@ -322,9 +322,11 @@ static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        brcmf_dbg(TRACE, "Enter\n");
 
        /* Reject if down */
-       if (!drvr_priv->pub.up || (drvr_priv->pub.busstate == BRCMF_BUS_DOWN)) {
-               brcmf_dbg(ERROR, "xmit rejected pub.up=%d busstate=%d\n",
-                         drvr_priv->pub.up, drvr_priv->pub.busstate);
+       if (!drvr_priv->pub.up ||
+           (drvr_priv->pub.bus_if->state == BRCMF_BUS_DOWN)) {
+               brcmf_dbg(ERROR, "xmit rejected pub.up=%d state=%d\n",
+                         drvr_priv->pub.up,
+                         drvr_priv->pub.bus_if->state);
                netif_stop_queue(ndev);
                return -ENODEV;
        }
@@ -397,26 +399,21 @@ static int brcmf_host_event(struct brcmf_info *drvr_priv, int *ifidx,
        return bcmerror;
 }
 
-void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb,
-                 int numpkt)
+void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx,
+                   struct sk_buff_head *skb_list)
 {
        struct brcmf_info *drvr_priv = drvr->info;
        unsigned char *eth;
        uint len;
        void *data;
-       struct sk_buff *pnext, *save_pktbuf;
-       int i;
+       struct sk_buff *skb, *pnext;
        struct brcmf_if *ifp;
        struct brcmf_event_msg event;
 
        brcmf_dbg(TRACE, "Enter\n");
 
-       save_pktbuf = skb;
-
-       for (i = 0; skb && i < numpkt; i++, skb = pnext) {
-
-               pnext = skb->next;
-               skb->next = NULL;
+       skb_queue_walk_safe(skb_list, skb, pnext) {
+               skb_unlink(skb, skb_list);
 
                /* Get the protocol, maintain skb around eth_type_trans()
                 * The main reason for this hack is for the limitation of
@@ -437,6 +434,12 @@ void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb,
                if (ifp == NULL)
                        ifp = drvr_priv->iflist[0];
 
+               if (!ifp || !ifp->ndev ||
+                   ifp->ndev->reg_state != NETREG_REGISTERED) {
+                       brcmu_pkt_buf_free_skb(skb);
+                       continue;
+               }
+
                skb->dev = ifp->ndev;
                skb->protocol = eth_type_trans(skb, skb->dev);
 
@@ -605,9 +608,7 @@ static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
 
        sprintf(info->driver, KBUILD_MODNAME);
        sprintf(info->version, "%lu", drvr_priv->pub.drv_version);
-       sprintf(info->fw_version, "%s", BCM4329_FW_NAME);
-       sprintf(info->bus_info, "%s",
-               dev_name(brcmf_bus_get_device(drvr_priv->pub.bus)));
+       sprintf(info->bus_info, "%s", dev_name(drvr_priv->pub.dev));
 }
 
 static struct ethtool_ops brcmf_ethtool_ops = {
@@ -761,7 +762,7 @@ s32 brcmf_exec_dcmd(struct net_device *ndev, u32 cmd, void *arg, u32 len)
                buflen = min_t(uint, dcmd.len, BRCMF_DCMD_MAXLEN);
 
        /* send to dongle (must be up, and wl) */
-       if ((drvr_priv->pub.busstate != BRCMF_BUS_DATA)) {
+       if ((drvr_priv->pub.bus_if->state != BRCMF_BUS_DATA)) {
                brcmf_dbg(ERROR, "DONGLE_DOWN\n");
                err = -EIO;
                goto done;
@@ -804,7 +805,7 @@ static int brcmf_netdev_stop(struct net_device *ndev)
                return 0;
 
        /* Set state and stop OS transmissions */
-       drvr->up = 0;
+       drvr->up = false;
        netif_stop_queue(ndev);
 
        return 0;
@@ -841,7 +842,7 @@ static int brcmf_netdev_open(struct net_device *ndev)
        }
        /* Allow transmit calls */
        netif_start_queue(ndev);
-       drvr_priv->pub.up = 1;
+       drvr_priv->pub.up = true;
        if (brcmf_cfg80211_up(drvr_priv->pub.config)) {
                brcmf_dbg(ERROR, "failed to bring up cfg80211\n");
                return -1;
@@ -940,7 +941,8 @@ void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx)
        }
 }
 
-struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
+struct brcmf_pub *brcmf_attach(struct brcmf_sdio *bus, uint bus_hdrlen,
+                              struct device *dev)
 {
        struct brcmf_info *drvr_priv = NULL;
 
@@ -959,6 +961,8 @@ struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
        /* Link to bus module */
        drvr_priv->pub.bus = bus;
        drvr_priv->pub.hdrlen = bus_hdrlen;
+       drvr_priv->pub.bus_if = dev_get_drvdata(dev);
+       drvr_priv->pub.dev = dev;
 
        /* Attach and link in the protocol */
        if (brcmf_proto_attach(&drvr_priv->pub) != 0) {
@@ -988,14 +992,14 @@ int brcmf_bus_start(struct brcmf_pub *drvr)
        brcmf_dbg(TRACE, "\n");
 
        /* Bring up the bus */
-       ret = brcmf_sdbrcm_bus_init(&drvr_priv->pub);
+       ret = brcmf_sdbrcm_bus_init(drvr_priv->pub.dev);
        if (ret != 0) {
                brcmf_dbg(ERROR, "brcmf_sdbrcm_bus_init failed %d\n", ret);
                return ret;
        }
 
        /* If bus is not ready, can't come up */
-       if (drvr_priv->pub.busstate != BRCMF_BUS_DATA) {
+       if (drvr_priv->pub.bus_if->state != BRCMF_BUS_DATA) {
                brcmf_dbg(ERROR, "failed bus is not ready\n");
                return -ENODEV;
        }
@@ -1077,10 +1081,7 @@ int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx)
 
        /* attach to cfg80211 for primary interface */
        if (!ifidx) {
-               drvr->config =
-                       brcmf_cfg80211_attach(ndev,
-                                             brcmf_bus_get_device(drvr->bus),
-                                             drvr);
+               drvr->config = brcmf_cfg80211_attach(ndev, drvr->dev, drvr);
                if (drvr->config == NULL) {
                        brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n");
                        goto fail;
@@ -1114,7 +1115,7 @@ static void brcmf_bus_detach(struct brcmf_pub *drvr)
                        brcmf_proto_stop(&drvr_priv->pub);
 
                        /* Stop the bus module */
-                       brcmf_sdbrcm_bus_stop(drvr_priv->pub.bus);
+                       brcmf_sdbrcm_bus_stop(drvr_priv->pub.dev);
                }
        }
 }
@@ -1148,34 +1149,6 @@ void brcmf_detach(struct brcmf_pub *drvr)
        }
 }
 
-static void __exit brcmf_module_cleanup(void)
-{
-       brcmf_dbg(TRACE, "Enter\n");
-
-       brcmf_bus_unregister();
-}
-
-static int __init brcmf_module_init(void)
-{
-       int error;
-
-       brcmf_dbg(TRACE, "Enter\n");
-
-       error = brcmf_bus_register();
-
-       if (error) {
-               brcmf_dbg(ERROR, "brcmf_bus_register failed\n");
-               goto failed;
-       }
-       return 0;
-
-failed:
-       return -EINVAL;
-}
-
-module_init(brcmf_module_init);
-module_exit(brcmf_module_cleanup);
-
 int brcmf_os_proto_block(struct brcmf_pub *drvr)
 {
        struct brcmf_info *drvr_priv = drvr->info;
index 22913af..43ba0dd 100644 (file)
@@ -91,7 +91,6 @@ struct rte_console {
 #include "dhd_bus.h"
 #include "dhd_proto.h"
 #include "dhd_dbg.h"
-#include <bcmchip.h>
 
 #define TXQLEN         2048    /* bulk tx queue length */
 #define TXHI           (TXQLEN - 256)  /* turn on flow control above TXHI */
@@ -310,6 +309,11 @@ struct rte_console {
 /* Flags for SDH calls */
 #define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
 
+#define BRCMFMAC_FW_NAME       "brcm/brcmfmac.bin"
+#define BRCMFMAC_NV_NAME       "brcm/brcmfmac.txt"
+MODULE_FIRMWARE(BRCMFMAC_FW_NAME);
+MODULE_FIRMWARE(BRCMFMAC_NV_NAME);
+
 /*
  * Conversion of 802.1D priority to precedence level
  */
@@ -445,7 +449,7 @@ struct sdpcm_shared_le {
 
 /* misc chip info needed by some of the routines */
 /* Private data for SDIO bus interaction */
-struct brcmf_bus {
+struct brcmf_sdio {
        struct brcmf_pub *drvr;
 
        struct brcmf_sdio_dev *sdiodev; /* sdio device handler */
@@ -562,9 +566,7 @@ struct brcmf_bus {
 
        struct semaphore sdsem;
 
-       const char *fw_name;
        const struct firmware *firmware;
-       const char *nv_name;
        u32 fw_ptr;
 };
 
@@ -602,7 +604,7 @@ static void pkt_align(struct sk_buff *p, int len, int align)
 }
 
 /* To check if there's window offered */
-static bool data_ok(struct brcmf_bus *bus)
+static bool data_ok(struct brcmf_sdio *bus)
 {
        return (u8)(bus->tx_max - bus->tx_seq) != 0 &&
               ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
@@ -613,7 +615,7 @@ static bool data_ok(struct brcmf_bus *bus)
  * addresses on the 32-bit backplane bus.
  */
 static void
-r_sdreg32(struct brcmf_bus *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
+r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
 {
        u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
        *retryvar = 0;
@@ -633,7 +635,7 @@ r_sdreg32(struct brcmf_bus *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
 }
 
 static void
-w_sdreg32(struct brcmf_bus *bus, u32 regval, u32 reg_offset, u32 *retryvar)
+w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset, u32 *retryvar)
 {
        u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
        *retryvar = 0;
@@ -658,14 +660,14 @@ w_sdreg32(struct brcmf_bus *bus, u32 regval, u32 reg_offset, u32 *retryvar)
 /* Packet free applicable unconditionally for sdio and sdspi.
  * Conditional if bufpool was present for gspi bus.
  */
-static void brcmf_sdbrcm_pktfree2(struct brcmf_bus *bus, struct sk_buff *pkt)
+static void brcmf_sdbrcm_pktfree2(struct brcmf_sdio *bus, struct sk_buff *pkt)
 {
        if (bus->usebufpool)
                brcmu_pkt_buf_free_skb(pkt);
 }
 
 /* Turn backplane clock on or off */
-static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
+static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
 {
        int err;
        u8 clkctl, clkreq, devctl;
@@ -786,7 +788,7 @@ static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
 }
 
 /* Change idle/active SD state */
-static int brcmf_sdbrcm_sdclk(struct brcmf_bus *bus, bool on)
+static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on)
 {
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -799,7 +801,7 @@ static int brcmf_sdbrcm_sdclk(struct brcmf_bus *bus, bool on)
 }
 
 /* Transition SD and backplane clock readiness */
-static int brcmf_sdbrcm_clkctl(struct brcmf_bus *bus, uint target, bool pendok)
+static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
 {
 #ifdef BCMDBG
        uint oldstate = bus->clkstate;
@@ -855,7 +857,7 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_bus *bus, uint target, bool pendok)
        return 0;
 }
 
-static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
+static int brcmf_sdbrcm_bussleep(struct brcmf_sdio *bus, bool sleep)
 {
        uint retries = 0;
 
@@ -927,13 +929,13 @@ static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
        return 0;
 }
 
-static void bus_wake(struct brcmf_bus *bus)
+static void bus_wake(struct brcmf_sdio *bus)
 {
        if (bus->sleeping)
                brcmf_sdbrcm_bussleep(bus, false);
 }
 
-static u32 brcmf_sdbrcm_hostmail(struct brcmf_bus *bus)
+static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
 {
        u32 intstatus = 0;
        u32 hmb_data;
@@ -1009,7 +1011,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_bus *bus)
        return intstatus;
 }
 
-static void brcmf_sdbrcm_rxfail(struct brcmf_bus *bus, bool abort, bool rtx)
+static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
 {
        uint retries = 0;
        u16 lastrbc;
@@ -1066,11 +1068,11 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_bus *bus, bool abort, bool rtx)
 
        /* If we can't reach the device, signal failure */
        if (err || brcmf_sdcard_regfail(bus->sdiodev))
-               bus->drvr->busstate = BRCMF_BUS_DOWN;
+               bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
 }
 
 /* copy a buffer into a pkt buffer chain */
-static uint brcmf_sdbrcm_glom_from_buf(struct brcmf_bus *bus, uint len)
+static uint brcmf_sdbrcm_glom_from_buf(struct brcmf_sdio *bus, uint len)
 {
        uint n, ret = 0;
        struct sk_buff *p;
@@ -1093,7 +1095,7 @@ static uint brcmf_sdbrcm_glom_from_buf(struct brcmf_bus *bus, uint len)
 }
 
 /* return total length of buffer chain */
-static uint brcmf_sdbrcm_glom_len(struct brcmf_bus *bus)
+static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus)
 {
        struct sk_buff *p;
        uint total;
@@ -1104,7 +1106,7 @@ static uint brcmf_sdbrcm_glom_len(struct brcmf_bus *bus)
        return total;
 }
 
-static void brcmf_sdbrcm_free_glom(struct brcmf_bus *bus)
+static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
 {
        struct sk_buff *cur, *next;
 
@@ -1114,13 +1116,13 @@ static void brcmf_sdbrcm_free_glom(struct brcmf_bus *bus)
        }
 }
 
-static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
+static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 {
        u16 dlen, totlen;
        u8 *dptr, num = 0;
 
        u16 sublen, check;
-       struct sk_buff *pfirst, *plast, *pnext, *save_pfirst;
+       struct sk_buff *pfirst, *pnext;
 
        int errcode;
        u8 chan, seq, doff, sfdoff;
@@ -1137,7 +1139,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
 
        /* If there's a descriptor, generate the packet chain */
        if (bus->glomd) {
-               pfirst = plast = pnext = NULL;
+               pfirst = pnext = NULL;
                dlen = (u16) (bus->glomd->len);
                dptr = bus->glomd->data;
                if (!dlen || (dlen & 1)) {
@@ -1228,17 +1230,14 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
                 * packet and copy into the chain.
                 */
                if (usechain) {
-                       errcode = brcmf_sdcard_recv_buf(bus->sdiodev,
+                       errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
                                        bus->sdiodev->sbwad,
-                                       SDIO_FUNC_2,
-                                       F2SYNC, (u8 *) pfirst->data, dlen,
-                                       pfirst);
+                                       SDIO_FUNC_2, F2SYNC, &bus->glom);
                } else if (bus->dataptr) {
                        errcode = brcmf_sdcard_recv_buf(bus->sdiodev,
                                        bus->sdiodev->sbwad,
-                                       SDIO_FUNC_2,
-                                       F2SYNC, bus->dataptr, dlen,
-                                       NULL);
+                                       SDIO_FUNC_2, F2SYNC,
+                                       bus->dataptr, dlen);
                        sublen = (u16) brcmf_sdbrcm_glom_from_buf(bus, dlen);
                        if (sublen != dlen) {
                                brcmf_dbg(ERROR, "FAILED TO COPY, dlen %d sublen %d\n",
@@ -1338,10 +1337,14 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
                /* Remove superframe header, remember offset */
                skb_pull(pfirst, doff);
                sfdoff = doff;
+               num = 0;
 
                /* Validate all the subframe headers */
-               for (num = 0, pnext = pfirst; pnext && !errcode;
-                    num++, pnext = pnext->next) {
+               skb_queue_walk(&bus->glom, pnext) {
+                       /* leave when invalid subframe is found */
+                       if (errcode)
+                               break;
+
                        dptr = (u8 *) (pnext->data);
                        dlen = (u16) (pnext->len);
                        sublen = get_unaligned_le16(dptr);
@@ -1374,6 +1377,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
                                          num, doff, sublen, SDPCM_HDRLEN);
                                errcode = -1;
                        }
+                       /* increase the subframe count */
+                       num++;
                }
 
                if (errcode) {
@@ -1394,13 +1399,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
                }
 
                /* Basic SD framing looks ok - process each packet (header) */
-               save_pfirst = pfirst;
-               plast = NULL;
-
-               for (num = 0; pfirst; rxseq++, pfirst = pnext) {
-                       pnext = pfirst->next;
-                       pfirst->next = NULL;
 
+               skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
                        dptr = (u8 *) (pfirst->data);
                        sublen = get_unaligned_le16(dptr);
                        chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
@@ -1420,6 +1420,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
                                bus->rx_badseq++;
                                rxseq = seq;
                        }
+                       rxseq++;
+
 #ifdef BCMDBG
                        if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) {
                                printk(KERN_DEBUG "Rx Subframe Data:\n");
@@ -1432,36 +1434,22 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
                        skb_pull(pfirst, doff);
 
                        if (pfirst->len == 0) {
+                               skb_unlink(pfirst, &bus->glom);
                                brcmu_pkt_buf_free_skb(pfirst);
-                               if (plast)
-                                       plast->next = pnext;
-                               else
-                                       save_pfirst = pnext;
-
                                continue;
                        } else if (brcmf_proto_hdrpull(bus->drvr, &ifidx,
                                                       pfirst) != 0) {
                                brcmf_dbg(ERROR, "rx protocol error\n");
                                bus->drvr->rx_errors++;
+                               skb_unlink(pfirst, &bus->glom);
                                brcmu_pkt_buf_free_skb(pfirst);
-                               if (plast)
-                                       plast->next = pnext;
-                               else
-                                       save_pfirst = pnext;
-
                                continue;
                        }
 
-                       /* this packet will go up, link back into
-                                chain and count it */
-                       pfirst->next = pnext;
-                       plast = pfirst;
-                       num++;
-
 #ifdef BCMDBG
                        if (BRCMF_GLOM_ON()) {
                                brcmf_dbg(GLOM, "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n",
-                                         num, pfirst, pfirst->data,
+                                         bus->glom.qlen, pfirst, pfirst->data,
                                          pfirst->len, pfirst->next,
                                          pfirst->prev);
                                print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
@@ -1470,19 +1458,20 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
                        }
 #endif                         /* BCMDBG */
                }
-               if (num) {
+               /* send any remaining packets up */
+               if (bus->glom.qlen) {
                        up(&bus->sdsem);
-                       brcmf_rx_frame(bus->drvr, ifidx, save_pfirst, num);
+                       brcmf_rx_frame(bus->drvr, ifidx, &bus->glom);
                        down(&bus->sdsem);
                }
 
                bus->rxglomframes++;
-               bus->rxglompkts += num;
+               bus->rxglompkts += bus->glom.qlen;
        }
        return num;
 }
 
-static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_bus *bus, uint *condition,
+static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
                                        bool *pending)
 {
        DECLARE_WAITQUEUE(wait, current);
@@ -1504,7 +1493,7 @@ static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_bus *bus, uint *condition,
        return timeout;
 }
 
-static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_sdio *bus)
 {
        if (waitqueue_active(&bus->dcmd_resp_wait))
                wake_up_interruptible(&bus->dcmd_resp_wait);
@@ -1512,7 +1501,7 @@ static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_bus *bus)
        return 0;
 }
 static void
-brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff)
+brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
 {
        uint rdlen, pad;
 
@@ -1570,8 +1559,7 @@ brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff)
        sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
                                bus->sdiodev->sbwad,
                                SDIO_FUNC_2,
-                               F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen,
-                               NULL);
+                               F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen);
        bus->f2rxdata++;
 
        /* Control frame failures need retransmission */
@@ -1602,7 +1590,7 @@ done:
 }
 
 /* Pad read to blocksize for efficiency */
-static void brcmf_pad(struct brcmf_bus *bus, u16 *pad, u16 *rdlen)
+static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
 {
        if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) {
                *pad = bus->blocksize - (*rdlen % bus->blocksize);
@@ -1615,7 +1603,7 @@ static void brcmf_pad(struct brcmf_bus *bus, u16 *pad, u16 *rdlen)
 }
 
 static void
-brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen,
+brcmf_alloc_pkt_and_read(struct brcmf_sdio *bus, u16 rdlen,
                         struct sk_buff **pkt, u8 **rxbuf)
 {
        int sdret;              /* Return code from calls */
@@ -1627,9 +1615,8 @@ brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen,
        pkt_align(*pkt, rdlen, BRCMF_SDALIGN);
        *rxbuf = (u8 *) ((*pkt)->data);
        /* Read the entire frame */
-       sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
-                                     SDIO_FUNC_2, F2SYNC,
-                                     *rxbuf, rdlen, *pkt);
+       sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
+                                     SDIO_FUNC_2, F2SYNC, *pkt);
        bus->f2rxdata++;
 
        if (sdret < 0) {
@@ -1648,7 +1635,7 @@ brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen,
 
 /* Checks the header */
 static int
-brcmf_check_rxbuf(struct brcmf_bus *bus, struct sk_buff *pkt, u8 *rxbuf,
+brcmf_check_rxbuf(struct brcmf_sdio *bus, struct sk_buff *pkt, u8 *rxbuf,
                  u8 rxseq, u16 nextlen, u16 *len)
 {
        u16 check;
@@ -1704,7 +1691,7 @@ fail:
 
 /* Return true if there may be more frames to read */
 static uint
-brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
+brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
 {
        u16 len, check; /* Extracted hardware header fields */
        u8 chan, seq, doff;     /* Extracted software header fields */
@@ -1727,7 +1714,8 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
        *finished = false;
 
        for (rxseq = bus->rx_seq, rxleft = maxframes;
-            !bus->rxskip && rxleft && bus->drvr->busstate != BRCMF_BUS_DOWN;
+            !bus->rxskip && rxleft &&
+            bus->drvr->bus_if->state != BRCMF_BUS_DOWN;
             rxseq++, rxleft--) {
 
                /* Handle glomming separately */
@@ -1857,7 +1845,7 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
                /* Read frame header (hardware and software) */
                sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
                                              SDIO_FUNC_2, F2SYNC, bus->rxhdr,
-                                             BRCMF_FIRSTREAD, NULL);
+                                             BRCMF_FIRSTREAD);
                bus->f2rxhdrs++;
 
                if (sdret < 0) {
@@ -2006,9 +1994,8 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
                pkt_align(pkt, rdlen, BRCMF_SDALIGN);
 
                /* Read the remaining frame data */
-               sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
-                               SDIO_FUNC_2, F2SYNC, ((u8 *) (pkt->data)),
-                               rdlen, pkt);
+               sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
+                                             SDIO_FUNC_2, F2SYNC, pkt);
                bus->f2rxdata++;
 
                if (sdret < 0) {
@@ -2075,7 +2062,7 @@ deliver:
 
                /* Unlock during rx call */
                up(&bus->sdsem);
-               brcmf_rx_frame(bus->drvr, ifidx, pkt, 1);
+               brcmf_rx_packet(bus->drvr, ifidx, pkt);
                down(&bus->sdsem);
        }
        rxcount = maxframes - rxleft;
@@ -2095,16 +2082,8 @@ deliver:
        return rxcount;
 }
 
-static int
-brcmf_sdbrcm_send_buf(struct brcmf_bus *bus, u32 addr, uint fn, uint flags,
-                   u8 *buf, uint nbytes, struct sk_buff *pkt)
-{
-       return brcmf_sdcard_send_buf
-               (bus->sdiodev, addr, fn, flags, buf, nbytes, pkt);
-}
-
 static void
-brcmf_sdbrcm_wait_for_event(struct brcmf_bus *bus, bool *lockvar)
+brcmf_sdbrcm_wait_for_event(struct brcmf_sdio *bus, bool *lockvar)
 {
        up(&bus->sdsem);
        wait_event_interruptible_timeout(bus->ctrl_wait,
@@ -2114,7 +2093,7 @@ brcmf_sdbrcm_wait_for_event(struct brcmf_bus *bus, bool *lockvar)
 }
 
 static void
-brcmf_sdbrcm_wait_event_wakeup(struct brcmf_bus *bus)
+brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
 {
        if (waitqueue_active(&bus->ctrl_wait))
                wake_up_interruptible(&bus->ctrl_wait);
@@ -2123,7 +2102,7 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_bus *bus)
 
 /* Writes a HW/SW header into the packet and sends it. */
 /* Assumes: (a) header space already there, (b) caller holds lock */
-static int brcmf_sdbrcm_txpkt(struct brcmf_bus *bus, struct sk_buff *pkt,
+static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
                              uint chan, bool free_pkt)
 {
        int ret;
@@ -2212,9 +2191,8 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_bus *bus, struct sk_buff *pkt,
        if (len & (ALIGNMENT - 1))
                        len = roundup(len, ALIGNMENT);
 
-       ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
-                                   SDIO_FUNC_2, F2SYNC, frame,
-                                   len, pkt);
+       ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
+                                   SDIO_FUNC_2, F2SYNC, pkt);
        bus->f2txdata++;
 
        if (ret < 0) {
@@ -2261,7 +2239,7 @@ done:
        return ret;
 }
 
-static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes)
+static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
 {
        struct sk_buff *pkt;
        u32 intstatus = 0;
@@ -2309,14 +2287,14 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes)
        }
 
        /* Deflow-control stack if needed */
-       if (drvr->up && (drvr->busstate == BRCMF_BUS_DATA) &&
+       if (drvr->up && (drvr->bus_if->state == BRCMF_BUS_DATA) &&
            drvr->txoff && (pktq_len(&bus->txq) < TXLOW))
                brcmf_txflowcontrol(drvr, 0, OFF);
 
        return cnt;
 }
 
-static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
+static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
 {
        u32 intstatus, newstatus = 0;
        uint retries = 0;
@@ -2344,7 +2322,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
                                               SBSDIO_DEVICE_CTL, &err);
                if (err) {
                        brcmf_dbg(ERROR, "error reading DEVCTL: %d\n", err);
-                       bus->drvr->busstate = BRCMF_BUS_DOWN;
+                       bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
                }
 #endif                         /* BCMDBG */
 
@@ -2354,7 +2332,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
                if (err) {
                        brcmf_dbg(ERROR, "error reading CSR: %d\n",
                                  err);
-                       bus->drvr->busstate = BRCMF_BUS_DOWN;
+                       bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
                }
 
                brcmf_dbg(INFO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
@@ -2367,7 +2345,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
                        if (err) {
                                brcmf_dbg(ERROR, "error reading DEVCTL: %d\n",
                                          err);
-                               bus->drvr->busstate = BRCMF_BUS_DOWN;
+                               bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
                        }
                        devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
                        brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
@@ -2375,7 +2353,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
                        if (err) {
                                brcmf_dbg(ERROR, "error writing DEVCTL: %d\n",
                                          err);
-                               bus->drvr->busstate = BRCMF_BUS_DOWN;
+                               bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
                        }
                        bus->clkstate = CLK_AVAIL;
                } else {
@@ -2477,9 +2455,9 @@ clkwait:
                (bus->clkstate == CLK_AVAIL)) {
                int ret, i;
 
-               ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
+               ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
                        SDIO_FUNC_2, F2SYNC, (u8 *) bus->ctrl_frame_buf,
-                       (u32) bus->ctrl_frame_len, NULL);
+                       (u32) bus->ctrl_frame_len);
 
                if (ret < 0) {
                        /* On failure, abort the command and
@@ -2531,11 +2509,11 @@ clkwait:
                 else await next interrupt */
        /* On failed register access, all bets are off:
                 no resched or interrupts */
-       if ((bus->drvr->busstate == BRCMF_BUS_DOWN) ||
+       if ((bus->drvr->bus_if->state == BRCMF_BUS_DOWN) ||
            brcmf_sdcard_regfail(bus->sdiodev)) {
                brcmf_dbg(ERROR, "failed backplane access over SDIO, halting operation %d\n",
                          brcmf_sdcard_regfail(bus->sdiodev));
-               bus->drvr->busstate = BRCMF_BUS_DOWN;
+               bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
                bus->intstatus = 0;
        } else if (bus->clkstate == CLK_PENDING) {
                brcmf_dbg(INFO, "rescheduled due to CLK_PENDING awaiting I_CHIPACTIVE interrupt\n");
@@ -2562,7 +2540,7 @@ clkwait:
 
 static int brcmf_sdbrcm_dpc_thread(void *data)
 {
-       struct brcmf_bus *bus = (struct brcmf_bus *) data;
+       struct brcmf_sdio *bus = (struct brcmf_sdio *) data;
 
        allow_signal(SIGTERM);
        /* Run until signal received */
@@ -2572,12 +2550,12 @@ static int brcmf_sdbrcm_dpc_thread(void *data)
                if (!wait_for_completion_interruptible(&bus->dpc_wait)) {
                        /* Call bus dpc unless it indicated down
                        (then clean stop) */
-                       if (bus->drvr->busstate != BRCMF_BUS_DOWN) {
+                       if (bus->drvr->bus_if->state != BRCMF_BUS_DOWN) {
                                if (brcmf_sdbrcm_dpc(bus))
                                        complete(&bus->dpc_wait);
                        } else {
                                /* after stopping the bus, exit thread */
-                               brcmf_sdbrcm_bus_stop(bus);
+                               brcmf_sdbrcm_bus_stop(bus->sdiodev->dev);
                                bus->dpc_tsk = NULL;
                                break;
                        }
@@ -2587,10 +2565,13 @@ static int brcmf_sdbrcm_dpc_thread(void *data)
        return 0;
 }
 
-int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt)
+int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
 {
        int ret = -EBADE;
        uint datalen, prec;
+       struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+       struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+       struct brcmf_sdio *bus = sdiodev->bus;
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -2638,7 +2619,7 @@ int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt)
 }
 
 static int
-brcmf_sdbrcm_membytes(struct brcmf_bus *bus, bool write, u32 address, u8 *data,
+brcmf_sdbrcm_membytes(struct brcmf_sdio *bus, bool write, u32 address, u8 *data,
                 uint size)
 {
        int bcmerror = 0;
@@ -2699,7 +2680,7 @@ xfer_done:
 #ifdef BCMDBG
 #define CONSOLE_LINE_MAX       192
 
-static int brcmf_sdbrcm_readconsole(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
 {
        struct brcmf_console *c = &bus->console;
        u8 line[CONSOLE_LINE_MAX], ch;
@@ -2776,14 +2757,14 @@ break2:
 }
 #endif                         /* BCMDBG */
 
-static int brcmf_tx_frame(struct brcmf_bus *bus, u8 *frame, u16 len)
+static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
 {
        int i;
        int ret;
 
        bus->ctrl_frame_stat = false;
-       ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
-                                   SDIO_FUNC_2, F2SYNC, frame, len, NULL);
+       ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
+                                   SDIO_FUNC_2, F2SYNC, frame, len);
 
        if (ret < 0) {
                /* On failure, abort the command and terminate the frame */
@@ -2819,7 +2800,7 @@ static int brcmf_tx_frame(struct brcmf_bus *bus, u8 *frame, u16 len)
 }
 
 int
-brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
+brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
 {
        u8 *frame;
        u16 len;
@@ -2827,6 +2808,9 @@ brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
        uint retries = 0;
        u8 doff = 0;
        int ret = -1;
+       struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+       struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+       struct brcmf_sdio *bus = sdiodev->bus;
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -2934,11 +2918,14 @@ brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
 }
 
 int
-brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
+brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
 {
        int timeleft;
        uint rxlen = 0;
        bool pending;
+       struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+       struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+       struct brcmf_sdio *bus = sdiodev->bus;
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -2971,7 +2958,7 @@ brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
        return rxlen ? (int)rxlen : -ETIMEDOUT;
 }
 
-static int brcmf_sdbrcm_downloadvars(struct brcmf_bus *bus, void *arg, int len)
+static int brcmf_sdbrcm_downloadvars(struct brcmf_sdio *bus, void *arg, int len)
 {
        int bcmerror = 0;
 
@@ -3004,7 +2991,7 @@ err:
        return bcmerror;
 }
 
-static int brcmf_sdbrcm_write_vars(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
 {
        int bcmerror = 0;
        u32 varsize;
@@ -3091,7 +3078,7 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_bus *bus)
        return bcmerror;
 }
 
-static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
+static int brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
 {
        uint retries;
        int bcmerror = 0;
@@ -3134,13 +3121,13 @@ static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
                /* Allow HT Clock now that the ARM is running. */
                bus->alp_only = false;
 
-               bus->drvr->busstate = BRCMF_BUS_LOAD;
+               bus->drvr->bus_if->state = BRCMF_BUS_LOAD;
        }
 fail:
        return bcmerror;
 }
 
-static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_bus *bus)
+static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_sdio *bus)
 {
        if (bus->firmware->size < bus->fw_ptr + len)
                len = bus->firmware->size - bus->fw_ptr;
@@ -3150,10 +3137,7 @@ static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_bus *bus)
        return len;
 }
 
-MODULE_FIRMWARE(BCM4329_FW_NAME);
-MODULE_FIRMWARE(BCM4329_NV_NAME);
-
-static int brcmf_sdbrcm_download_code_file(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
 {
        int offset = 0;
        uint len;
@@ -3162,8 +3146,7 @@ static int brcmf_sdbrcm_download_code_file(struct brcmf_bus *bus)
 
        brcmf_dbg(INFO, "Enter\n");
 
-       bus->fw_name = BCM4329_FW_NAME;
-       ret = request_firmware(&bus->firmware, bus->fw_name,
+       ret = request_firmware(&bus->firmware, BRCMFMAC_FW_NAME,
                               &bus->sdiodev->func[2]->dev);
        if (ret) {
                brcmf_dbg(ERROR, "Fail to request firmware %d\n", ret);
@@ -3253,15 +3236,14 @@ static uint brcmf_process_nvram_vars(char *varbuf, uint len)
        return buf_len;
 }
 
-static int brcmf_sdbrcm_download_nvram(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
 {
        uint len;
        char *memblock = NULL;
        char *bufp;
        int ret;
 
-       bus->nv_name = BCM4329_NV_NAME;
-       ret = request_firmware(&bus->firmware, bus->nv_name,
+       ret = request_firmware(&bus->firmware, BRCMFMAC_NV_NAME,
                               &bus->sdiodev->func[2]->dev);
        if (ret) {
                brcmf_dbg(ERROR, "Fail to request nvram %d\n", ret);
@@ -3301,7 +3283,7 @@ err:
        return ret;
 }
 
-static int _brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
+static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
 {
        int bcmerror = -1;
 
@@ -3334,7 +3316,7 @@ err:
 }
 
 static bool
-brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
+brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
 {
        bool ret;
 
@@ -3348,12 +3330,15 @@ brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
        return ret;
 }
 
-void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus)
+void brcmf_sdbrcm_bus_stop(struct device *dev)
 {
        u32 local_hostintmask;
        u8 saveclk;
        uint retries;
        int err;
+       struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+       struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+       struct brcmf_sdio *bus = sdiodev->bus;
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -3382,7 +3367,7 @@ void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus)
        bus->hostintmask = 0;
 
        /* Change our idea of bus state */
-       bus->drvr->busstate = BRCMF_BUS_DOWN;
+       bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
 
        /* Force clocks on backplane to be sure F2 interrupt propagates */
        saveclk = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
@@ -3426,9 +3411,11 @@ void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus)
        up(&bus->sdsem);
 }
 
-int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
+int brcmf_sdbrcm_bus_init(struct device *dev)
 {
-       struct brcmf_bus *bus = drvr->bus;
+       struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+       struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+       struct brcmf_sdio *bus = sdiodev->bus;
        unsigned long timeout;
        uint retries = 0;
        u8 ready, enable;
@@ -3438,7 +3425,7 @@ int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
        brcmf_dbg(TRACE, "Enter\n");
 
        /* try to download image and nvram to the dongle */
-       if (drvr->busstate == BRCMF_BUS_DOWN) {
+       if (bus_if->state == BRCMF_BUS_DOWN) {
                if (!(brcmf_sdbrcm_download_firmware(bus)))
                        return -1;
        }
@@ -3504,7 +3491,7 @@ int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
                                       SBSDIO_WATERMARK, 8, &err);
 
                /* Set bus state according to enable result */
-               drvr->busstate = BRCMF_BUS_DATA;
+               bus_if->state = BRCMF_BUS_DATA;
        }
 
        else {
@@ -3519,7 +3506,7 @@ int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
                               SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
 
        /* If we didn't come up, turn off backplane clock */
-       if (drvr->busstate != BRCMF_BUS_DATA)
+       if (bus_if->state != BRCMF_BUS_DATA)
                brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
 
 exit:
@@ -3530,7 +3517,7 @@ exit:
 
 void brcmf_sdbrcm_isr(void *arg)
 {
-       struct brcmf_bus *bus = (struct brcmf_bus *) arg;
+       struct brcmf_sdio *bus = (struct brcmf_sdio *) arg;
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -3539,7 +3526,7 @@ void brcmf_sdbrcm_isr(void *arg)
                return;
        }
 
-       if (bus->drvr->busstate == BRCMF_BUS_DOWN) {
+       if (bus->drvr->bus_if->state == BRCMF_BUS_DOWN) {
                brcmf_dbg(ERROR, "bus is down. we have nothing to do\n");
                return;
        }
@@ -3562,14 +3549,14 @@ void brcmf_sdbrcm_isr(void *arg)
                complete(&bus->dpc_wait);
 }
 
-static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_pub *drvr)
+static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
 {
-       struct brcmf_bus *bus;
+#ifdef BCMDBG
+       struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
+#endif /* BCMDBG */
 
        brcmf_dbg(TIMER, "Enter\n");
 
-       bus = drvr->bus;
-
        /* Ignore the timer if simulating bus down */
        if (bus->sleeping)
                return false;
@@ -3613,7 +3600,8 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_pub *drvr)
        }
 #ifdef BCMDBG
        /* Poll for console output periodically */
-       if (drvr->busstate == BRCMF_BUS_DATA && bus->console_interval != 0) {
+       if (bus_if->state == BRCMF_BUS_DATA &&
+           bus->console_interval != 0) {
                bus->console.count += BRCMF_WD_POLL_MS;
                if (bus->console.count >= bus->console_interval) {
                        bus->console.count -= bus->console_interval;
@@ -3648,10 +3636,12 @@ static bool brcmf_sdbrcm_chipmatch(u16 chipid)
 {
        if (chipid == BCM4329_CHIP_ID)
                return true;
+       if (chipid == BCM4330_CHIP_ID)
+               return true;
        return false;
 }
 
-static void brcmf_sdbrcm_release_malloc(struct brcmf_bus *bus)
+static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
 {
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -3663,7 +3653,7 @@ static void brcmf_sdbrcm_release_malloc(struct brcmf_bus *bus)
        bus->databuf = NULL;
 }
 
-static bool brcmf_sdbrcm_probe_malloc(struct brcmf_bus *bus)
+static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus)
 {
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -3699,7 +3689,7 @@ fail:
 }
 
 static bool
-brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva)
+brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
 {
        u8 clkctl = 0;
        int err = 0;
@@ -3784,7 +3774,7 @@ fail:
        return false;
 }
 
-static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus)
+static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
 {
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -3792,7 +3782,7 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus)
        brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
                               SDIO_FUNC_ENABLE_1, NULL);
 
-       bus->drvr->busstate = BRCMF_BUS_DOWN;
+       bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
        bus->sleeping = false;
        bus->rxflow = false;
 
@@ -3819,7 +3809,7 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus)
 static int
 brcmf_sdbrcm_watchdog_thread(void *data)
 {
-       struct brcmf_bus *bus = (struct brcmf_bus *)data;
+       struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
 
        allow_signal(SIGTERM);
        /* Run until signal received */
@@ -3827,7 +3817,7 @@ brcmf_sdbrcm_watchdog_thread(void *data)
                if (kthread_should_stop())
                        break;
                if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
-                       brcmf_sdbrcm_bus_watchdog(bus->drvr);
+                       brcmf_sdbrcm_bus_watchdog(bus);
                        /* Count the tick for reference */
                        bus->drvr->tickcnt++;
                } else
@@ -3839,7 +3829,7 @@ brcmf_sdbrcm_watchdog_thread(void *data)
 static void
 brcmf_sdbrcm_watchdog(unsigned long data)
 {
-       struct brcmf_bus *bus = (struct brcmf_bus *)data;
+       struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
 
        if (bus->watchdog_tsk) {
                complete(&bus->watchdog_wait);
@@ -3850,7 +3840,7 @@ brcmf_sdbrcm_watchdog(unsigned long data)
        }
 }
 
-static void brcmf_sdbrcm_release_dongle(struct brcmf_bus *bus)
+static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus)
 {
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -3867,7 +3857,7 @@ static void brcmf_sdbrcm_release_dongle(struct brcmf_bus *bus)
 }
 
 /* Detach and free everything */
-static void brcmf_sdbrcm_release(struct brcmf_bus *bus)
+static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
 {
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -3889,21 +3879,10 @@ static void brcmf_sdbrcm_release(struct brcmf_bus *bus)
        brcmf_dbg(TRACE, "Disconnected\n");
 }
 
-void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
-                        u32 regsva, struct brcmf_sdio_dev *sdiodev)
+void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
 {
        int ret;
-       struct brcmf_bus *bus;
-
-       /* Init global variables at run-time, not as part of the declaration.
-        * This is required to support init/de-init of the driver.
-        * Initialization
-        * of globals as part of the declaration results in non-deterministic
-        * behavior since the value of the globals may be different on the
-        * first time that the driver is initialized vs subsequent
-        * initializations.
-        */
-       brcmf_c_init();
+       struct brcmf_sdio *bus;
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -3911,7 +3890,7 @@ void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
         * regsva == SI_ENUM_BASE*/
 
        /* Allocate private bus interface state */
-       bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC);
+       bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC);
        if (!bus)
                goto fail;
 
@@ -3963,7 +3942,7 @@ void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
        }
 
        /* Attach to the brcmf/OS/network interface */
-       bus->drvr = brcmf_attach(bus, SDPCM_RESERVE);
+       bus->drvr = brcmf_attach(bus, SDPCM_RESERVE, bus->sdiodev->dev);
        if (!bus->drvr) {
                brcmf_dbg(ERROR, "brcmf_attach failed\n");
                goto fail;
@@ -4015,7 +3994,7 @@ fail:
 
 void brcmf_sdbrcm_disconnect(void *ptr)
 {
-       struct brcmf_bus *bus = (struct brcmf_bus *)ptr;
+       struct brcmf_sdio *bus = (struct brcmf_sdio *)ptr;
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -4025,13 +4004,8 @@ void brcmf_sdbrcm_disconnect(void *ptr)
        brcmf_dbg(TRACE, "Disconnected\n");
 }
 
-struct device *brcmf_bus_get_device(struct brcmf_bus *bus)
-{
-       return &bus->sdiodev->func[2]->dev;
-}
-
 void
-brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick)
+brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick)
 {
        /* Totally stop the timer */
        if (!wdtick && bus->wd_timer_valid == true) {
@@ -4042,7 +4016,7 @@ brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick)
        }
 
        /* don't start the wd until fw is loaded */
-       if (bus->drvr->busstate == BRCMF_BUS_DOWN)
+       if (bus->drvr->bus_if->state == BRCMF_BUS_DOWN)
                return;
 
        if (wdtick) {
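
The hunks above convert the exported bus entry points (txdata, txctl, rxctl, bus_stop, bus_init) to take a plain struct device * and recover the SDIO state through the drvdata chain. Below is a minimal sketch of that chain, not taken from the patch; the struct layouts are abbreviated to the members the hunks actually use, and the helper name dev_to_sdio_bus is invented for illustration.

#include <linux/device.h>

struct brcmf_sdio;                      /* SDIO bus private state */

struct brcmf_bus {                      /* common bus layer, abbreviated */
        void *bus_priv;                 /* here: struct brcmf_sdio_dev * */
        int state;                      /* BRCMF_BUS_DOWN / _LOAD / _DATA */
};

struct brcmf_sdio_dev {                 /* SDIO glue layer, abbreviated */
        struct device *dev;
        struct brcmf_bus *bus_if;
        struct brcmf_sdio *bus;
};

static struct brcmf_sdio *dev_to_sdio_bus(struct device *dev)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;

        return sdiodev->bus;
}

With device-based callbacks the common layer no longer needs the removed brcmf_bus_get_device() accessor, since brcmf_attach() is now handed the struct device directly.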
index f6b1822..a6048d7 100644
@@ -59,37 +59,17 @@ struct sdiod_drive_str {
        u8 strength;    /* Pad Drive Strength in mA */
        u8 sel;         /* Chip-specific select value */
 };
-/* SDIO Drive Strength to sel value table for PMU Rev 1 */
-static const struct sdiod_drive_str sdiod_drive_strength_tab1[] = {
-       {
-       4, 0x2}, {
-       2, 0x3}, {
-       1, 0x0}, {
-       0, 0x0}
-       };
-/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */
-static const struct sdiod_drive_str sdiod_drive_strength_tab2[] = {
-       {
-       12, 0x7}, {
-       10, 0x6}, {
-       8, 0x5}, {
-       6, 0x4}, {
-       4, 0x2}, {
-       2, 0x1}, {
-       0, 0x0}
-       };
-/* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */
-static const struct sdiod_drive_str sdiod_drive_strength_tab3[] = {
-       {
-       32, 0x7}, {
-       26, 0x6}, {
-       22, 0x5}, {
-       16, 0x4}, {
-       12, 0x3}, {
-       8, 0x2}, {
-       4, 0x1}, {
-       0, 0x0}
-       };
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
+static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
+       {32, 0x6},
+       {26, 0x7},
+       {22, 0x4},
+       {16, 0x5},
+       {12, 0x2},
+       {8, 0x3},
+       {4, 0x0},
+       {0, 0x1}
+};
 
 u8
 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid)
@@ -396,6 +376,23 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
                ci->c_inf[3].base = BCM4329_CORE_ARM_BASE;
                ci->ramsize = BCM4329_RAMSIZE;
                break;
+       case BCM4330_CHIP_ID:
+               ci->c_inf[0].wrapbase = 0x18100000;
+               ci->c_inf[0].cib = 0x27004211;
+               ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+               ci->c_inf[1].base = 0x18002000;
+               ci->c_inf[1].wrapbase = 0x18102000;
+               ci->c_inf[1].cib = 0x07004211;
+               ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+               ci->c_inf[2].base = 0x18004000;
+               ci->c_inf[2].wrapbase = 0x18104000;
+               ci->c_inf[2].cib = 0x0d080401;
+               ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+               ci->c_inf[3].base = 0x18003000;
+               ci->c_inf[3].wrapbase = 0x18103000;
+               ci->c_inf[3].cib = 0x03004211;
+               ci->ramsize = 0x48000;
+               break;
        default:
                brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip);
                return -ENODEV;
@@ -569,19 +566,8 @@ brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
                return;
 
        switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
-       case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1):
-               str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab1;
-               str_mask = 0x30000000;
-               str_shift = 28;
-               break;
-       case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2):
-       case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3):
-               str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab2;
-               str_mask = 0x00003800;
-               str_shift = 11;
-               break;
-       case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8):
-               str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab3;
+       case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
+               str_tab = (struct sdiod_drive_str *)&sdiod_drvstr_tab1_1v8;
                str_mask = 0x00003800;
                str_shift = 11;
                break;
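
The replacement table maps a requested pad drive strength in mA to a chip-specific sel code for the BCM4330 at PMU rev 12, and the mask/shift pair selects the register field it is written to. A hypothetical helper showing where a sel value lands; DRVSTR_MASK, DRVSTR_SHIFT and drvstr_field are illustrative names, and the actual register write is done by the unchanged remainder of brcmf_sdio_chip_drivestrengthinit(), which is not shown here.

#include <linux/types.h>

#define DRVSTR_MASK     0x00003800      /* str_mask chosen above */
#define DRVSTR_SHIFT    11              /* str_shift chosen above */

static u32 drvstr_field(u32 reg, u8 sel)
{
        /* e.g. 12 mA on BCM4330 / PMU rev 12: sel 0x2, field bits 13:11 = 010 */
        return (reg & ~DRVSTR_MASK) | ((u32)sel << DRVSTR_SHIFT);
}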
index 726fa89..d36a2a8 100644
@@ -132,9 +132,10 @@ struct brcmf_sdio_dev {
        atomic_t suspend;               /* suspend flag */
        wait_queue_head_t request_byte_wait;
        wait_queue_head_t request_word_wait;
-       wait_queue_head_t request_packet_wait;
+       wait_queue_head_t request_chain_wait;
        wait_queue_head_t request_buffer_wait;
-
+       struct device *dev;
+       struct brcmf_bus *bus_if;
 };
 
 /* Register/deregister device interrupt handler. */
@@ -182,11 +183,21 @@ extern bool brcmf_sdcard_regfail(struct brcmf_sdio_dev *sdiodev);
  * NOTE: Async operation is not currently supported.
  */
 extern int
+brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                     uint flags, struct sk_buff *pkt);
+extern int
 brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-                     uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt);
+                     uint flags, u8 *buf, uint nbytes);
+
+extern int
+brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                     uint flags, struct sk_buff *pkt);
 extern int
 brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-                     uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt);
+                     uint flags, u8 *buf, uint nbytes);
+extern int
+brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                       uint flags, struct sk_buff_head *pktq);
 
 /* Flags bits */
 
@@ -237,16 +248,18 @@ brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
 /* read or write any buffer using cmd53 */
 extern int
 brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
-                          uint fix_inc, uint rw, uint fnc_num,
-                          u32 addr, uint regwidth,
-                          u32 buflen, u8 *buffer, struct sk_buff *pkt);
+                          uint fix_inc, uint rw, uint fnc_num, u32 addr,
+                          struct sk_buff *pkt);
+extern int
+brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
+                         uint write, uint func, uint addr,
+                         struct sk_buff_head *pktq);
 
 /* Watchdog timer interface for pm ops */
 extern void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev,
                                    bool enable);
 
-extern void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
-                               u32 regsva, struct brcmf_sdio_dev *sdiodev);
+extern void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev);
 extern void brcmf_sdbrcm_disconnect(void *ptr);
 extern void brcmf_sdbrcm_isr(void *arg);
 #endif                         /* _BRCM_SDH_H_ */
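
The transfer API declared above is split by payload type: *_pkt takes a single sk_buff, *_buf takes a raw byte buffer and loses the trailing sk_buff argument, and the new *_chain variants take an sk_buff_head for glommed transfers. A sketch of calling the chain receive; the function name recv_glom_sketch and the assumption that the caller queues its receive buffers on pktq beforehand are illustrative, while SDIO_FUNC_2 and F2SYNC are the same constants the send path uses earlier in this patch.

#include <linux/skbuff.h>

static int recv_glom_sketch(struct brcmf_sdio_dev *sdiodev, u32 addr,
                            struct sk_buff_head *pktq)
{
        /* pktq is expected to hold the pre-allocated buffers for one glom */
        return brcmf_sdcard_recv_chain(sdiodev, addr, SDIO_FUNC_2,
                                       F2SYNC, pktq);
}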
index cc19a73..f23b0c3 100644
@@ -1429,7 +1429,7 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
 
 static s32
 brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
-                        enum nl80211_tx_power_setting type, s32 dbm)
+                           enum nl80211_tx_power_setting type, s32 mbm)
 {
 
        struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
@@ -1437,6 +1437,7 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
        u16 txpwrmw;
        s32 err = 0;
        s32 disable = 0;
+       s32 dbm = MBM_TO_DBM(mbm);
 
        WL_TRACE("Enter\n");
        if (!check_sys_up(wiphy))
@@ -1446,12 +1447,6 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
        case NL80211_TX_POWER_AUTOMATIC:
                break;
        case NL80211_TX_POWER_LIMITED:
-               if (dbm < 0) {
-                       WL_ERR("TX_POWER_LIMITED - dbm is negative\n");
-                       err = -EINVAL;
-                       goto done;
-               }
-               break;
        case NL80211_TX_POWER_FIXED:
                if (dbm < 0) {
                        WL_ERR("TX_POWER_FIXED - dbm is negative\n");
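
The set_tx_power callback now receives the value in mBm (hundredths of a dBm), which is what nl80211 actually passes, and converts it once with cfg80211's MBM_TO_DBM(), an integer division by 100. With a single dbm variable the LIMITED case can fall through and share the FIXED case's negative-value check. A one-line example, with 2000 as an arbitrary illustrative input:

        s32 dbm = MBM_TO_DBM(2000);     /* 2000 mBm == 20 dBm */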
index 39e3054..ab9bb11 100644
 
 #define        BADIDX          (SI_MAXCORES + 1)
 
-/* Newer chips can access PCI/PCIE and CC core without requiring to change
- * PCI BAR0 WIN
- */
-#define SI_FAST(si) (((si)->pub.buscoretype == PCIE_CORE_ID) ||        \
-                    (((si)->pub.buscoretype == PCI_CORE_ID) && \
-                     (si)->pub.buscorerev >= 13))
-
-#define CCREGS_FAST(si) (((char __iomem *)((si)->curmap) + \
-                         PCI_16KB0_CCREGS_OFFSET))
-
 #define        IS_SIM(chippkg) \
        ((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
 
-/*
- * Macros to disable/restore function core(D11, ENET, ILINE20, etc) interrupts
- * before after core switching to avoid invalid register accesss inside ISR.
- */
-#define INTR_OFF(si, intr_val) \
-       if ((si)->intrsoff_fn && \
-           (si)->coreid[(si)->curidx] == (si)->dev_coreid) \
-               intr_val = (*(si)->intrsoff_fn)((si)->intr_arg)
-
-#define INTR_RESTORE(si, intr_val) \
-       if ((si)->intrsrestore_fn && \
-           (si)->coreid[(si)->curidx] == (si)->dev_coreid) \
-               (*(si)->intrsrestore_fn)((si)->intr_arg, intr_val)
+#define PCI(sih)       (ai_get_buscoretype(sih) == PCI_CORE_ID)
+#define PCIE(sih)      (ai_get_buscoretype(sih) == PCIE_CORE_ID)
 
-#define PCI(si)                ((si)->pub.buscoretype == PCI_CORE_ID)
-#define PCIE(si)       ((si)->pub.buscoretype == PCIE_CORE_ID)
-
-#define PCI_FORCEHT(si)        (PCIE(si) && (si->pub.chip == BCM4716_CHIP_ID))
+#define PCI_FORCEHT(sih) (PCIE(sih) && (ai_get_chip_id(sih) == BCM4716_CHIP_ID))
 
 #ifdef BCMDBG
 #define        SI_MSG(fmt, ...)        pr_debug(fmt, ##__VA_ARGS__)
        (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \
                IS_ALIGNED((x), SI_CORE_SIZE))
 
-#define PCIEREGS(si) ((__iomem char *)((si)->curmap) + \
-                       PCI_16KB0_PCIREGS_OFFSET)
-
 struct aidmp {
        u32 oobselina30;        /* 0x000 */
        u32 oobselina74;        /* 0x004 */
@@ -481,406 +454,13 @@ struct aidmp {
        u32 componentid3;       /* 0xffc */
 };
 
-/* EROM parsing */
-
-static u32
-get_erom_ent(struct si_pub *sih, u32 __iomem **eromptr, u32 mask, u32 match)
-{
-       u32 ent;
-       uint inv = 0, nom = 0;
-
-       while (true) {
-               ent = R_REG(*eromptr);
-               (*eromptr)++;
-
-               if (mask == 0)
-                       break;
-
-               if ((ent & ER_VALID) == 0) {
-                       inv++;
-                       continue;
-               }
-
-               if (ent == (ER_END | ER_VALID))
-                       break;
-
-               if ((ent & mask) == match)
-                       break;
-
-               nom++;
-       }
-
-       return ent;
-}
-
-static u32
-get_asd(struct si_pub *sih, u32 __iomem **eromptr, uint sp, uint ad, uint st,
-       u32 *addrl, u32 *addrh, u32 *sizel, u32 *sizeh)
-{
-       u32 asd, sz, szd;
-
-       asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
-       if (((asd & ER_TAG1) != ER_ADD) ||
-           (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
-           ((asd & AD_ST_MASK) != st)) {
-               /* This is not what we want, "push" it back */
-               (*eromptr)--;
-               return 0;
-       }
-       *addrl = asd & AD_ADDR_MASK;
-       if (asd & AD_AG32)
-               *addrh = get_erom_ent(sih, eromptr, 0, 0);
-       else
-               *addrh = 0;
-       *sizeh = 0;
-       sz = asd & AD_SZ_MASK;
-       if (sz == AD_SZ_SZD) {
-               szd = get_erom_ent(sih, eromptr, 0, 0);
-               *sizel = szd & SD_SZ_MASK;
-               if (szd & SD_SG32)
-                       *sizeh = get_erom_ent(sih, eromptr, 0, 0);
-       } else
-               *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
-
-       return asd;
-}
-
-static void ai_hwfixup(struct si_info *sii)
-{
-}
-
-/* parse the enumeration rom to identify all cores */
-static void ai_scan(struct si_pub *sih, struct chipcregs __iomem *cc)
-{
-       struct si_info *sii = (struct si_info *)sih;
-
-       u32 erombase;
-       u32 __iomem *eromptr, *eromlim;
-       void __iomem *regs = cc;
-
-       erombase = R_REG(&cc->eromptr);
-
-       /* Set wrappers address */
-       sii->curwrap = (void *)((unsigned long)cc + SI_CORE_SIZE);
-
-       /* Now point the window at the erom */
-       pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, erombase);
-       eromptr = regs;
-       eromlim = eromptr + (ER_REMAPCONTROL / sizeof(u32));
-
-       while (eromptr < eromlim) {
-               u32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
-               u32 mpd, asd, addrl, addrh, sizel, sizeh;
-               u32 __iomem *base;
-               uint i, j, idx;
-               bool br;
-
-               br = false;
-
-               /* Grok a component */
-               cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
-               if (cia == (ER_END | ER_VALID)) {
-                       /*  Found END of erom */
-                       ai_hwfixup(sii);
-                       return;
-               }
-               base = eromptr - 1;
-               cib = get_erom_ent(sih, &eromptr, 0, 0);
-
-               if ((cib & ER_TAG) != ER_CI) {
-                       /* CIA not followed by CIB */
-                       goto error;
-               }
-
-               cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
-               mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
-               crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
-               nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
-               nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
-               nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
-               nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
-
-               if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
-                       continue;
-               if ((nmw + nsw == 0)) {
-                       /* A component which is not a core */
-                       if (cid == OOB_ROUTER_CORE_ID) {
-                               asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
-                                             &addrl, &addrh, &sizel, &sizeh);
-                               if (asd != 0)
-                                       sii->oob_router = addrl;
-                       }
-                       continue;
-               }
-
-               idx = sii->numcores;
-/*             sii->eromptr[idx] = base; */
-               sii->cia[idx] = cia;
-               sii->cib[idx] = cib;
-               sii->coreid[idx] = cid;
-
-               for (i = 0; i < nmp; i++) {
-                       mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
-                       if ((mpd & ER_TAG) != ER_MP) {
-                               /* Not enough MP entries for component */
-                               goto error;
-                       }
-               }
-
-               /* First Slave Address Descriptor should be port 0:
-                * the main register space for the core
-                */
-               asd =
-                   get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh,
-                           &sizel, &sizeh);
-               if (asd == 0) {
-                       /* Try again to see if it is a bridge */
-                       asd =
-                           get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl,
-                                   &addrh, &sizel, &sizeh);
-                       if (asd != 0)
-                               br = true;
-                       else if ((addrh != 0) || (sizeh != 0)
-                                || (sizel != SI_CORE_SIZE)) {
-                               /* First Slave ASD for core malformed */
-                               goto error;
-                       }
-               }
-               sii->coresba[idx] = addrl;
-               sii->coresba_size[idx] = sizel;
-               /* Get any more ASDs in port 0 */
-               j = 1;
-               do {
-                       asd =
-                           get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl,
-                                   &addrh, &sizel, &sizeh);
-                       if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
-                               sii->coresba2[idx] = addrl;
-                               sii->coresba2_size[idx] = sizel;
-                       }
-                       j++;
-               } while (asd != 0);
-
-               /* Go through the ASDs for other slave ports */
-               for (i = 1; i < nsp; i++) {
-                       j = 0;
-                       do {
-                               asd =
-                                   get_asd(sih, &eromptr, i, j++, AD_ST_SLAVE,
-                                           &addrl, &addrh, &sizel, &sizeh);
-                       } while (asd != 0);
-                       if (j == 0) {
-                               /* SP has no address descriptors */
-                               goto error;
-                       }
-               }
-
-               /* Now get master wrappers */
-               for (i = 0; i < nmw; i++) {
-                       asd =
-                           get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl,
-                                   &addrh, &sizel, &sizeh);
-                       if (asd == 0) {
-                               /* Missing descriptor for MW */
-                               goto error;
-                       }
-                       if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
-                               /* Master wrapper %d is not 4KB */
-                               goto error;
-                       }
-                       if (i == 0)
-                               sii->wrapba[idx] = addrl;
-               }
-
-               /* And finally slave wrappers */
-               for (i = 0; i < nsw; i++) {
-                       uint fwp = (nsp == 1) ? 0 : 1;
-                       asd =
-                           get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP,
-                                   &addrl, &addrh, &sizel, &sizeh);
-                       if (asd == 0) {
-                               /* Missing descriptor for SW */
-                               goto error;
-                       }
-                       if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
-                               /* Slave wrapper is not 4KB */
-                               goto error;
-                       }
-                       if ((nmw == 0) && (i == 0))
-                               sii->wrapba[idx] = addrl;
-               }
-
-               /* Don't record bridges */
-               if (br)
-                       continue;
-
-               /* Done with core */
-               sii->numcores++;
-       }
-
- error:
-       /* Reached end of erom without finding END */
-       sii->numcores = 0;
-       return;
-}
-
-/*
- * This function changes the logical "focus" to the indicated core.
- * Return the current core's virtual address. Since each core starts with the
- * same set of registers (BIST, clock control, etc), the returned address
- * contains the first register of this 'common' register block (not to be
- * confused with 'common core').
- */
-void __iomem *ai_setcoreidx(struct si_pub *sih, uint coreidx)
-{
-       struct si_info *sii = (struct si_info *)sih;
-       u32 addr = sii->coresba[coreidx];
-       u32 wrap = sii->wrapba[coreidx];
-
-       if (coreidx >= sii->numcores)
-               return NULL;
-
-       /* point bar0 window */
-       pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, addr);
-       /* point bar0 2nd 4KB window */
-       pci_write_config_dword(sii->pbus, PCI_BAR0_WIN2, wrap);
-       sii->curidx = coreidx;
-
-       return sii->curmap;
-}
-
-/* Return the number of address spaces in current core */
-int ai_numaddrspaces(struct si_pub *sih)
-{
-       return 2;
-}
-
-/* Return the address of the nth address space in the current core */
-u32 ai_addrspace(struct si_pub *sih, uint asidx)
-{
-       struct si_info *sii;
-       uint cidx;
-
-       sii = (struct si_info *)sih;
-       cidx = sii->curidx;
-
-       if (asidx == 0)
-               return sii->coresba[cidx];
-       else if (asidx == 1)
-               return sii->coresba2[cidx];
-       else {
-               /* Need to parse the erom again to find addr space */
-               return 0;
-       }
-}
-
-/* Return the size of the nth address space in the current core */
-u32 ai_addrspacesize(struct si_pub *sih, uint asidx)
-{
-       struct si_info *sii;
-       uint cidx;
-
-       sii = (struct si_info *)sih;
-       cidx = sii->curidx;
-
-       if (asidx == 0)
-               return sii->coresba_size[cidx];
-       else if (asidx == 1)
-               return sii->coresba2_size[cidx];
-       else {
-               /* Need to parse the erom again to find addr */
-               return 0;
-       }
-}
-
-uint ai_flag(struct si_pub *sih)
-{
-       struct si_info *sii;
-       struct aidmp *ai;
-
-       sii = (struct si_info *)sih;
-       ai = sii->curwrap;
-
-       return R_REG(&ai->oobselouta30) & 0x1f;
-}
-
-void ai_setint(struct si_pub *sih, int siflag)
-{
-}
-
-uint ai_corevendor(struct si_pub *sih)
-{
-       struct si_info *sii;
-       u32 cia;
-
-       sii = (struct si_info *)sih;
-       cia = sii->cia[sii->curidx];
-       return (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
-}
-
-uint ai_corerev(struct si_pub *sih)
-{
-       struct si_info *sii;
-       u32 cib;
-
-       sii = (struct si_info *)sih;
-       cib = sii->cib[sii->curidx];
-       return (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
-}
-
-bool ai_iscoreup(struct si_pub *sih)
-{
-       struct si_info *sii;
-       struct aidmp *ai;
-
-       sii = (struct si_info *)sih;
-       ai = sii->curwrap;
-
-       return (((R_REG(&ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) ==
-                SICF_CLOCK_EN)
-               && ((R_REG(&ai->resetctrl) & AIRC_RESET) == 0));
-}
-
-void ai_core_cflags_wo(struct si_pub *sih, u32 mask, u32 val)
-{
-       struct si_info *sii;
-       struct aidmp *ai;
-       u32 w;
-
-       sii = (struct si_info *)sih;
-
-       ai = sii->curwrap;
-
-       if (mask || val) {
-               w = ((R_REG(&ai->ioctrl) & ~mask) | val);
-               W_REG(&ai->ioctrl, w);
-       }
-}
-
-u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val)
-{
-       struct si_info *sii;
-       struct aidmp *ai;
-       u32 w;
-
-       sii = (struct si_info *)sih;
-       ai = sii->curwrap;
-
-       if (mask || val) {
-               w = ((R_REG(&ai->ioctrl) & ~mask) | val);
-               W_REG(&ai->ioctrl, w);
-       }
-
-       return R_REG(&ai->ioctrl);
-}
-
 /* return true if PCIE capability exists in the pci config space */
 static bool ai_ispcie(struct si_info *sii)
 {
        u8 cap_ptr;
 
        cap_ptr =
-           pcicore_find_pci_capability(sii->pbus, PCI_CAP_ID_EXP, NULL,
+           pcicore_find_pci_capability(sii->pcibus, PCI_CAP_ID_EXP, NULL,
                                        NULL);
        if (!cap_ptr)
                return false;
@@ -896,117 +476,69 @@ static bool ai_buscore_prep(struct si_info *sii)
        return true;
 }
 
-u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val)
-{
-       struct si_info *sii;
-       struct aidmp *ai;
-       u32 w;
-
-       sii = (struct si_info *)sih;
-       ai = sii->curwrap;
-
-       if (mask || val) {
-               w = ((R_REG(&ai->iostatus) & ~mask) | val);
-               W_REG(&ai->iostatus, w);
-       }
-
-       return R_REG(&ai->iostatus);
-}
-
 static bool
-ai_buscore_setup(struct si_info *sii, u32 savewin, uint *origidx)
+ai_buscore_setup(struct si_info *sii, struct bcma_device *cc)
 {
-       bool pci, pcie;
-       uint i;
-       uint pciidx, pcieidx, pcirev, pcierev;
-       struct chipcregs __iomem *cc;
+       struct bcma_device *pci = NULL;
+       struct bcma_device *pcie = NULL;
+       struct bcma_device *core;
 
-       cc = ai_setcoreidx(&sii->pub, SI_CC_IDX);
+
+       /* no cores found, bail out */
+       if (cc->bus->nr_cores == 0)
+               return false;
 
        /* get chipcommon rev */
-       sii->pub.ccrev = (int)ai_corerev(&sii->pub);
+       sii->pub.ccrev = cc->id.rev;
 
        /* get chipcommon chipstatus */
-       if (sii->pub.ccrev >= 11)
-               sii->pub.chipst = R_REG(&cc->chipstatus);
+       if (ai_get_ccrev(&sii->pub) >= 11)
+               sii->chipst = bcma_read32(cc, CHIPCREGOFFS(chipstatus));
 
        /* get chipcommon capabilites */
-       sii->pub.cccaps = R_REG(&cc->capabilities);
-       /* get chipcommon extended capabilities */
-
-       if (sii->pub.ccrev >= 35)
-               sii->pub.cccaps_ext = R_REG(&cc->capabilities_ext);
+       sii->pub.cccaps = bcma_read32(cc, CHIPCREGOFFS(capabilities));
 
        /* get pmu rev and caps */
-       if (sii->pub.cccaps & CC_CAP_PMU) {
-               sii->pub.pmucaps = R_REG(&cc->pmucapabilities);
+       if (ai_get_cccaps(&sii->pub) & CC_CAP_PMU) {
+               sii->pub.pmucaps = bcma_read32(cc,
+                                              CHIPCREGOFFS(pmucapabilities));
                sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
        }
 
-       /* figure out bus/orignal core idx */
-       sii->pub.buscoretype = NODEV_CORE_ID;
-       sii->pub.buscorerev = NOREV;
-       sii->pub.buscoreidx = BADIDX;
-
-       pci = pcie = false;
-       pcirev = pcierev = NOREV;
-       pciidx = pcieidx = BADIDX;
-
-       for (i = 0; i < sii->numcores; i++) {
+       /* figure out buscore */
+       list_for_each_entry(core, &cc->bus->cores, list) {
                uint cid, crev;
 
-               ai_setcoreidx(&sii->pub, i);
-               cid = ai_coreid(&sii->pub);
-               crev = ai_corerev(&sii->pub);
+               cid = core->id.id;
+               crev = core->id.rev;
 
                if (cid == PCI_CORE_ID) {
-                       pciidx = i;
-                       pcirev = crev;
-                       pci = true;
+                       pci = core;
                } else if (cid == PCIE_CORE_ID) {
-                       pcieidx = i;
-                       pcierev = crev;
-                       pcie = true;
+                       pcie = core;
                }
-
-               /* find the core idx before entering this func. */
-               if ((savewin && (savewin == sii->coresba[i])) ||
-                   (cc == sii->regs[i]))
-                       *origidx = i;
        }
 
        if (pci && pcie) {
                if (ai_ispcie(sii))
-                       pci = false;
+                       pci = NULL;
                else
-                       pcie = false;
+                       pcie = NULL;
        }
        if (pci) {
-               sii->pub.buscoretype = PCI_CORE_ID;
-               sii->pub.buscorerev = pcirev;
-               sii->pub.buscoreidx = pciidx;
+               sii->buscore = pci;
        } else if (pcie) {
-               sii->pub.buscoretype = PCIE_CORE_ID;
-               sii->pub.buscorerev = pcierev;
-               sii->pub.buscoreidx = pcieidx;
+               sii->buscore = pcie;
        }
 
        /* fixup necessary chip/core configurations */
-       if (SI_FAST(sii)) {
-               if (!sii->pch) {
-                       sii->pch = pcicore_init(&sii->pub, sii->pbus,
-                                               (__iomem void *)PCIEREGS(sii));
-                       if (sii->pch == NULL)
-                               return false;
-               }
+       if (!sii->pch) {
+               sii->pch = pcicore_init(&sii->pub, sii->icbus->drv_pci.core);
+               if (sii->pch == NULL)
+                       return false;
        }
-       if (ai_pci_fixcfg(&sii->pub)) {
-               /* si_doattach: si_pci_fixcfg failed */
+       if (ai_pci_fixcfg(&sii->pub))
                return false;
-       }
-
-       /* return to the original core */
-       ai_setcoreidx(&sii->pub, *origidx);
 
        return true;
 }
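
From this point the hand-rolled EROM scan is no longer needed: the bcma bus driver has already enumerated the cores, so ai_buscore_setup() simply walks bus->cores, and chipcommon registers are accessed with bcma_read32()/bcma_write32() at a structure offset (the CHIPCREGOFFS() uses above are assumed to expand to offsetof(struct chipcregs, field)). A minimal sketch of the same list walk, matching the ai_findcore() rewrite further down; find_first_core is an illustrative name.

#include <linux/bcma/bcma.h>

static struct bcma_device *find_first_core(struct bcma_bus *bus, u16 coreid)
{
        struct bcma_device *core;

        list_for_each_entry(core, &bus->cores, list)
                if (core->id.id == coreid)
                        return core;

        return NULL;
}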
@@ -1019,39 +551,27 @@ static __used void ai_nvram_process(struct si_info *sii)
        uint w = 0;
 
        /* do a pci config read to get subsystem id and subvendor id */
-       pci_read_config_dword(sii->pbus, PCI_SUBSYSTEM_VENDOR_ID, &w);
+       pci_read_config_dword(sii->pcibus, PCI_SUBSYSTEM_VENDOR_ID, &w);
 
        sii->pub.boardvendor = w & 0xffff;
        sii->pub.boardtype = (w >> 16) & 0xffff;
-       sii->pub.boardflags = getintvar(&sii->pub, BRCMS_SROM_BOARDFLAGS);
 }
 
 static struct si_info *ai_doattach(struct si_info *sii,
-                                  void __iomem *regs, struct pci_dev *pbus)
+                                  struct bcma_bus *pbus)
 {
        struct si_pub *sih = &sii->pub;
        u32 w, savewin;
-       struct chipcregs __iomem *cc;
+       struct bcma_device *cc;
        uint socitype;
-       uint origidx;
-
-       memset((unsigned char *) sii, 0, sizeof(struct si_info));
 
        savewin = 0;
 
-       sih->buscoreidx = BADIDX;
-
-       sii->curmap = regs;
-       sii->pbus = pbus;
+       sii->icbus = pbus;
+       sii->pcibus = pbus->host_pci;
 
-       /* find Chipcommon address */
-       pci_read_config_dword(sii->pbus, PCI_BAR0_WIN, &savewin);
-       if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
-               savewin = SI_ENUM_BASE;
-
-       pci_write_config_dword(sii->pbus, PCI_BAR0_WIN,
-                              SI_ENUM_BASE);
-       cc = (struct chipcregs __iomem *) regs;
+       /* switch to Chipcommon core */
+       cc = pbus->drv_cc.core;
 
        /* bus/core/clk setup for register access */
        if (!ai_buscore_prep(sii))
@@ -1064,89 +584,69 @@ static struct si_info *ai_doattach(struct si_info *sii,
         *   hosts w/o chipcommon), some way of recognizing them needs to
         *   be added here.
         */
-       w = R_REG(&cc->chipid);
+       w = bcma_read32(cc, CHIPCREGOFFS(chipid));
        socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
        /* Might as wll fill in chip id rev & pkg */
        sih->chip = w & CID_ID_MASK;
        sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
        sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
 
-       sih->issim = false;
-
        /* scan for cores */
-       if (socitype == SOCI_AI) {
-               SI_MSG("Found chip type AI (0x%08x)\n", w);
-               /* pass chipc address instead of original core base */
-               ai_scan(&sii->pub, cc);
-       } else {
-               /* Found chip of unknown type */
-               return NULL;
-       }
-       /* no cores found, bail out */
-       if (sii->numcores == 0)
+       if (socitype != SOCI_AI)
                return NULL;
 
-       /* bus/core/clk setup */
-       origidx = SI_CC_IDX;
-       if (!ai_buscore_setup(sii, savewin, &origidx))
+       SI_MSG("Found chip type AI (0x%08x)\n", w);
+       if (!ai_buscore_setup(sii, cc))
                goto exit;
 
        /* Init nvram from sprom/otp if they exist */
-       if (srom_var_init(&sii->pub, cc))
+       if (srom_var_init(&sii->pub))
                goto exit;
 
        ai_nvram_process(sii);
 
        /* === NVRAM, clock is ready === */
-       cc = (struct chipcregs __iomem *) ai_setcore(sih, CC_CORE_ID, 0);
-       W_REG(&cc->gpiopullup, 0);
-       W_REG(&cc->gpiopulldown, 0);
-       ai_setcoreidx(sih, origidx);
+       bcma_write32(cc, CHIPCREGOFFS(gpiopullup), 0);
+       bcma_write32(cc, CHIPCREGOFFS(gpiopulldown), 0);
 
        /* PMU specific initializations */
-       if (sih->cccaps & CC_CAP_PMU) {
-               u32 xtalfreq;
+       if (ai_get_cccaps(sih) & CC_CAP_PMU) {
                si_pmu_init(sih);
-               si_pmu_chip_init(sih);
-
-               xtalfreq = si_pmu_measure_alpclk(sih);
-               si_pmu_pll_init(sih, xtalfreq);
+               (void)si_pmu_measure_alpclk(sih);
                si_pmu_res_init(sih);
-               si_pmu_swreg_init(sih);
        }
 
        /* setup the GPIO based LED powersave register */
        w = getintvar(sih, BRCMS_SROM_LEDDC);
        if (w == 0)
                w = DEFAULT_GPIOTIMERVAL;
-       ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, gpiotimerval),
-                  ~0, w);
+       ai_cc_reg(sih, offsetof(struct chipcregs, gpiotimerval),
+                 ~0, w);
 
-       if (PCIE(sii))
+       if (PCIE(sih))
                pcicore_attach(sii->pch, SI_DOATTACH);
 
-       if (sih->chip == BCM43224_CHIP_ID) {
+       if (ai_get_chip_id(sih) == BCM43224_CHIP_ID) {
                /*
                 * enable 12 mA drive strenth for 43224 and
                 * set chipControl register bit 15
                 */
-               if (sih->chiprev == 0) {
+               if (ai_get_chiprev(sih) == 0) {
                        SI_MSG("Applying 43224A0 WARs\n");
-                       ai_corereg(sih, SI_CC_IDX,
-                                  offsetof(struct chipcregs, chipcontrol),
-                                  CCTRL43224_GPIO_TOGGLE,
-                                  CCTRL43224_GPIO_TOGGLE);
+                       ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol),
+                                 CCTRL43224_GPIO_TOGGLE,
+                                 CCTRL43224_GPIO_TOGGLE);
                        si_pmu_chipcontrol(sih, 0, CCTRL_43224A0_12MA_LED_DRIVE,
                                           CCTRL_43224A0_12MA_LED_DRIVE);
                }
-               if (sih->chiprev >= 1) {
+               if (ai_get_chiprev(sih) >= 1) {
                        SI_MSG("Applying 43224B0+ WARs\n");
                        si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE,
                                           CCTRL_43224B0_12MA_LED_DRIVE);
                }
        }
 
-       if (sih->chip == BCM4313_CHIP_ID) {
+       if (ai_get_chip_id(sih) == BCM4313_CHIP_ID) {
                /*
                 * enable 12 mA drive strenth for 4313 and
                 * set chipControl register bit 1
@@ -1167,22 +667,19 @@ static struct si_info *ai_doattach(struct si_info *sii,
 }
 
 /*
- * Allocate a si handle.
- * devid - pci device id (used to determine chip#)
- * osh - opaque OS handle
- * regs - virtual address of initial core registers
+ * Allocate a si handle and do the attach.
  */
 struct si_pub *
-ai_attach(void __iomem *regs, struct pci_dev *sdh)
+ai_attach(struct bcma_bus *pbus)
 {
        struct si_info *sii;
 
        /* alloc struct si_info */
-       sii = kmalloc(sizeof(struct si_info), GFP_ATOMIC);
+       sii = kzalloc(sizeof(struct si_info), GFP_ATOMIC);
        if (sii == NULL)
                return NULL;
 
-       if (ai_doattach(sii, regs, sdh) == NULL) {
+       if (ai_doattach(sii, pbus) == NULL) {
                kfree(sii);
                return NULL;
        }
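
Note the pairing across hunks: the memset() dropped at the top of ai_doattach() is compensated by switching this allocation to kzalloc(), which returns zeroed memory. A trivial sketch of the equivalence, with alloc_zeroed as an illustrative name:

#include <linux/slab.h>

static void *alloc_zeroed(size_t size)
{
        /* same effect as kmalloc(size, GFP_ATOMIC) followed by memset(p, 0, size) */
        return kzalloc(size, GFP_ATOMIC);
}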
@@ -1211,292 +708,66 @@ void ai_detach(struct si_pub *sih)
        kfree(sii);
 }
 
-/* register driver interrupt disabling and restoring callback functions */
-void
-ai_register_intr_callback(struct si_pub *sih, void *intrsoff_fn,
-                         void *intrsrestore_fn,
-                         void *intrsenabled_fn, void *intr_arg)
-{
-       struct si_info *sii;
-
-       sii = (struct si_info *)sih;
-       sii->intr_arg = intr_arg;
-       sii->intrsoff_fn = (u32 (*)(void *)) intrsoff_fn;
-       sii->intrsrestore_fn = (void (*) (void *, u32)) intrsrestore_fn;
-       sii->intrsenabled_fn = (bool (*)(void *)) intrsenabled_fn;
-       /* save current core id.  when this function called, the current core
-        * must be the core which provides driver functions(il, et, wl, etc.)
-        */
-       sii->dev_coreid = sii->coreid[sii->curidx];
-}
-
-void ai_deregister_intr_callback(struct si_pub *sih)
-{
-       struct si_info *sii;
-
-       sii = (struct si_info *)sih;
-       sii->intrsoff_fn = NULL;
-}
-
-uint ai_coreid(struct si_pub *sih)
-{
-       struct si_info *sii;
-
-       sii = (struct si_info *)sih;
-       return sii->coreid[sii->curidx];
-}
-
-uint ai_coreidx(struct si_pub *sih)
-{
-       struct si_info *sii;
-
-       sii = (struct si_info *)sih;
-       return sii->curidx;
-}
-
-bool ai_backplane64(struct si_pub *sih)
-{
-       return (sih->cccaps & CC_CAP_BKPLN64) != 0;
-}
-
 /* return index of coreid or BADIDX if not found */
-uint ai_findcoreidx(struct si_pub *sih, uint coreid, uint coreunit)
+struct bcma_device *ai_findcore(struct si_pub *sih, u16 coreid, u16 coreunit)
 {
+       struct bcma_device *core;
        struct si_info *sii;
        uint found;
-       uint i;
 
        sii = (struct si_info *)sih;
 
        found = 0;
 
-       for (i = 0; i < sii->numcores; i++)
-               if (sii->coreid[i] == coreid) {
+       list_for_each_entry(core, &sii->icbus->cores, list)
+               if (core->id.id == coreid) {
                        if (found == coreunit)
-                               return i;
+                               return core;
                        found++;
                }
 
-       return BADIDX;
-}
-
-/*
- * This function changes logical "focus" to the indicated core;
- * must be called with interrupts off.
- * Moreover, callers should keep interrupts off during switching
- * out of and back to d11 core.
- */
-void __iomem *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit)
-{
-       uint idx;
-
-       idx = ai_findcoreidx(sih, coreid, coreunit);
-       if (idx >= SI_MAXCORES)
-               return NULL;
-
-       return ai_setcoreidx(sih, idx);
-}
-
-/* Turn off interrupt as required by ai_setcore, before switch core */
-void __iomem *ai_switch_core(struct si_pub *sih, uint coreid, uint *origidx,
-                            uint *intr_val)
-{
-       void __iomem *cc;
-       struct si_info *sii;
-
-       sii = (struct si_info *)sih;
-
-       if (SI_FAST(sii)) {
-               /* Overloading the origidx variable to remember the coreid,
-                * this works because the core ids cannot be confused with
-                * core indices.
-                */
-               *origidx = coreid;
-               if (coreid == CC_CORE_ID)
-                       return CCREGS_FAST(sii);
-               else if (coreid == sih->buscoretype)
-                       return PCIEREGS(sii);
-       }
-       INTR_OFF(sii, *intr_val);
-       *origidx = sii->curidx;
-       cc = ai_setcore(sih, coreid, 0);
-       return cc;
-}
-
-/* restore coreidx and restore interrupt */
-void ai_restore_core(struct si_pub *sih, uint coreid, uint intr_val)
-{
-       struct si_info *sii;
-
-       sii = (struct si_info *)sih;
-       if (SI_FAST(sii)
-           && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype)))
-               return;
-
-       ai_setcoreidx(sih, coreid);
-       INTR_RESTORE(sii, intr_val);
-}
-
-void ai_write_wrapperreg(struct si_pub *sih, u32 offset, u32 val)
-{
-       struct si_info *sii = (struct si_info *)sih;
-       u32 *w = (u32 *) sii->curwrap;
-       W_REG(w + (offset / 4), val);
-       return;
+       return NULL;
 }
 
 /*
- * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set
- * operation, switch back to the original core, and return the new value.
- *
- * When using the silicon backplane, no fiddling with interrupts or core
- * switches is needed.
- *
- * Also, when using pci/pcie, we can optimize away the core switching for pci
- * registers and (on newer pci cores) chipcommon registers.
+ * read/modify chipcommon core register.
  */
-uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
-               uint val)
+uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val)
 {
-       uint origidx = 0;
-       u32 __iomem *r = NULL;
-       uint w;
-       uint intr_val = 0;
-       bool fast = false;
+       struct bcma_device *cc;
+       u32 w;
        struct si_info *sii;
 
        sii = (struct si_info *)sih;
-
-       if (coreidx >= SI_MAXCORES)
-               return 0;
-
-       /*
-        * If pci/pcie, we can get at pci/pcie regs
-        * and on newer cores to chipc
-        */
-       if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
-               /* Chipc registers are mapped at 12KB */
-               fast = true;
-               r = (u32 __iomem *)((__iomem char *)sii->curmap +
-                                   PCI_16KB0_CCREGS_OFFSET + regoff);
-       } else if (sii->pub.buscoreidx == coreidx) {
-               /*
-                * pci registers are at either in the last 2KB of
-                * an 8KB window or, in pcie and pci rev 13 at 8KB
-                */
-               fast = true;
-               if (SI_FAST(sii))
-                       r = (u32 __iomem *)((__iomem char *)sii->curmap +
-                                   PCI_16KB0_PCIREGS_OFFSET + regoff);
-               else
-                       r = (u32 __iomem *)((__iomem char *)sii->curmap +
-                                   ((regoff >= SBCONFIGOFF) ?
-                                     PCI_BAR0_PCISBR_OFFSET :
-                                     PCI_BAR0_PCIREGS_OFFSET) + regoff);
-       }
-
-       if (!fast) {
-               INTR_OFF(sii, intr_val);
-
-               /* save current core index */
-               origidx = ai_coreidx(&sii->pub);
-
-               /* switch core */
-               r = (u32 __iomem *) ((unsigned char __iomem *)
-                       ai_setcoreidx(&sii->pub, coreidx) + regoff);
-       }
+       cc = sii->icbus->drv_cc.core;
 
        /* mask and set */
        if (mask || val) {
-               w = (R_REG(r) & ~mask) | val;
-               W_REG(r, w);
+               bcma_maskset32(cc, regoff, ~mask, val);
        }
 
        /* readback */
-       w = R_REG(r);
-
-       if (!fast) {
-               /* restore core index */
-               if (origidx != coreidx)
-                       ai_setcoreidx(&sii->pub, origidx);
-
-               INTR_RESTORE(sii, intr_val);
-       }
+       w = bcma_read32(cc, regoff);
 
        return w;
 }
 
-void ai_core_disable(struct si_pub *sih, u32 bits)
-{
-       struct si_info *sii;
-       u32 dummy;
-       struct aidmp *ai;
-
-       sii = (struct si_info *)sih;
-
-       ai = sii->curwrap;
-
-       /* if core is already in reset, just return */
-       if (R_REG(&ai->resetctrl) & AIRC_RESET)
-               return;
-
-       W_REG(&ai->ioctrl, bits);
-       dummy = R_REG(&ai->ioctrl);
-       udelay(10);
-
-       W_REG(&ai->resetctrl, AIRC_RESET);
-       udelay(1);
-}
-
-/* reset and re-enable a core
- * inputs:
- * bits - core specific bits that are set during and after reset sequence
- * resetbits - core specific bits that are set only during reset sequence
- */
-void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits)
-{
-       struct si_info *sii;
-       struct aidmp *ai;
-       u32 dummy;
-
-       sii = (struct si_info *)sih;
-       ai = sii->curwrap;
-
-       /*
-        * Must do the disable sequence first to work
-        * for arbitrary current core state.
-        */
-       ai_core_disable(sih, (bits | resetbits));
-
-       /*
-        * Now do the initialization sequence.
-        */
-       W_REG(&ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
-       dummy = R_REG(&ai->ioctrl);
-       W_REG(&ai->resetctrl, 0);
-       udelay(1);
-
-       W_REG(&ai->ioctrl, (bits | SICF_CLOCK_EN));
-       dummy = R_REG(&ai->ioctrl);
-       udelay(1);
-}
-
 /* return the slow clock source - LPO, XTAL, or PCI */
-static uint ai_slowclk_src(struct si_info *sii)
+static uint ai_slowclk_src(struct si_pub *sih, struct bcma_device *cc)
 {
-       struct chipcregs __iomem *cc;
+       struct si_info *sii;
        u32 val;
 
-       if (sii->pub.ccrev < 6) {
-               pci_read_config_dword(sii->pbus, PCI_GPIO_OUT,
+       sii = (struct si_info *)sih;
+       if (ai_get_ccrev(&sii->pub) < 6) {
+               pci_read_config_dword(sii->pcibus, PCI_GPIO_OUT,
                                      &val);
                if (val & PCI_CFG_GPIO_SCS)
                        return SCC_SS_PCI;
                return SCC_SS_XTAL;
-       } else if (sii->pub.ccrev < 10) {
-               cc = (struct chipcregs __iomem *)
-                       ai_setcoreidx(&sii->pub, sii->curidx);
-               return R_REG(&cc->slow_clk_ctl) & SCC_SS_MASK;
+       } else if (ai_get_ccrev(&sii->pub) < 10) {
+               return bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl)) &
+                      SCC_SS_MASK;
        } else                  /* Insta-clock */
                return SCC_SS_XTAL;
 }
@@ -1505,24 +776,24 @@ static uint ai_slowclk_src(struct si_info *sii)
 * return the ILP (slowclock) min or max frequency
 * precondition: we've established the chip has dynamic clk control
 */
-static uint ai_slowclk_freq(struct si_info *sii, bool max_freq,
-                           struct chipcregs __iomem *cc)
+static uint ai_slowclk_freq(struct si_pub *sih, bool max_freq,
+                           struct bcma_device *cc)
 {
        u32 slowclk;
        uint div;
 
-       slowclk = ai_slowclk_src(sii);
-       if (sii->pub.ccrev < 6) {
+       slowclk = ai_slowclk_src(sih, cc);
+       if (ai_get_ccrev(sih) < 6) {
                if (slowclk == SCC_SS_PCI)
                        return max_freq ? (PCIMAXFREQ / 64)
                                : (PCIMINFREQ / 64);
                else
                        return max_freq ? (XTALMAXFREQ / 32)
                                : (XTALMINFREQ / 32);
-       } else if (sii->pub.ccrev < 10) {
+       } else if (ai_get_ccrev(sih) < 10) {
                div = 4 *
-                   (((R_REG(&cc->slow_clk_ctl) & SCC_CD_MASK) >>
-                     SCC_CD_SHIFT) + 1);
+                   (((bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl)) &
+                     SCC_CD_MASK) >> SCC_CD_SHIFT) + 1);
                if (slowclk == SCC_SS_LPO)
                        return max_freq ? LPOMAXFREQ : LPOMINFREQ;
                else if (slowclk == SCC_SS_XTAL)
@@ -1533,15 +804,15 @@ static uint ai_slowclk_freq(struct si_info *sii, bool max_freq,
                                : (PCIMINFREQ / div);
        } else {
                /* Chipc rev 10 is InstaClock */
-               div = R_REG(&cc->system_clk_ctl) >> SYCC_CD_SHIFT;
-               div = 4 * (div + 1);
+               div = bcma_read32(cc, CHIPCREGOFFS(system_clk_ctl));
+               div = 4 * ((div >> SYCC_CD_SHIFT) + 1);
                return max_freq ? XTALMAXFREQ : (XTALMINFREQ / div);
        }
        return 0;
 }
 
 static void
-ai_clkctl_setdelay(struct si_info *sii, struct chipcregs __iomem *cc)
+ai_clkctl_setdelay(struct si_pub *sih, struct bcma_device *cc)
 {
        uint slowmaxfreq, pll_delay, slowclk;
        uint pll_on_delay, fref_sel_delay;
@@ -1554,55 +825,40 @@ ai_clkctl_setdelay(struct si_info *sii, struct chipcregs __iomem *cc)
         * powered down by dynamic clk control logic.
         */
 
-       slowclk = ai_slowclk_src(sii);
+       slowclk = ai_slowclk_src(sih, cc);
        if (slowclk != SCC_SS_XTAL)
                pll_delay += XTAL_ON_DELAY;
 
        /* Starting with 4318 it is ILP that is used for the delays */
        slowmaxfreq =
-           ai_slowclk_freq(sii, (sii->pub.ccrev >= 10) ? false : true, cc);
+           ai_slowclk_freq(sih,
+                           (ai_get_ccrev(sih) >= 10) ? false : true, cc);
 
        pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
        fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
 
-       W_REG(&cc->pll_on_delay, pll_on_delay);
-       W_REG(&cc->fref_sel_delay, fref_sel_delay);
+       bcma_write32(cc, CHIPCREGOFFS(pll_on_delay), pll_on_delay);
+       bcma_write32(cc, CHIPCREGOFFS(fref_sel_delay), fref_sel_delay);
 }
 
 /* initialize power control delay registers */
 void ai_clkctl_init(struct si_pub *sih)
 {
-       struct si_info *sii;
-       uint origidx = 0;
-       struct chipcregs __iomem *cc;
-       bool fast;
+       struct bcma_device *cc;
 
-       if (!(sih->cccaps & CC_CAP_PWR_CTL))
+       if (!(ai_get_cccaps(sih) & CC_CAP_PWR_CTL))
                return;
 
-       sii = (struct si_info *)sih;
-       fast = SI_FAST(sii);
-       if (!fast) {
-               origidx = sii->curidx;
-               cc = (struct chipcregs __iomem *)
-                       ai_setcore(sih, CC_CORE_ID, 0);
-               if (cc == NULL)
-                       return;
-       } else {
-               cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
-               if (cc == NULL)
-                       return;
-       }
+       cc = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
+       if (cc == NULL)
+               return;
 
        /* set all Instaclk chip ILP to 1 MHz */
-       if (sih->ccrev >= 10)
-               SET_REG(&cc->system_clk_ctl, SYCC_CD_MASK,
-                       (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
+       if (ai_get_ccrev(sih) >= 10)
+               bcma_maskset32(cc, CHIPCREGOFFS(system_clk_ctl), SYCC_CD_MASK,
+                              (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
 
-       ai_clkctl_setdelay(sii, cc);
-
-       if (!fast)
-               ai_setcoreidx(sih, origidx);
+       ai_clkctl_setdelay(sih, cc);
 }
 
 /*
@@ -1612,47 +868,25 @@ void ai_clkctl_init(struct si_pub *sih)
 u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih)
 {
        struct si_info *sii;
-       uint origidx = 0;
-       struct chipcregs __iomem *cc;
+       struct bcma_device *cc;
        uint slowminfreq;
        u16 fpdelay;
-       uint intr_val = 0;
-       bool fast;
 
        sii = (struct si_info *)sih;
-       if (sih->cccaps & CC_CAP_PMU) {
-               INTR_OFF(sii, intr_val);
+       if (ai_get_cccaps(sih) & CC_CAP_PMU) {
                fpdelay = si_pmu_fast_pwrup_delay(sih);
-               INTR_RESTORE(sii, intr_val);
                return fpdelay;
        }
 
-       if (!(sih->cccaps & CC_CAP_PWR_CTL))
+       if (!(ai_get_cccaps(sih) & CC_CAP_PWR_CTL))
                return 0;
 
-       fast = SI_FAST(sii);
        fpdelay = 0;
-       if (!fast) {
-               origidx = sii->curidx;
-               INTR_OFF(sii, intr_val);
-               cc = (struct chipcregs __iomem *)
-                       ai_setcore(sih, CC_CORE_ID, 0);
-               if (cc == NULL)
-                       goto done;
-       } else {
-               cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
-               if (cc == NULL)
-                       goto done;
-       }
-
-       slowminfreq = ai_slowclk_freq(sii, false, cc);
-       fpdelay = (((R_REG(&cc->pll_on_delay) + 2) * 1000000) +
-                  (slowminfreq - 1)) / slowminfreq;
-
- done:
-       if (!fast) {
-               ai_setcoreidx(sih, origidx);
-               INTR_RESTORE(sii, intr_val);
+       cc = ai_findcore(sih, CC_CORE_ID, 0);
+       if (cc) {
+               slowminfreq = ai_slowclk_freq(sih, false, cc);
+               fpdelay = (((bcma_read32(cc, CHIPCREGOFFS(pll_on_delay)) + 2)
+                           * 1000000) + (slowminfreq - 1)) / slowminfreq;
        }
        return fpdelay;
 }
@@ -1666,12 +900,12 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
        sii = (struct si_info *)sih;
 
        /* pcie core doesn't have any mapping to control the xtal pu */
-       if (PCIE(sii))
+       if (PCIE(sih))
                return -1;
 
-       pci_read_config_dword(sii->pbus, PCI_GPIO_IN, &in);
-       pci_read_config_dword(sii->pbus, PCI_GPIO_OUT, &out);
-       pci_read_config_dword(sii->pbus, PCI_GPIO_OUTEN, &outen);
+       pci_read_config_dword(sii->pcibus, PCI_GPIO_IN, &in);
+       pci_read_config_dword(sii->pcibus, PCI_GPIO_OUT, &out);
+       pci_read_config_dword(sii->pcibus, PCI_GPIO_OUTEN, &outen);
 
        /*
         * Avoid glitching the clock if GPRS is already using it.
@@ -1692,9 +926,9 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
                        out |= PCI_CFG_GPIO_XTAL;
                        if (what & PLL)
                                out |= PCI_CFG_GPIO_PLL;
-                       pci_write_config_dword(sii->pbus,
+                       pci_write_config_dword(sii->pcibus,
                                               PCI_GPIO_OUT, out);
-                       pci_write_config_dword(sii->pbus,
+                       pci_write_config_dword(sii->pcibus,
                                               PCI_GPIO_OUTEN, outen);
                        udelay(XTAL_ON_DELAY);
                }
@@ -1702,7 +936,7 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
                /* turn pll on */
                if (what & PLL) {
                        out &= ~PCI_CFG_GPIO_PLL;
-                       pci_write_config_dword(sii->pbus,
+                       pci_write_config_dword(sii->pcibus,
                                               PCI_GPIO_OUT, out);
                        mdelay(2);
                }
@@ -1711,9 +945,9 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
                        out &= ~PCI_CFG_GPIO_XTAL;
                if (what & PLL)
                        out |= PCI_CFG_GPIO_PLL;
-               pci_write_config_dword(sii->pbus,
+               pci_write_config_dword(sii->pcibus,
                                       PCI_GPIO_OUT, out);
-               pci_write_config_dword(sii->pbus,
+               pci_write_config_dword(sii->pcibus,
                                       PCI_GPIO_OUTEN, outen);
        }
 
@@ -1723,63 +957,52 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
 /* clk control mechanism through chipcommon, no policy checking */
 static bool _ai_clkctl_cc(struct si_info *sii, uint mode)
 {
-       uint origidx = 0;
-       struct chipcregs __iomem *cc;
+       struct bcma_device *cc;
        u32 scc;
-       uint intr_val = 0;
-       bool fast = SI_FAST(sii);
 
        /* chipcommon cores prior to rev6 don't support dynamic clock control */
-       if (sii->pub.ccrev < 6)
+       if (ai_get_ccrev(&sii->pub) < 6)
                return false;
 
-       if (!fast) {
-               INTR_OFF(sii, intr_val);
-               origidx = sii->curidx;
-               cc = (struct chipcregs __iomem *)
-                                       ai_setcore(&sii->pub, CC_CORE_ID, 0);
-       } else {
-               cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
-               if (cc == NULL)
-                       goto done;
-       }
+       cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0);
 
-       if (!(sii->pub.cccaps & CC_CAP_PWR_CTL) && (sii->pub.ccrev < 20))
-               goto done;
+       if (!(ai_get_cccaps(&sii->pub) & CC_CAP_PWR_CTL) &&
+           (ai_get_ccrev(&sii->pub) < 20))
+               return mode == CLK_FAST;
 
        switch (mode) {
        case CLK_FAST:          /* FORCEHT, fast (pll) clock */
-               if (sii->pub.ccrev < 10) {
+               if (ai_get_ccrev(&sii->pub) < 10) {
                        /*
                         * don't forget to force xtal back
                         * on before we clear SCC_DYN_XTAL..
                         */
                        ai_clkctl_xtal(&sii->pub, XTAL, ON);
-                       SET_REG(&cc->slow_clk_ctl,
-                               (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
-               } else if (sii->pub.ccrev < 20) {
-                       OR_REG(&cc->system_clk_ctl, SYCC_HR);
+                       bcma_maskset32(cc, CHIPCREGOFFS(slow_clk_ctl),
+                                      (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
+               } else if (ai_get_ccrev(&sii->pub) < 20) {
+                       bcma_set32(cc, CHIPCREGOFFS(system_clk_ctl), SYCC_HR);
                } else {
-                       OR_REG(&cc->clk_ctl_st, CCS_FORCEHT);
+                       bcma_set32(cc, CHIPCREGOFFS(clk_ctl_st), CCS_FORCEHT);
                }
 
                /* wait for the PLL */
-               if (sii->pub.cccaps & CC_CAP_PMU) {
+               if (ai_get_cccaps(&sii->pub) & CC_CAP_PMU) {
                        u32 htavail = CCS_HTAVAIL;
-                       SPINWAIT(((R_REG(&cc->clk_ctl_st) & htavail)
-                                 == 0), PMU_MAX_TRANSITION_DLY);
+                       SPINWAIT(((bcma_read32(cc, CHIPCREGOFFS(clk_ctl_st)) &
+                                  htavail) == 0), PMU_MAX_TRANSITION_DLY);
                } else {
                        udelay(PLL_DELAY);
                }
                break;
 
        case CLK_DYNAMIC:       /* enable dynamic clock control */
-               if (sii->pub.ccrev < 10) {
-                       scc = R_REG(&cc->slow_clk_ctl);
+               if (ai_get_ccrev(&sii->pub) < 10) {
+                       scc = bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl));
                        scc &= ~(SCC_FS | SCC_IP | SCC_XC);
                        if ((scc & SCC_SS_MASK) != SCC_SS_XTAL)
                                scc |= SCC_XC;
-                       W_REG(&cc->slow_clk_ctl, scc);
+                       bcma_write32(cc, CHIPCREGOFFS(slow_clk_ctl), scc);
 
                        /*
                         * for dynamic control, we have to
@@ -1787,11 +1010,11 @@ static bool _ai_clkctl_cc(struct si_info *sii, uint mode)
                         */
                        if (scc & SCC_XC)
                                ai_clkctl_xtal(&sii->pub, XTAL, OFF);
-               } else if (sii->pub.ccrev < 20) {
+               } else if (ai_get_ccrev(&sii->pub) < 20) {
                        /* Instaclock */
-                       AND_REG(&cc->system_clk_ctl, ~SYCC_HR);
+                       bcma_mask32(cc, CHIPCREGOFFS(system_clk_ctl), ~SYCC_HR);
                } else {
-                       AND_REG(&cc->clk_ctl_st, ~CCS_FORCEHT);
+                       bcma_mask32(cc, CHIPCREGOFFS(clk_ctl_st), ~CCS_FORCEHT);
                }
                break;
 
@@ -1799,11 +1022,6 @@ static bool _ai_clkctl_cc(struct si_info *sii, uint mode)
                break;
        }
 
- done:
-       if (!fast) {
-               ai_setcoreidx(&sii->pub, origidx);
-               INTR_RESTORE(sii, intr_val);
-       }
        return mode == CLK_FAST;
 }
 
@@ -1822,46 +1040,25 @@ bool ai_clkctl_cc(struct si_pub *sih, uint mode)
        sii = (struct si_info *)sih;
 
        /* chipcommon cores prior to rev6 don't support dynamic clock control */
-       if (sih->ccrev < 6)
+       if (ai_get_ccrev(sih) < 6)
                return false;
 
-       if (PCI_FORCEHT(sii))
+       if (PCI_FORCEHT(sih))
                return mode == CLK_FAST;
 
        return _ai_clkctl_cc(sii, mode);
 }
 
-/* Build device path */
-int ai_devpath(struct si_pub *sih, char *path, int size)
-{
-       int slen;
-
-       if (!path || size <= 0)
-               return -1;
-
-       slen = snprintf(path, (size_t) size, "pci/%u/%u/",
-               ((struct si_info *)sih)->pbus->bus->number,
-               PCI_SLOT(((struct pci_dev *)
-                               (((struct si_info *)(sih))->pbus))->devfn));
-
-       if (slen < 0 || slen >= size) {
-               path[0] = '\0';
-               return -1;
-       }
-
-       return 0;
-}
-
 void ai_pci_up(struct si_pub *sih)
 {
        struct si_info *sii;
 
        sii = (struct si_info *)sih;
 
-       if (PCI_FORCEHT(sii))
+       if (PCI_FORCEHT(sih))
                _ai_clkctl_cc(sii, CLK_FAST);
 
-       if (PCIE(sii))
+       if (PCIE(sih))
                pcicore_up(sii->pch, SI_PCIUP);
 
 }
@@ -1884,7 +1081,7 @@ void ai_pci_down(struct si_pub *sih)
        sii = (struct si_info *)sih;
 
        /* release FORCEHT since chip is going to "down" state */
-       if (PCI_FORCEHT(sii))
+       if (PCI_FORCEHT(sih))
                _ai_clkctl_cc(sii, CLK_DYNAMIC);
 
        pcicore_down(sii->pch, SI_PCIDOWN);
@@ -1897,42 +1094,23 @@ void ai_pci_down(struct si_pub *sih)
 void ai_pci_setup(struct si_pub *sih, uint coremask)
 {
        struct si_info *sii;
-       struct sbpciregs __iomem *regs = NULL;
-       u32 siflag = 0, w;
-       uint idx = 0;
+       u32 w;
 
        sii = (struct si_info *)sih;
 
-       if (PCI(sii)) {
-               /* get current core index */
-               idx = sii->curidx;
-
-               /* we interrupt on this backplane flag number */
-               siflag = ai_flag(sih);
-
-               /* switch over to pci core */
-               regs = ai_setcoreidx(sih, sii->pub.buscoreidx);
-       }
-
        /*
         * Enable sb->pci interrupts.  Assume
         * PCI rev 2.3 support was added in pci core rev 6 and things changed..
         */
-       if (PCIE(sii) || (PCI(sii) && ((sii->pub.buscorerev) >= 6))) {
+       if (PCIE(sih) || (PCI(sih) && (ai_get_buscorerev(sih) >= 6))) {
                /* pci config write to set this core bit in PCIIntMask */
-               pci_read_config_dword(sii->pbus, PCI_INT_MASK, &w);
+               pci_read_config_dword(sii->pcibus, PCI_INT_MASK, &w);
                w |= (coremask << PCI_SBIM_SHIFT);
-               pci_write_config_dword(sii->pbus, PCI_INT_MASK, w);
-       } else {
-               /* set sbintvec bit for our flag number */
-               ai_setint(sih, siflag);
+               pci_write_config_dword(sii->pcibus, PCI_INT_MASK, w);
        }
 
-       if (PCI(sii)) {
-               pcicore_pci_setup(sii->pch, regs);
-
-               /* switch back to previous core */
-               ai_setcoreidx(sih, idx);
+       if (PCI(sih)) {
+               pcicore_pci_setup(sii->pch);
        }
 }
 
@@ -1942,25 +1120,11 @@ void ai_pci_setup(struct si_pub *sih, uint coremask)
  */
 int ai_pci_fixcfg(struct si_pub *sih)
 {
-       uint origidx;
-       void __iomem *regs = NULL;
        struct si_info *sii = (struct si_info *)sih;
 
        /* Fixup PI in SROM shadow area to enable the correct PCI core access */
-       /* save the current index */
-       origidx = ai_coreidx(&sii->pub);
-
        /* check 'pi' is correct and fix it if not */
-       regs = ai_setcore(&sii->pub, sii->pub.buscoretype, 0);
-       if (sii->pub.buscoretype == PCIE_CORE_ID)
-               pcicore_fixcfg_pcie(sii->pch,
-                                   (struct sbpcieregs __iomem *)regs);
-       else if (sii->pub.buscoretype == PCI_CORE_ID)
-               pcicore_fixcfg_pci(sii->pch, (struct sbpciregs __iomem *)regs);
-
-       /* restore the original index */
-       ai_setcoreidx(&sii->pub, origidx);
-
+       pcicore_fixcfg(sii->pch);
        pcicore_hwup(sii->pch);
        return 0;
 }
@@ -1971,58 +1135,42 @@ u32 ai_gpiocontrol(struct si_pub *sih, u32 mask, u32 val, u8 priority)
        uint regoff;
 
        regoff = offsetof(struct chipcregs, gpiocontrol);
-       return ai_corereg(sih, SI_CC_IDX, regoff, mask, val);
+       return ai_cc_reg(sih, regoff, mask, val);
 }
 
 void ai_chipcontrl_epa4331(struct si_pub *sih, bool on)
 {
-       struct si_info *sii;
-       struct chipcregs __iomem *cc;
-       uint origidx;
+       struct bcma_device *cc;
        u32 val;
 
-       sii = (struct si_info *)sih;
-       origidx = ai_coreidx(sih);
-
-       cc = (struct chipcregs __iomem *) ai_setcore(sih, CC_CORE_ID, 0);
-
-       val = R_REG(&cc->chipcontrol);
+       cc = ai_findcore(sih, CC_CORE_ID, 0);
 
        if (on) {
-               if (sih->chippkg == 9 || sih->chippkg == 0xb)
+               if (ai_get_chippkg(sih) == 9 || ai_get_chippkg(sih) == 0xb)
                        /* Ext PA Controls for 4331 12x9 Package */
-                       W_REG(&cc->chipcontrol, val |
-                             CCTRL4331_EXTPA_EN |
-                             CCTRL4331_EXTPA_ON_GPIO2_5);
+                       bcma_set32(cc, CHIPCREGOFFS(chipcontrol),
+                                  CCTRL4331_EXTPA_EN |
+                                  CCTRL4331_EXTPA_ON_GPIO2_5);
                else
                        /* Ext PA Controls for 4331 12x12 Package */
-                       W_REG(&cc->chipcontrol,
-                             val | CCTRL4331_EXTPA_EN);
+                       bcma_set32(cc, CHIPCREGOFFS(chipcontrol),
+                                  CCTRL4331_EXTPA_EN);
        } else {
                val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
-               W_REG(&cc->chipcontrol, val);
+               bcma_mask32(cc, CHIPCREGOFFS(chipcontrol),
+                           ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5));
        }
-
-       ai_setcoreidx(sih, origidx);
 }
 
 /* Enable BT-COEX & Ex-PA for 4313 */
 void ai_epa_4313war(struct si_pub *sih)
 {
-       struct si_info *sii;
-       struct chipcregs __iomem *cc;
-       uint origidx;
+       struct bcma_device *cc;
 
-       sii = (struct si_info *)sih;
-       origidx = ai_coreidx(sih);
-
-       cc = ai_setcore(sih, CC_CORE_ID, 0);
+       cc = ai_findcore(sih, CC_CORE_ID, 0);
 
        /* EPA Fix */
-       W_REG(&cc->gpiocontrol,
-             R_REG(&cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK);
-
-       ai_setcoreidx(sih, origidx);
+       bcma_set32(cc, CHIPCREGOFFS(gpiocontrol), GPIO_CTRL_EPA_EN_MASK);
 }
 
 /* check if the device is removed */
@@ -2033,7 +1181,7 @@ bool ai_deviceremoved(struct si_pub *sih)
 
        sii = (struct si_info *)sih;
 
-       pci_read_config_dword(sii->pbus, PCI_VENDOR_ID, &w);
+       pci_read_config_dword(sii->pcibus, PCI_VENDOR_ID, &w);
        if ((w & 0xFFFF) != PCI_VENDOR_ID_BROADCOM)
                return true;
 
@@ -2042,26 +1190,23 @@ bool ai_deviceremoved(struct si_pub *sih)
 
 bool ai_is_sprom_available(struct si_pub *sih)
 {
-       if (sih->ccrev >= 31) {
-               struct si_info *sii;
-               uint origidx;
-               struct chipcregs __iomem *cc;
+       struct si_info *sii = (struct si_info *)sih;
+
+       if (ai_get_ccrev(sih) >= 31) {
+               struct bcma_device *cc;
                u32 sromctrl;
 
-               if ((sih->cccaps & CC_CAP_SROM) == 0)
+               if ((ai_get_cccaps(sih) & CC_CAP_SROM) == 0)
                        return false;
 
-               sii = (struct si_info *)sih;
-               origidx = sii->curidx;
-               cc = ai_setcoreidx(sih, SI_CC_IDX);
-               sromctrl = R_REG(&cc->sromcontrol);
-               ai_setcoreidx(sih, origidx);
+               cc = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
+               sromctrl = bcma_read32(cc, CHIPCREGOFFS(sromcontrol));
                return sromctrl & SRC_PRESENT;
        }
 
-       switch (sih->chip) {
+       switch (ai_get_chip_id(sih)) {
        case BCM4313_CHIP_ID:
-               return (sih->chipst & CST4313_SPROM_PRESENT) != 0;
+               return (sii->chipst & CST4313_SPROM_PRESENT) != 0;
        default:
                return true;
        }
@@ -2069,9 +1214,11 @@ bool ai_is_sprom_available(struct si_pub *sih)
 
 bool ai_is_otp_disabled(struct si_pub *sih)
 {
-       switch (sih->chip) {
+       struct si_info *sii = (struct si_info *)sih;
+
+       switch (ai_get_chip_id(sih)) {
        case BCM4313_CHIP_ID:
-               return (sih->chipst & CST4313_OTP_PRESENT) == 0;
+               return (sii->chipst & CST4313_OTP_PRESENT) == 0;
                /* These chips always have their OTP on */
        case BCM43224_CHIP_ID:
        case BCM43225_CHIP_ID:
@@ -2079,3 +1226,15 @@ bool ai_is_otp_disabled(struct si_pub *sih)
                return false;
        }
 }
+
+uint ai_get_buscoretype(struct si_pub *sih)
+{
+       struct si_info *sii = (struct si_info *)sih;
+       return sii->buscore->id.id;
+}
+
+uint ai_get_buscorerev(struct si_pub *sih)
+{
+       struct si_info *sii = (struct si_info *)sih;
+       return sii->buscore->id.rev;
+}
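
A minimal usage sketch (not part of the patch) of the converted chipcommon access path: the core is looked up on the bcma bus and read with the bcma accessors, or modified through the ai_cc_reg() wrapper above. The example_cc_read() helper name is hypothetical; it assumes the brcmsmac aiutils/bcma headers and a handle returned by ai_attach().

static u32 example_cc_read(struct si_pub *sih)
{
        struct bcma_device *cc;

        /* locate the chipcommon core instead of switching the
         * register window to it
         */
        cc = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
        if (cc == NULL)
                return 0;

        /* plain read; bcma_set32()/bcma_mask32()/bcma_maskset32()
         * modify bits in place, as done throughout this file
         */
        return bcma_read32(cc, CHIPCREGOFFS(clk_ctl_st));
}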
index b51d1e4..f84c6f7 100644
@@ -17,6 +17,8 @@
 #ifndef        _BRCM_AIUTILS_H_
 #define        _BRCM_AIUTILS_H_
 
+#include <linux/bcma/bcma.h>
+
 #include "types.h"
 
 /*
  *   public (read-only) portion of aiutils handle returned by si_attach()
  */
 struct si_pub {
-       uint buscoretype;       /* PCI_CORE_ID, PCIE_CORE_ID, PCMCIA_CORE_ID */
-       uint buscorerev;        /* buscore rev */
-       uint buscoreidx;        /* buscore index */
        int ccrev;              /* chip common core rev */
        u32 cccaps;             /* chip common capabilities */
-       u32 cccaps_ext; /* chip common capabilities extension */
        int pmurev;             /* pmu core rev */
        u32 pmucaps;            /* pmu capabilities */
        uint boardtype;         /* board type */
        uint boardvendor;       /* board vendor */
-       uint boardflags;        /* board flags */
-       uint boardflags2;       /* board flags2 */
        uint chip;              /* chip number */
        uint chiprev;           /* chip revision */
        uint chippkg;           /* chip package option */
-       u32 chipst;             /* chip status */
-       bool issim;             /* chip is in simulation or emulation */
-       uint socirev;           /* SOC interconnect rev */
-       bool pci_pr32414;
-
 };
 
 struct pci_dev;
@@ -179,38 +170,13 @@ struct gpioh_item {
 /* misc si info needed by some of the routines */
 struct si_info {
        struct si_pub pub;      /* back plane public state (must be first) */
-       struct pci_dev *pbus;   /* handle to pci bus */
-       uint dev_coreid;        /* the core provides driver functions */
-       void *intr_arg;         /* interrupt callback function arg */
-       u32 (*intrsoff_fn) (void *intr_arg); /* turns chip interrupts off */
-       /* restore chip interrupts */
-       void (*intrsrestore_fn) (void *intr_arg, u32 arg);
-       /* check if interrupts are enabled */
-       bool (*intrsenabled_fn) (void *intr_arg);
-
+       struct bcma_bus *icbus; /* handle to soc interconnect bus */
+       struct pci_dev *pcibus; /* handle to pci bus */
        struct pcicore_info *pch; /* PCI/E core handle */
-
+       struct bcma_device *buscore;
        struct list_head var_list; /* list of srom variables */
 
-       void __iomem *curmap;                   /* current regs va */
-       void __iomem *regs[SI_MAXCORES];        /* other regs va */
-
-       uint curidx;            /* current core index */
-       uint numcores;          /* # discovered cores */
-       uint coreid[SI_MAXCORES]; /* id of each core */
-       u32 coresba[SI_MAXCORES]; /* backplane address of each core */
-       void *regs2[SI_MAXCORES]; /* 2nd virtual address per core (usbh20) */
-       u32 coresba2[SI_MAXCORES]; /* 2nd phys address per core (usbh20) */
-       u32 coresba_size[SI_MAXCORES]; /* backplane address space size */
-       u32 coresba2_size[SI_MAXCORES]; /* second address space size */
-
-       void *curwrap;          /* current wrapper va */
-       void *wrappers[SI_MAXCORES];    /* other cores wrapper va */
-       u32 wrapba[SI_MAXCORES];        /* address of controlling wrapper */
-
-       u32 cia[SI_MAXCORES];   /* erom cia entry for each core */
-       u32 cib[SI_MAXCORES];   /* erom cia entry for each core */
-       u32 oob_router; /* oob router registers for axi */
+       u32 chipst;             /* chip status */
 };
 
 /*
@@ -223,52 +189,15 @@ struct si_info {
 
 
 /* AMBA Interconnect exported externs */
-extern uint ai_flag(struct si_pub *sih);
-extern void ai_setint(struct si_pub *sih, int siflag);
-extern uint ai_coreidx(struct si_pub *sih);
-extern uint ai_corevendor(struct si_pub *sih);
-extern uint ai_corerev(struct si_pub *sih);
-extern bool ai_iscoreup(struct si_pub *sih);
-extern u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val);
-extern void ai_core_cflags_wo(struct si_pub *sih, u32 mask, u32 val);
-extern u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val);
-extern uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
-                      uint val);
-extern void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits);
-extern void ai_core_disable(struct si_pub *sih, u32 bits);
-extern int ai_numaddrspaces(struct si_pub *sih);
-extern u32 ai_addrspace(struct si_pub *sih, uint asidx);
-extern u32 ai_addrspacesize(struct si_pub *sih, uint asidx);
-extern void ai_write_wrap_reg(struct si_pub *sih, u32 offset, u32 val);
+extern struct bcma_device *ai_findcore(struct si_pub *sih,
+                                      u16 coreid, u16 coreunit);
+extern u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
 
 /* === exported functions === */
-extern struct si_pub *ai_attach(void __iomem *regs, struct pci_dev *sdh);
+extern struct si_pub *ai_attach(struct bcma_bus *pbus);
 extern void ai_detach(struct si_pub *sih);
-extern uint ai_coreid(struct si_pub *sih);
-extern uint ai_corerev(struct si_pub *sih);
-extern uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
-               uint val);
-extern void ai_write_wrapperreg(struct si_pub *sih, u32 offset, u32 val);
-extern u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val);
-extern u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val);
-extern bool ai_iscoreup(struct si_pub *sih);
-extern uint ai_findcoreidx(struct si_pub *sih, uint coreid, uint coreunit);
-extern void __iomem *ai_setcoreidx(struct si_pub *sih, uint coreidx);
-extern void __iomem *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit);
-extern void __iomem *ai_switch_core(struct si_pub *sih, uint coreid,
-                                   uint *origidx, uint *intr_val);
-extern void ai_restore_core(struct si_pub *sih, uint coreid, uint intr_val);
-extern void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits);
-extern void ai_core_disable(struct si_pub *sih, u32 bits);
-extern u32 ai_alp_clock(struct si_pub *sih);
-extern u32 ai_ilp_clock(struct si_pub *sih);
+extern uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
 extern void ai_pci_setup(struct si_pub *sih, uint coremask);
-extern void ai_setint(struct si_pub *sih, int siflag);
-extern bool ai_backplane64(struct si_pub *sih);
-extern void ai_register_intr_callback(struct si_pub *sih, void *intrsoff_fn,
-                                     void *intrsrestore_fn,
-                                     void *intrsenabled_fn, void *intr_arg);
-extern void ai_deregister_intr_callback(struct si_pub *sih);
 extern void ai_clkctl_init(struct si_pub *sih);
 extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
 extern bool ai_clkctl_cc(struct si_pub *sih, uint mode);
@@ -283,13 +212,6 @@ extern bool ai_is_otp_disabled(struct si_pub *sih);
 /* SPROM availability */
 extern bool ai_is_sprom_available(struct si_pub *sih);
 
-/*
- * Build device path. Path size must be >= SI_DEVPATH_BUFSZ.
- * The returned path is NULL terminated and has trailing '/'.
- * Return 0 on success, nonzero otherwise.
- */
-extern int ai_devpath(struct si_pub *sih, char *path, int size);
-
 extern void ai_pci_sleep(struct si_pub *sih);
 extern void ai_pci_down(struct si_pub *sih);
 extern void ai_pci_up(struct si_pub *sih);
@@ -299,4 +221,52 @@ extern void ai_chipcontrl_epa4331(struct si_pub *sih, bool on);
 /* Enable Ex-PA for 4313 */
 extern void ai_epa_4313war(struct si_pub *sih);
 
+extern uint ai_get_buscoretype(struct si_pub *sih);
+extern uint ai_get_buscorerev(struct si_pub *sih);
+
+static inline int ai_get_ccrev(struct si_pub *sih)
+{
+       return sih->ccrev;
+}
+
+static inline u32 ai_get_cccaps(struct si_pub *sih)
+{
+       return sih->cccaps;
+}
+
+static inline int ai_get_pmurev(struct si_pub *sih)
+{
+       return sih->pmurev;
+}
+
+static inline u32 ai_get_pmucaps(struct si_pub *sih)
+{
+       return sih->pmucaps;
+}
+
+static inline uint ai_get_boardtype(struct si_pub *sih)
+{
+       return sih->boardtype;
+}
+
+static inline uint ai_get_boardvendor(struct si_pub *sih)
+{
+       return sih->boardvendor;
+}
+
+static inline uint ai_get_chip_id(struct si_pub *sih)
+{
+       return sih->chip;
+}
+
+static inline uint ai_get_chiprev(struct si_pub *sih)
+{
+       return sih->chiprev;
+}
+
+static inline uint ai_get_chippkg(struct si_pub *sih)
+{
+       return sih->chippkg;
+}
+
 #endif                         /* _BRCM_AIUTILS_H_ */
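
An illustrative sketch (not part of the patch) of the inline accessors declared above; example_has_pmu() is a hypothetical caller and assumes a si_pub handle obtained from ai_attach().

static bool example_has_pmu(struct si_pub *sih)
{
        /* capability and revision fields are now read through the
         * inline ai_get_*() accessors rather than by dereferencing
         * struct si_pub directly
         */
        return (ai_get_cccaps(sih) & CC_CAP_PMU) != 0;
}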
index 43f7a72..90911ee 100644
@@ -1118,14 +1118,17 @@ brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
                u8 status_delay = 0;
 
                /* wait till the next 8 bytes of txstatus is available */
-               while (((s1 = R_REG(&wlc->regs->frmtxstatus)) & TXS_V) == 0) {
+               s1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus));
+               while ((s1 & TXS_V) == 0) {
                        udelay(1);
                        status_delay++;
                        if (status_delay > 10)
                                return; /* error condition */
+                       s1 = bcma_read32(wlc->hw->d11core,
+                                        D11REGOFFS(frmtxstatus));
                }
 
-               s2 = R_REG(&wlc->regs->frmtxstatus2);
+               s2 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus2));
        }
 
        if (scb) {
index ed51616..1948cb2 100644
@@ -430,6 +430,9 @@ struct d11regs {
        u16 PAD[0x380]; /* 0x800 - 0xEFE */
 };
 
+/* d11 register field offset */
+#define D11REGOFFS(field)      offsetof(struct d11regs, field)
+
 #define        PIHR_BASE       0x0400  /* byte address of packed IHR region */
 
 /* biststatus */
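
A small sketch (not part of the patch) of how D11REGOFFS() is meant to be used: it turns a struct d11regs field into a byte offset for the bcma accessors, as in the ampdu change above. The example_read_txstatus() name is hypothetical.

static u32 example_read_txstatus(struct bcma_device *d11core)
{
        /* read the frame tx status register via its byte offset */
        return bcma_read32(d11core, D11REGOFFS(frmtxstatus));
}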
index 0bb8c37..2e90a9a 100644
 #include "dma.h"
 #include "soc.h"
 
+/*
+ * dma register field offset calculation
+ */
+#define DMA64REGOFFS(field)            offsetof(struct dma64regs, field)
+#define DMA64TXREGOFFS(di, field)      (di->d64txregbase + DMA64REGOFFS(field))
+#define DMA64RXREGOFFS(di, field)      (di->d64rxregbase + DMA64REGOFFS(field))
+
 /*
  * DMA hardware requires each descriptor ring to be 8kB aligned, and fit within
  * a contiguous 8kB physical address.
@@ -220,15 +227,16 @@ struct dma_info {
        uint *msg_level;        /* message level pointer */
        char name[MAXNAMEL];    /* callers name for diag msgs */
 
-       struct pci_dev *pbus;           /* bus handle */
+       struct bcma_device *core;
+       struct device *dmadev;
 
        bool dma64;     /* this dma engine is operating in 64-bit mode */
        bool addrext;   /* this dma engine supports DmaExtendedAddrChanges */
 
        /* 64-bit dma tx engine registers */
-       struct dma64regs __iomem *d64txregs;
+       uint d64txregbase;
        /* 64-bit dma rx engine registers */
-       struct dma64regs __iomem *d64rxregs;
+       uint d64rxregbase;
        /* pointer to dma64 tx descriptor ring */
        struct dma64desc *txd64;
        /* pointer to dma64 rx descriptor ring */
@@ -375,15 +383,16 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
        if (dmactrlflags & DMA_CTRL_PEN) {
                u32 control;
 
-               control = R_REG(&di->d64txregs->control);
-               W_REG(&di->d64txregs->control,
+               control = bcma_read32(di->core, DMA64TXREGOFFS(di, control));
+               bcma_write32(di->core, DMA64TXREGOFFS(di, control),
                      control | D64_XC_PD);
-               if (R_REG(&di->d64txregs->control) & D64_XC_PD)
+               if (bcma_read32(di->core, DMA64TXREGOFFS(di, control)) &
+                   D64_XC_PD)
                        /* We *can* disable it so it is supported,
                         * restore control register
                         */
-                       W_REG(&di->d64txregs->control,
-                       control);
+                       bcma_write32(di->core, DMA64TXREGOFFS(di, control),
+                                    control);
                else
                        /* Not supported, don't allow it to be enabled */
                        dmactrlflags &= ~DMA_CTRL_PEN;
@@ -394,12 +403,12 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
        return dmactrlflags;
 }
 
-static bool _dma64_addrext(struct dma64regs __iomem *dma64regs)
+static bool _dma64_addrext(struct dma_info *di, uint ctrl_offset)
 {
        u32 w;
-       OR_REG(&dma64regs->control, D64_XC_AE);
-       w = R_REG(&dma64regs->control);
-       AND_REG(&dma64regs->control, ~D64_XC_AE);
+       bcma_set32(di->core, ctrl_offset, D64_XC_AE);
+       w = bcma_read32(di->core, ctrl_offset);
+       bcma_mask32(di->core, ctrl_offset, ~D64_XC_AE);
        return (w & D64_XC_AE) == D64_XC_AE;
 }
 
@@ -412,13 +421,13 @@ static bool _dma_isaddrext(struct dma_info *di)
        /* DMA64 supports full 32- or 64-bit operation. AE is always valid */
 
        /* not all tx or rx channel are available */
-       if (di->d64txregs != NULL) {
-               if (!_dma64_addrext(di->d64txregs))
+       if (di->d64txregbase != 0) {
+               if (!_dma64_addrext(di, DMA64TXREGOFFS(di, control)))
                        DMA_ERROR("%s: DMA64 tx doesn't have AE set\n",
                                  di->name);
                return true;
-       } else if (di->d64rxregs != NULL) {
-               if (!_dma64_addrext(di->d64rxregs))
+       } else if (di->d64rxregbase != 0) {
+               if (!_dma64_addrext(di, DMA64RXREGOFFS(di, control)))
                        DMA_ERROR("%s: DMA64 rx doesn't have AE set\n",
                                  di->name);
                return true;
@@ -432,14 +441,14 @@ static bool _dma_descriptor_align(struct dma_info *di)
        u32 addrl;
 
        /* Check to see if the descriptors need to be aligned on 4K/8K or not */
-       if (di->d64txregs != NULL) {
-               W_REG(&di->d64txregs->addrlow, 0xff0);
-               addrl = R_REG(&di->d64txregs->addrlow);
+       if (di->d64txregbase != 0) {
+               bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow), 0xff0);
+               addrl = bcma_read32(di->core, DMA64TXREGOFFS(di, addrlow));
                if (addrl != 0)
                        return false;
-       } else if (di->d64rxregs != NULL) {
-               W_REG(&di->d64rxregs->addrlow, 0xff0);
-               addrl = R_REG(&di->d64rxregs->addrlow);
+       } else if (di->d64rxregbase != 0) {
+               bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow), 0xff0);
+               addrl = bcma_read32(di->core, DMA64RXREGOFFS(di, addrlow));
                if (addrl != 0)
                        return false;
        }
@@ -450,7 +459,7 @@ static bool _dma_descriptor_align(struct dma_info *di)
  * Descriptor table must start at the DMA hardware dictated alignment, so
  * allocated memory must be large enough to support this requirement.
  */
-static void *dma_alloc_consistent(struct pci_dev *pdev, uint size,
+static void *dma_alloc_consistent(struct dma_info *di, uint size,
                                  u16 align_bits, uint *alloced,
                                  dma_addr_t *pap)
 {
@@ -460,7 +469,7 @@ static void *dma_alloc_consistent(struct pci_dev *pdev, uint size,
                        size += align;
                *alloced = size;
        }
-       return pci_alloc_consistent(pdev, size, pap);
+       return dma_alloc_coherent(di->dmadev, size, pap, GFP_ATOMIC);
 }
 
 static
@@ -486,7 +495,7 @@ static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
        u32 desc_strtaddr;
        u32 alignbytes = 1 << *alignbits;
 
-       va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa);
+       va = dma_alloc_consistent(di, size, *alignbits, alloced, descpa);
 
        if (NULL == va)
                return NULL;
@@ -495,8 +504,8 @@ static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
        if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
                                                        & boundary)) {
                *alignbits = dma_align_sizetobits(size);
-               pci_free_consistent(di->pbus, size, va, *descpa);
-               va = dma_alloc_consistent(di->pbus, size, *alignbits,
+               dma_free_coherent(di->dmadev, size, va, *descpa);
+               va = dma_alloc_consistent(di, size, *alignbits,
                        alloced, descpa);
        }
        return va;
@@ -556,12 +565,13 @@ static bool _dma_alloc(struct dma_info *di, uint direction)
 }
 
 struct dma_pub *dma_attach(char *name, struct si_pub *sih,
-                    void __iomem *dmaregstx, void __iomem *dmaregsrx,
-                    uint ntxd, uint nrxd,
-                    uint rxbufsize, int rxextheadroom,
-                    uint nrxpost, uint rxoffset, uint *msg_level)
+                          struct bcma_device *core,
+                          uint txregbase, uint rxregbase, uint ntxd, uint nrxd,
+                          uint rxbufsize, int rxextheadroom,
+                          uint nrxpost, uint rxoffset, uint *msg_level)
 {
        struct dma_info *di;
+       u8 rev = core->id.rev;
        uint size;
 
        /* allocate private info structure */
@@ -572,11 +582,13 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
        di->msg_level = msg_level ? msg_level : &dma_msg_level;
 
 
-       di->dma64 = ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);
+       di->dma64 =
+               ((bcma_aread32(core, BCMA_IOST) & SISF_DMA64) == SISF_DMA64);
 
-       /* init dma reg pointer */
-       di->d64txregs = (struct dma64regs __iomem *) dmaregstx;
-       di->d64rxregs = (struct dma64regs __iomem *) dmaregsrx;
+       /* init dma reg info */
+       di->core = core;
+       di->d64txregbase = txregbase;
+       di->d64rxregbase = rxregbase;
 
        /*
         * Default flags (which can be changed by the driver calling
@@ -585,16 +597,17 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
         */
        _dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);
 
-       DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d dmaregstx %p dmaregsrx %p\n",
-                 name, "DMA64",
+       DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d "
+                 "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
+                 "txregbase %u rxregbase %u\n", name, "DMA64",
                  di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
-                 rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx);
+                 rxextheadroom, nrxpost, rxoffset, txregbase, rxregbase);
 
        /* make a private copy of our callers name */
        strncpy(di->name, name, MAXNAMEL);
        di->name[MAXNAMEL - 1] = '\0';
 
-       di->pbus = ((struct si_info *)sih)->pbus;
+       di->dmadev = core->dma_dev;
 
        /* save tunables */
        di->ntxd = (u16) ntxd;
@@ -626,12 +639,12 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
        di->dataoffsetlow = di->ddoffsetlow;
        di->dataoffsethigh = di->ddoffsethigh;
        /* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
-       if ((ai_coreid(sih) == SDIOD_CORE_ID)
-           && ((ai_corerev(sih) > 0) && (ai_corerev(sih) <= 2)))
-               di->addrext = 0;
-       else if ((ai_coreid(sih) == I2S_CORE_ID) &&
-                ((ai_corerev(sih) == 0) || (ai_corerev(sih) == 1)))
-               di->addrext = 0;
+       if ((core->id.id == SDIOD_CORE_ID)
+           && ((rev > 0) && (rev <= 2)))
+               di->addrext = false;
+       else if ((core->id.id == I2S_CORE_ID) &&
+                ((rev == 0) || (rev == 1)))
+               di->addrext = false;
        else
                di->addrext = _dma_isaddrext(di);
 
@@ -749,13 +762,13 @@ void dma_detach(struct dma_pub *pub)
 
        /* free dma descriptor rings */
        if (di->txd64)
-               pci_free_consistent(di->pbus, di->txdalloc,
-                                   ((s8 *)di->txd64 - di->txdalign),
-                                   (di->txdpaorig));
+               dma_free_coherent(di->dmadev, di->txdalloc,
+                                 ((s8 *)di->txd64 - di->txdalign),
+                                 (di->txdpaorig));
        if (di->rxd64)
-               pci_free_consistent(di->pbus, di->rxdalloc,
-                                   ((s8 *)di->rxd64 - di->rxdalign),
-                                   (di->rxdpaorig));
+               dma_free_coherent(di->dmadev, di->rxdalloc,
+                                 ((s8 *)di->rxd64 - di->rxdalign),
+                                 (di->rxdpaorig));
 
        /* free packet pointer vectors */
        kfree(di->txp);
@@ -780,11 +793,15 @@ _dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
        if ((di->ddoffsetlow == 0)
            || !(pa & PCI32ADDR_HIGH)) {
                if (direction == DMA_TX) {
-                       W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
-                       W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
+                       bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
+                                    pa + di->ddoffsetlow);
+                       bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
+                                    di->ddoffsethigh);
                } else {
-                       W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
-                       W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
+                       bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
+                                    pa + di->ddoffsetlow);
+                       bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
+                                    di->ddoffsethigh);
                }
        } else {
                /* DMA64 32bits address extension */
@@ -795,15 +812,19 @@ _dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
                pa &= ~PCI32ADDR_HIGH;
 
                if (direction == DMA_TX) {
-                       W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
-                       W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
-                       SET_REG(&di->d64txregs->control,
-                               D64_XC_AE, (ae << D64_XC_AE_SHIFT));
+                       bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
+                                    pa + di->ddoffsetlow);
+                       bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
+                                    di->ddoffsethigh);
+                       bcma_maskset32(di->core, DMA64TXREGOFFS(di, control),
+                                      D64_XC_AE, (ae << D64_XC_AE_SHIFT));
                } else {
-                       W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
-                       W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
-                       SET_REG(&di->d64rxregs->control,
-                               D64_RC_AE, (ae << D64_RC_AE_SHIFT));
+                       bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
+                                    pa + di->ddoffsetlow);
+                       bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
+                                    di->ddoffsethigh);
+                       bcma_maskset32(di->core, DMA64RXREGOFFS(di, control),
+                                      D64_RC_AE, (ae << D64_RC_AE_SHIFT));
                }
        }
 }
@@ -815,9 +836,9 @@ static void _dma_rxenable(struct dma_info *di)
 
        DMA_TRACE("%s:\n", di->name);
 
-       control =
-           (R_REG(&di->d64rxregs->control) & D64_RC_AE) |
-           D64_RC_RE;
+       control = D64_RC_RE | (bcma_read32(di->core,
+                                          DMA64RXREGOFFS(di, control)) &
+                              D64_RC_AE);
 
        if ((dmactrlflags & DMA_CTRL_PEN) == 0)
                control |= D64_RC_PD;
@@ -825,7 +846,7 @@ static void _dma_rxenable(struct dma_info *di)
        if (dmactrlflags & DMA_CTRL_ROC)
                control |= D64_RC_OC;
 
-       W_REG(&di->d64rxregs->control,
+       bcma_write32(di->core, DMA64RXREGOFFS(di, control),
                ((di->rxoffset << D64_RC_RO_SHIFT) | control));
 }
 
@@ -868,7 +889,8 @@ static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
                return NULL;
 
        curr =
-           B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) -
+           B2I(((bcma_read32(di->core,
+                             DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) -
                 di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc);
 
        /* ignore curr if forceall */
@@ -882,7 +904,7 @@ static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
        pa = le32_to_cpu(di->rxd64[i].addrlow) - di->dataoffsetlow;
 
        /* clear this packet from the descriptor ring */
-       pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE);
+       dma_unmap_single(di->dmadev, pa, di->rxbufsize, DMA_FROM_DEVICE);
 
        di->rxd64[i].addrlow = cpu_to_le32(0xdeadbeef);
        di->rxd64[i].addrhigh = cpu_to_le32(0xdeadbeef);
@@ -950,12 +972,12 @@ int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
                if (resid > 0) {
                        uint cur;
                        cur =
-                           B2I(((R_REG(&di->d64rxregs->status0) &
-                                 D64_RS0_CD_MASK) -
-                                di->rcvptrbase) & D64_RS0_CD_MASK,
-                               struct dma64desc);
+                           B2I(((bcma_read32(di->core,
+                                             DMA64RXREGOFFS(di, status0)) &
+                                 D64_RS0_CD_MASK) - di->rcvptrbase) &
+                               D64_RS0_CD_MASK, struct dma64desc);
                        DMA_ERROR("rxin %d rxout %d, hw_curr %d\n",
-                                 di->rxin, di->rxout, cur);
+                                  di->rxin, di->rxout, cur);
                }
 #endif                         /* BCMDBG */
 
@@ -983,8 +1005,10 @@ static bool dma64_rxidle(struct dma_info *di)
        if (di->nrxd == 0)
                return true;
 
-       return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
-               (R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK));
+       return ((bcma_read32(di->core,
+                            DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) ==
+               (bcma_read32(di->core, DMA64RXREGOFFS(di, ptr)) &
+                D64_RS0_CD_MASK));
 }
 
 /*
@@ -1048,8 +1072,8 @@ bool dma_rxfill(struct dma_pub *pub)
                 */
                *(u32 *) (p->data) = 0;
 
-               pa = pci_map_single(di->pbus, p->data,
-                       di->rxbufsize, PCI_DMA_FROMDEVICE);
+               pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
+                                   DMA_FROM_DEVICE);
 
                /* save the free packet pointer */
                di->rxp[rxout] = p;
@@ -1067,7 +1091,7 @@ bool dma_rxfill(struct dma_pub *pub)
        di->rxout = rxout;
 
        /* update the chip lastdscr pointer */
-       W_REG(&di->d64rxregs->ptr,
+       bcma_write32(di->core, DMA64RXREGOFFS(di, ptr),
              di->rcvptrbase + I2B(rxout, struct dma64desc));
 
        return ring_empty;
@@ -1128,7 +1152,7 @@ void dma_txinit(struct dma_pub *pub)
 
        if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0)
                control |= D64_XC_PD;
-       OR_REG(&di->d64txregs->control, control);
+       bcma_set32(di->core, DMA64TXREGOFFS(di, control), control);
 
        /* DMA engine with alignment requirement requires table to be inited
         * before enabling the engine
@@ -1146,7 +1170,7 @@ void dma_txsuspend(struct dma_pub *pub)
        if (di->ntxd == 0)
                return;
 
-       OR_REG(&di->d64txregs->control, D64_XC_SE);
+       bcma_set32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
 }
 
 void dma_txresume(struct dma_pub *pub)
@@ -1158,7 +1182,7 @@ void dma_txresume(struct dma_pub *pub)
        if (di->ntxd == 0)
                return;
 
-       AND_REG(&di->d64txregs->control, ~D64_XC_SE);
+       bcma_mask32(di->core, DMA64TXREGOFFS(di, control), ~D64_XC_SE);
 }
 
 bool dma_txsuspended(struct dma_pub *pub)
@@ -1166,8 +1190,9 @@ bool dma_txsuspended(struct dma_pub *pub)
        struct dma_info *di = (struct dma_info *)pub;
 
        return (di->ntxd == 0) ||
-           ((R_REG(&di->d64txregs->control) & D64_XC_SE) ==
-            D64_XC_SE);
+              ((bcma_read32(di->core,
+                            DMA64TXREGOFFS(di, control)) & D64_XC_SE) ==
+               D64_XC_SE);
 }
 
 void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
@@ -1200,16 +1225,17 @@ bool dma_txreset(struct dma_pub *pub)
                return true;
 
        /* suspend tx DMA first */
-       W_REG(&di->d64txregs->control, D64_XC_SE);
+       bcma_write32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
        SPINWAIT(((status =
-                  (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
-                 != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
-                && (status != D64_XS0_XS_STOPPED), 10000);
+                  (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
+                   D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED) &&
+                 (status != D64_XS0_XS_IDLE) && (status != D64_XS0_XS_STOPPED),
+                10000);
 
-       W_REG(&di->d64txregs->control, 0);
+       bcma_write32(di->core, DMA64TXREGOFFS(di, control), 0);
        SPINWAIT(((status =
-                  (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
-                 != D64_XS0_XS_DISABLED), 10000);
+                  (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
+                   D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED), 10000);
 
        /* wait for the last transaction to complete */
        udelay(300);
@@ -1225,10 +1251,10 @@ bool dma_rxreset(struct dma_pub *pub)
        if (di->nrxd == 0)
                return true;
 
-       W_REG(&di->d64rxregs->control, 0);
+       bcma_write32(di->core, DMA64RXREGOFFS(di, control), 0);
        SPINWAIT(((status =
-                  (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK))
-                 != D64_RS0_RS_DISABLED), 10000);
+                  (bcma_read32(di->core, DMA64RXREGOFFS(di, status0)) &
+                   D64_RS0_RS_MASK)) != D64_RS0_RS_DISABLED), 10000);
 
        return status == D64_RS0_RS_DISABLED;
 }
@@ -1239,10 +1265,9 @@ bool dma_rxreset(struct dma_pub *pub)
  *   the error(toss frames) could be fatal and cause many subsequent hard
  *   to debug problems
  */
-int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
+int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit)
 {
        struct dma_info *di = (struct dma_info *)pub;
-       struct sk_buff *p, *next;
        unsigned char *data;
        uint len;
        u16 txout;
@@ -1254,57 +1279,44 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
        txout = di->txout;
 
        /*
-        * Walk the chain of packet buffers
-        * allocating and initializing transmit descriptor entries.
+        * obtain and initialize transmit descriptor entry.
         */
-       for (p = p0; p; p = next) {
-               data = p->data;
-               len = p->len;
-               next = p->next;
-
-               /* return nonzero if out of tx descriptors */
-               if (nexttxd(di, txout) == di->txin)
-                       goto outoftxd;
-
-               if (len == 0)
-                       continue;
+       data = p->data;
+       len = p->len;
 
-               /* get physical address of buffer start */
-               pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);
+       /* no use to transmit a zero length packet */
+       if (len == 0)
+               return 0;
 
-               flags = 0;
-               if (p == p0)
-                       flags |= D64_CTRL1_SOF;
+       /* return nonzero if out of tx descriptors */
+       if (nexttxd(di, txout) == di->txin)
+               goto outoftxd;
 
-               /* With a DMA segment list, Descriptor table is filled
-                * using the segment list instead of looping over
-                * buffers in multi-chain DMA. Therefore, EOF for SGLIST
-                * is when end of segment list is reached.
-                */
-               if (next == NULL)
-                       flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
-               if (txout == (di->ntxd - 1))
-                       flags |= D64_CTRL1_EOT;
+       /* get physical address of buffer start */
+       pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);
 
-               dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
+       /* With a DMA segment list, Descriptor table is filled
+        * using the segment list instead of looping over
+        * buffers in multi-chain DMA. Therefore, EOF for SGLIST
+        * is when end of segment list is reached.
+        */
+       flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;
+       if (txout == (di->ntxd - 1))
+               flags |= D64_CTRL1_EOT;
 
-               txout = nexttxd(di, txout);
-       }
+       dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
 
-       /* if last txd eof not set, fix it */
-       if (!(flags & D64_CTRL1_EOF))
-               di->txd64[prevtxd(di, txout)].ctrl1 =
-                    cpu_to_le32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF);
+       txout = nexttxd(di, txout);
 
        /* save the packet */
-       di->txp[prevtxd(di, txout)] = p0;
+       di->txp[prevtxd(di, txout)] = p;
 
        /* bump the tx descriptor index */
        di->txout = txout;
 
        /* kick the chip */
        if (commit)
-               W_REG(&di->d64txregs->ptr,
+               bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
                      di->xmtptrbase + I2B(txout, struct dma64desc));
 
        /* tx flow control */
@@ -1314,7 +1326,7 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
 
  outoftxd:
        DMA_ERROR("%s: out of txds !!!\n", di->name);
-       brcmu_pkt_buf_free_skb(p0);
+       brcmu_pkt_buf_free_skb(p);
        di->dma.txavail = 0;
        di->dma.txnobuf++;
        return -1;
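With the chain walk gone, dma_txfast() maps exactly one buffer to one descriptor, so the frame-boundary flags collapse to a constant. A hypothetical helper, shown only to spell out the flag arithmetic used above:

static u32 dma64_single_buf_flags(struct dma_info *di, u16 txout)
{
	/* one buffer is a whole frame: start, end and completion irq together */
	u32 flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;

	/* the last ring slot additionally marks end-of-descriptor-table */
	if (txout == (u16)(di->ntxd - 1))
		flags |= D64_CTRL1_EOT;

	return flags;
}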
@@ -1352,16 +1364,15 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
        if (range == DMA_RANGE_ALL)
                end = di->txout;
        else {
-               struct dma64regs __iomem *dregs = di->d64txregs;
-
-               end = (u16) (B2I(((R_REG(&dregs->status0) &
-                                D64_XS0_CD_MASK) -
-                                di->xmtptrbase) & D64_XS0_CD_MASK,
-                                struct dma64desc));
+               end = (u16) (B2I(((bcma_read32(di->core,
+                                              DMA64TXREGOFFS(di, status0)) &
+                                  D64_XS0_CD_MASK) - di->xmtptrbase) &
+                                D64_XS0_CD_MASK, struct dma64desc));
 
                if (range == DMA_RANGE_TRANSFERED) {
                        active_desc =
-                           (u16) (R_REG(&dregs->status1) &
+                               (u16)(bcma_read32(di->core,
+                                                 DMA64TXREGOFFS(di, status1)) &
                                      D64_XS1_AD_MASK);
                        active_desc =
                            (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
@@ -1390,7 +1401,7 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
                txp = di->txp[i];
                di->txp[i] = NULL;
 
-               pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE);
+               dma_unmap_single(di->dmadev, pa, size, DMA_TO_DEVICE);
        }
 
        di->txin = i;
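The bus-specific pci_map_single()/pci_unmap_single() pair gives way to the generic streaming-DMA API against di->dmadev. Hypothetical wrappers, assuming di->dmadev is the struct device backing the 802.11 core, just to show that the unmap must mirror the map in device, length and direction:

static dma_addr_t dma64_map_tx_buf(struct dma_info *di, struct sk_buff *p)
{
	/* map for device reads; the tx descriptor is programmed with this address */
	return dma_map_single(di->dmadev, p->data, p->len, DMA_TO_DEVICE);
}

static void dma64_unmap_tx_buf(struct dma_info *di, dma_addr_t pa, uint len)
{
	/* reclaim path: undo the mapping with the same length and direction */
	dma_unmap_single(di->dmadev, pa, len, DMA_TO_DEVICE);
}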
index d317c7c..cc269ee 100644 (file)
@@ -75,10 +75,11 @@ struct dma_pub {
 };
 
 extern struct dma_pub *dma_attach(char *name, struct si_pub *sih,
-                           void __iomem *dmaregstx, void __iomem *dmaregsrx,
-                           uint ntxd, uint nrxd,
-                           uint rxbufsize, int rxextheadroom,
-                           uint nrxpost, uint rxoffset, uint *msg_level);
+                                 struct bcma_device *d11core,
+                                 uint txregbase, uint rxregbase,
+                                 uint ntxd, uint nrxd,
+                                 uint rxbufsize, int rxextheadroom,
+                                 uint nrxpost, uint rxoffset, uint *msg_level);
 
 void dma_rxinit(struct dma_pub *pub);
 int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list);
index ba3e4b5..d106576 100644 (file)
 #define __UNDEF_NO_VERSION__
 
 #include <linux/etherdevice.h>
-#include <linux/pci.h>
 #include <linux/sched.h>
 #include <linux/firmware.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/bcma/bcma.h>
 #include <net/mac80211.h>
 #include <defs.h>
 #include "nicpci.h"
 #define MAC_FILTERS (FIF_PROMISC_IN_BSS | \
        FIF_ALLMULTI | \
        FIF_FCSFAIL | \
-       FIF_PLCPFAIL | \
        FIF_CONTROL | \
        FIF_OTHER_BSS | \
-       FIF_BCN_PRBRESP_PROMISC)
+       FIF_BCN_PRBRESP_PROMISC | \
+       FIF_PSPOLL)
 
 #define CHAN2GHZ(channel, freqency, chflags)  { \
        .band = IEEE80211_BAND_2GHZ, \
@@ -87,16 +87,14 @@ MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver.");
 MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards");
 MODULE_LICENSE("Dual BSD/GPL");
 
-/* recognized PCI IDs */
-static DEFINE_PCI_DEVICE_TABLE(brcms_pci_id_table) = {
-       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) }, /* 43225 2G */
-       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) }, /* 43224 DUAL */
-       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, /* 4313 DUAL */
-       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) }, /* 43224 Ven */
-       {0}
-};
 
-MODULE_DEVICE_TABLE(pci, brcms_pci_id_table);
+/* recognized BCMA Core IDs */
+static struct bcma_device_id brcms_coreid_table[] = {
+       BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 23, BCMA_ANY_CLASS),
+       BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 24, BCMA_ANY_CLASS),
+       BCMA_CORETABLE_END
+};
+MODULE_DEVICE_TABLE(bcma, brcms_coreid_table);
 
 #ifdef BCMDBG
 static int msglevel = 0xdeadbeef;
@@ -373,7 +371,7 @@ static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
                                                   conf->listen_interval);
        }
        if (changed & IEEE80211_CONF_CHANGE_MONITOR)
-               wiphy_err(wiphy, "%s: change monitor mode: %s (implement)\n",
+               wiphy_dbg(wiphy, "%s: change monitor mode: %s\n",
                          __func__, conf->flags & IEEE80211_CONF_MONITOR ?
                          "true" : "false");
        if (changed & IEEE80211_CONF_CHANGE_PS)
@@ -550,29 +548,25 @@ brcms_ops_configure_filter(struct ieee80211_hw *hw,
 
        changed_flags &= MAC_FILTERS;
        *total_flags &= MAC_FILTERS;
+
        if (changed_flags & FIF_PROMISC_IN_BSS)
-               wiphy_err(wiphy, "FIF_PROMISC_IN_BSS\n");
+               wiphy_dbg(wiphy, "FIF_PROMISC_IN_BSS\n");
        if (changed_flags & FIF_ALLMULTI)
-               wiphy_err(wiphy, "FIF_ALLMULTI\n");
+               wiphy_dbg(wiphy, "FIF_ALLMULTI\n");
        if (changed_flags & FIF_FCSFAIL)
-               wiphy_err(wiphy, "FIF_FCSFAIL\n");
-       if (changed_flags & FIF_PLCPFAIL)
-               wiphy_err(wiphy, "FIF_PLCPFAIL\n");
+               wiphy_dbg(wiphy, "FIF_FCSFAIL\n");
        if (changed_flags & FIF_CONTROL)
-               wiphy_err(wiphy, "FIF_CONTROL\n");
+               wiphy_dbg(wiphy, "FIF_CONTROL\n");
        if (changed_flags & FIF_OTHER_BSS)
-               wiphy_err(wiphy, "FIF_OTHER_BSS\n");
-       if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
-               spin_lock_bh(&wl->lock);
-               if (*total_flags & FIF_BCN_PRBRESP_PROMISC) {
-                       wl->pub->mac80211_state |= MAC80211_PROMISC_BCNS;
-                       brcms_c_mac_bcn_promisc_change(wl->wlc, 1);
-               } else {
-                       brcms_c_mac_bcn_promisc_change(wl->wlc, 0);
-                       wl->pub->mac80211_state &= ~MAC80211_PROMISC_BCNS;
-               }
-               spin_unlock_bh(&wl->lock);
-       }
+               wiphy_dbg(wiphy, "FIF_OTHER_BSS\n");
+       if (changed_flags & FIF_PSPOLL)
+               wiphy_dbg(wiphy, "FIF_PSPOLL\n");
+       if (changed_flags & FIF_BCN_PRBRESP_PROMISC)
+               wiphy_dbg(wiphy, "FIF_BCN_PRBRESP_PROMISC\n");
+
+       spin_lock_bh(&wl->lock);
+       brcms_c_mac_promisc(wl->wlc, *total_flags);
+       spin_unlock_bh(&wl->lock);
        return;
 }
 
@@ -728,7 +722,7 @@ static const struct ieee80211_ops brcms_ops = {
 };
 
 /*
- * is called in brcms_pci_probe() context, therefore no locking required.
+ * is called in brcms_bcma_probe() context, therefore no locking required.
  */
 static int brcms_set_hint(struct brcms_info *wl, char *abbrev)
 {
@@ -868,25 +862,15 @@ static void brcms_free(struct brcms_info *wl)
 #endif
                kfree(t);
        }
-
-       /*
-        * unregister_netdev() calls get_stats() which may read chip
-        * registers so we cannot unmap the chip registers until
-        * after calling unregister_netdev() .
-        */
-       if (wl->regsva)
-               iounmap(wl->regsva);
-
-       wl->regsva = NULL;
 }
 
 /*
 * called both from the kernel and from this kernel module (error flow on attach)
 * precondition: perimeter lock is not acquired.
 */
-static void brcms_remove(struct pci_dev *pdev)
+static void brcms_remove(struct bcma_device *pdev)
 {
-       struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+       struct ieee80211_hw *hw = bcma_get_drvdata(pdev);
        struct brcms_info *wl = hw->priv;
 
        if (wl->wlc) {
@@ -894,11 +878,10 @@ static void brcms_remove(struct pci_dev *pdev)
                wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
                ieee80211_unregister_hw(hw);
        }
-       pci_disable_device(pdev);
 
        brcms_free(wl);
 
-       pci_set_drvdata(pdev, NULL);
+       bcma_set_drvdata(pdev, NULL);
        ieee80211_free_hw(hw);
 }
 
@@ -1006,11 +989,9 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
  * it as static.
  *
  *
- * is called in brcms_pci_probe() context, therefore no locking required.
+ * is called in brcms_bcma_probe() context, therefore no locking required.
  */
-static struct brcms_info *brcms_attach(u16 vendor, u16 device,
-                                      resource_size_t regs,
-                                      struct pci_dev *btparam, uint irq)
+static struct brcms_info *brcms_attach(struct bcma_device *pdev)
 {
        struct brcms_info *wl = NULL;
        int unit, err;
@@ -1024,7 +1005,7 @@ static struct brcms_info *brcms_attach(u16 vendor, u16 device,
                return NULL;
 
        /* allocate private info */
-       hw = pci_get_drvdata(btparam);  /* btparam == pdev */
+       hw = bcma_get_drvdata(pdev);
        if (hw != NULL)
                wl = hw->priv;
        if (WARN_ON(hw == NULL) || WARN_ON(wl == NULL))
@@ -1036,26 +1017,20 @@ static struct brcms_info *brcms_attach(u16 vendor, u16 device,
        /* setup the bottom half handler */
        tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl);
 
-       wl->regsva = ioremap_nocache(regs, PCI_BAR0_WINSZ);
-       if (wl->regsva == NULL) {
-               wiphy_err(wl->wiphy, "wl%d: ioremap() failed\n", unit);
-               goto fail;
-       }
        spin_lock_init(&wl->lock);
        spin_lock_init(&wl->isr_lock);
 
        /* prepare ucode */
-       if (brcms_request_fw(wl, btparam) < 0) {
+       if (brcms_request_fw(wl, pdev->bus->host_pci) < 0) {
                wiphy_err(wl->wiphy, "%s: Failed to find firmware usually in "
                          "%s\n", KBUILD_MODNAME, "/lib/firmware/brcm");
                brcms_release_fw(wl);
-               brcms_remove(btparam);
+               brcms_remove(pdev);
                return NULL;
        }
 
        /* common load-time initialization */
-       wl->wlc = brcms_c_attach(wl, vendor, device, unit, false,
-                                wl->regsva, btparam, &err);
+       wl->wlc = brcms_c_attach((void *)wl, pdev, unit, false, &err);
        brcms_release_fw(wl);
        if (!wl->wlc) {
                wiphy_err(wl->wiphy, "%s: attach() failed with code %d\n",
@@ -1067,11 +1042,12 @@ static struct brcms_info *brcms_attach(u16 vendor, u16 device,
        wl->pub->ieee_hw = hw;
 
        /* register our interrupt handler */
-       if (request_irq(irq, brcms_isr, IRQF_SHARED, KBUILD_MODNAME, wl)) {
+       if (request_irq(pdev->bus->host_pci->irq, brcms_isr,
+                       IRQF_SHARED, KBUILD_MODNAME, wl)) {
                wiphy_err(wl->wiphy, "wl%d: request_irq() failed\n", unit);
                goto fail;
        }
-       wl->irq = irq;
+       wl->irq = pdev->bus->host_pci->irq;
 
        /* register module */
        brcms_c_module_register(wl->pub, "linux", wl, NULL);
@@ -1118,37 +1094,18 @@ fail:
  *
  * Perimeter lock is initialized in the course of this function.
  */
-static int __devinit
-brcms_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int __devinit brcms_bcma_probe(struct bcma_device *pdev)
 {
-       int rc;
        struct brcms_info *wl;
        struct ieee80211_hw *hw;
-       u32 val;
-
-       dev_info(&pdev->dev, "bus %d slot %d func %d irq %d\n",
-              pdev->bus->number, PCI_SLOT(pdev->devfn),
-              PCI_FUNC(pdev->devfn), pdev->irq);
 
-       if ((pdev->vendor != PCI_VENDOR_ID_BROADCOM) ||
-           ((pdev->device != 0x0576) &&
-            ((pdev->device & 0xff00) != 0x4300) &&
-            ((pdev->device & 0xff00) != 0x4700) &&
-            ((pdev->device < 43000) || (pdev->device > 43999))))
-               return -ENODEV;
+       dev_info(&pdev->dev, "mfg %x core %x rev %d class %d irq %d\n",
+                pdev->id.manuf, pdev->id.id, pdev->id.rev, pdev->id.class,
+                pdev->bus->host_pci->irq);
 
-       rc = pci_enable_device(pdev);
-       if (rc) {
-               pr_err("%s: Cannot enable device %d-%d_%d\n",
-                      __func__, pdev->bus->number, PCI_SLOT(pdev->devfn),
-                      PCI_FUNC(pdev->devfn));
+       if ((pdev->id.manuf != BCMA_MANUF_BCM) ||
+           (pdev->id.id != BCMA_CORE_80211))
                return -ENODEV;
-       }
-       pci_set_master(pdev);
-
-       pci_read_config_dword(pdev, 0x40, &val);
-       if ((val & 0x0000ff00) != 0)
-               pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
 
        hw = ieee80211_alloc_hw(sizeof(struct brcms_info), &brcms_ops);
        if (!hw) {
@@ -1158,14 +1115,11 @@ brcms_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        SET_IEEE80211_DEV(hw, &pdev->dev);
 
-       pci_set_drvdata(pdev, hw);
+       bcma_set_drvdata(pdev, hw);
 
        memset(hw->priv, 0, sizeof(*wl));
 
-       wl = brcms_attach(pdev->vendor, pdev->device,
-                         pci_resource_start(pdev, 0), pdev,
-                         pdev->irq);
-
+       wl = brcms_attach(pdev);
        if (!wl) {
                pr_err("%s: %s: brcms_attach failed!\n", KBUILD_MODNAME,
                       __func__);
@@ -1174,16 +1128,23 @@ brcms_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        return 0;
 }
 
-static int brcms_suspend(struct pci_dev *pdev, pm_message_t state)
+static int brcms_pci_suspend(struct pci_dev *pdev)
+{
+       pci_save_state(pdev);
+       pci_disable_device(pdev);
+       return pci_set_power_state(pdev, PCI_D3hot);
+}
+
+static int brcms_suspend(struct bcma_device *pdev, pm_message_t state)
 {
        struct brcms_info *wl;
        struct ieee80211_hw *hw;
 
-       hw = pci_get_drvdata(pdev);
+       hw = bcma_get_drvdata(pdev);
        wl = hw->priv;
        if (!wl) {
                wiphy_err(wl->wiphy,
-                         "brcms_suspend: pci_get_drvdata failed\n");
+                         "brcms_suspend: bcma_get_drvdata failed\n");
                return -ENODEV;
        }
 
@@ -1192,25 +1153,14 @@ static int brcms_suspend(struct pci_dev *pdev, pm_message_t state)
        wl->pub->hw_up = false;
        spin_unlock_bh(&wl->lock);
 
-       pci_save_state(pdev);
-       pci_disable_device(pdev);
-       return pci_set_power_state(pdev, PCI_D3hot);
+       /* temporarily do suspend ourselves */
+       return brcms_pci_suspend(pdev->bus->host_pci);
 }
 
-static int brcms_resume(struct pci_dev *pdev)
+static int brcms_pci_resume(struct pci_dev *pdev)
 {
-       struct brcms_info *wl;
-       struct ieee80211_hw *hw;
        int err = 0;
-       u32 val;
-
-       hw = pci_get_drvdata(pdev);
-       wl = hw->priv;
-       if (!wl) {
-               wiphy_err(wl->wiphy,
-                         "wl: brcms_resume: pci_get_drvdata failed\n");
-               return -ENODEV;
-       }
+       uint val;
 
        err = pci_set_power_state(pdev, PCI_D0);
        if (err)
@@ -1228,24 +1178,28 @@ static int brcms_resume(struct pci_dev *pdev)
        if ((val & 0x0000ff00) != 0)
                pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
 
+       return 0;
+}
+
+static int brcms_resume(struct bcma_device *pdev)
+{
        /*
-       *  done. driver will be put in up state
-       *  in brcms_ops_add_interface() call.
+       *  just do pci resume for now until bcma supports it.
        */
-       return err;
+       return brcms_pci_resume(pdev->bus->host_pci);
 }
 
-static struct pci_driver brcms_pci_driver = {
+static struct bcma_driver brcms_bcma_driver = {
        .name     = KBUILD_MODNAME,
-       .probe    = brcms_pci_probe,
+       .probe    = brcms_bcma_probe,
        .suspend  = brcms_suspend,
        .resume   = brcms_resume,
        .remove   = __devexit_p(brcms_remove),
-       .id_table = brcms_pci_id_table,
+       .id_table = brcms_coreid_table,
 };
 
 /**
- * This is the main entry point for the WL driver.
+ * This is the main entry point for the brcmsmac driver.
  *
  * This function determines if a device pointed to by pdev is a WL device,
  * and if so, performs a brcms_attach() on it.
@@ -1260,26 +1214,24 @@ static int __init brcms_module_init(void)
                brcm_msg_level = msglevel;
 #endif                         /* BCMDBG */
 
-       error = pci_register_driver(&brcms_pci_driver);
+       error = bcma_driver_register(&brcms_bcma_driver);
+       printk(KERN_ERR "%s: register returned %d\n", __func__, error);
        if (!error)
                return 0;
 
-
-
        return error;
 }
 
 /**
- * This function unloads the WL driver from the system.
+ * This function unloads the brcmsmac driver from the system.
  *
- * This function unconditionally unloads the WL driver module from the
+ * This function unconditionally unloads the brcmsmac driver module from the
  * system.
  *
  */
 static void __exit brcms_module_exit(void)
 {
-       pci_unregister_driver(&brcms_pci_driver);
-
+       bcma_driver_unregister(&brcms_bcma_driver);
 }
 
 module_init(brcms_module_init);
@@ -1313,7 +1265,7 @@ uint brcms_reset(struct brcms_info *wl)
        brcms_c_reset(wl->wlc);
 
        /* dpc will not be rescheduled */
-       wl->resched = 0;
+       wl->resched = false;
 
        return 0;
 }
@@ -1566,7 +1518,7 @@ fail:
 }
 
 /*
- * Precondition: Since this function is called in brcms_pci_probe() context,
+ * Precondition: Since this function is called in brcms_bcma_probe() context,
  * no locking is required.
  */
 int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes, u32 idx)
@@ -1606,7 +1558,7 @@ void brcms_ucode_free_buf(void *p)
 /*
  * checks validity of all firmware images loaded from user space
  *
- * Precondition: Since this function is called in brcms_pci_probe() context,
+ * Precondition: Since this function is called in brcms_bcma_probe() context,
  * no locking is required.
  */
 int brcms_check_firmwares(struct brcms_info *wl)
index 6242f18..8f60419 100644 (file)
@@ -68,8 +68,6 @@ struct brcms_info {
        spinlock_t lock;        /* per-device perimeter lock */
        spinlock_t isr_lock;    /* per-device ISR synchronization lock */
 
-       /* regsva for unmap in brcms_free() */
-       void __iomem *regsva;   /* opaque chip registers virtual address */
 
        /* timer related fields */
        atomic_t callbacks;     /* # outstanding callback functions */
index 36e3e06..f7ed340 100644 (file)
@@ -388,10 +388,13 @@ static u16 get_sifs(struct brcms_band *band)
  */
 static bool brcms_deviceremoved(struct brcms_c_info *wlc)
 {
+       u32 macctrl;
+
        if (!wlc->hw->clk)
                return ai_deviceremoved(wlc->hw->sih);
-       return (R_REG(&wlc->hw->regs->maccontrol) &
-               (MCTL_PSM_JMP_0 | MCTL_IHR_EN)) != MCTL_IHR_EN;
+       macctrl = bcma_read32(wlc->hw->d11core,
+                             D11REGOFFS(maccontrol));
+       return (macctrl & (MCTL_PSM_JMP_0 | MCTL_IHR_EN)) != MCTL_IHR_EN;
 }
 
 /* sum the individual fifo tx pending packet counts */
@@ -582,17 +585,15 @@ brcms_c_attach_malloc(uint unit, uint *err, uint devid)
 static void brcms_b_update_slot_timing(struct brcms_hardware *wlc_hw,
                                        bool shortslot)
 {
-       struct d11regs __iomem *regs;
-
-       regs = wlc_hw->regs;
+       struct bcma_device *core = wlc_hw->d11core;
 
        if (shortslot) {
                /* 11g short slot: 11a timing */
-               W_REG(&regs->ifs_slot, 0x0207); /* APHY_SLOT_TIME */
+               bcma_write16(core, D11REGOFFS(ifs_slot), 0x0207);
                brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, APHY_SLOT_TIME);
        } else {
                /* 11g long slot: 11b timing */
-               W_REG(&regs->ifs_slot, 0x0212); /* BPHY_SLOT_TIME */
+               bcma_write16(core, D11REGOFFS(ifs_slot), 0x0212);
                brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, BPHY_SLOT_TIME);
        }
 }
@@ -672,24 +673,22 @@ static uint brcms_c_calc_frame_time(struct brcms_c_info *wlc, u32 ratespec,
 static void brcms_c_write_inits(struct brcms_hardware *wlc_hw,
                                const struct d11init *inits)
 {
+       struct bcma_device *core = wlc_hw->d11core;
        int i;
-       u8 __iomem *base;
-       u8 __iomem *addr;
+       uint offset;
        u16 size;
        u32 value;
 
        BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
 
-       base = (u8 __iomem *)wlc_hw->regs;
-
        for (i = 0; inits[i].addr != cpu_to_le16(0xffff); i++) {
                size = le16_to_cpu(inits[i].size);
-               addr = base + le16_to_cpu(inits[i].addr);
+               offset = le16_to_cpu(inits[i].addr);
                value = le32_to_cpu(inits[i].value);
                if (size == 2)
-                       W_REG((u16 __iomem *)addr, value);
+                       bcma_write16(core, offset, value);
                else if (size == 4)
-                       W_REG((u32 __iomem *)addr, value);
+                       bcma_write32(core, offset, value);
                else
                        break;
        }
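The tables walked here are arrays of little-endian records terminated by an addr of 0xffff, with the size field selecting a 16- or 32-bit register write. A sketch of the assumed record layout (field widths inferred from the le16/le32 conversions in the loop above):

struct d11init_sketch {		/* stands in for the driver's struct d11init */
	__le16 addr;		/* register offset within the d11 core */
	__le16 size;		/* width of the write: 2 or 4 bytes */
	__le32 value;		/* value to program */
};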
@@ -739,6 +738,14 @@ static void brcms_c_ucode_bsinit(struct brcms_hardware *wlc_hw)
        }
 }
 
+static void brcms_b_core_ioctl(struct brcms_hardware *wlc_hw, u32 m, u32 v)
+{
+       struct bcma_device *core = wlc_hw->d11core;
+       u32 ioctl = bcma_aread32(core, BCMA_IOCTL) & ~m;
+
+       bcma_awrite32(core, BCMA_IOCTL, ioctl | v);
+}
+
 static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk)
 {
        BCMMSG(wlc_hw->wlc->wiphy, "wl%d: clk %d\n", wlc_hw->unit, clk);
@@ -747,17 +754,17 @@ static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk)
 
        if (OFF == clk) {       /* clear gmode bit, put phy into reset */
 
-               ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC | SICF_GMODE),
-                              (SICF_PRST | SICF_FGC));
+               brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC | SICF_GMODE),
+                                  (SICF_PRST | SICF_FGC));
                udelay(1);
-               ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC), SICF_PRST);
+               brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC), SICF_PRST);
                udelay(1);
 
        } else {                /* take phy out of reset */
 
-               ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC), SICF_FGC);
+               brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC), SICF_FGC);
                udelay(1);
-               ai_core_cflags(wlc_hw->sih, (SICF_FGC), 0);
+               brcms_b_core_ioctl(wlc_hw, SICF_FGC, 0);
                udelay(1);
 
        }
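brcms_b_core_ioctl() stands in for ai_core_cflags(): both are masked read-modify-write operations, now aimed at the per-core BCMA_IOCTL agent register. An equivalence sketch with a hypothetical helper name:

static void d11_ioctl_mask_set(struct bcma_device *core, u32 mask, u32 val)
{
	u32 ioctl = bcma_aread32(core, BCMA_IOCTL);

	ioctl &= ~mask;		/* clear everything covered by the mask */
	ioctl |= val;		/* then set the requested bits */
	bcma_awrite32(core, BCMA_IOCTL, ioctl);
}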
@@ -778,9 +785,14 @@ static void brcms_c_setxband(struct brcms_hardware *wlc_hw, uint bandunit)
        wlc_hw->wlc->band = wlc_hw->wlc->bandstate[bandunit];
 
        /* set gmode core flag */
-       if (wlc_hw->sbclk && !wlc_hw->noreset)
-               ai_core_cflags(wlc_hw->sih, SICF_GMODE,
-                              ((bandunit == 0) ? SICF_GMODE : 0));
+       if (wlc_hw->sbclk && !wlc_hw->noreset) {
+               u32 gmode = 0;
+
+               if (bandunit == 0)
+                       gmode = SICF_GMODE;
+
+               brcms_b_core_ioctl(wlc_hw, SICF_GMODE, gmode);
+       }
 }
 
 /* switch to new band but leave it inactive */
@@ -788,10 +800,12 @@ static u32 brcms_c_setband_inact(struct brcms_c_info *wlc, uint bandunit)
 {
        struct brcms_hardware *wlc_hw = wlc->hw;
        u32 macintmask;
+       u32 macctrl;
 
        BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
-       WARN_ON((R_REG(&wlc_hw->regs->maccontrol) & MCTL_EN_MAC) != 0);
+       macctrl = bcma_read32(wlc_hw->d11core,
+                             D11REGOFFS(maccontrol));
+       WARN_ON((macctrl & MCTL_EN_MAC) != 0);
 
        /* disable interrupts */
        macintmask = brcms_intrsoff(wlc->wl);
@@ -955,8 +969,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
        brcms_c_txfifo_complete(wlc, queue, 1);
 
        if (lastframe) {
-               p->next = NULL;
-               p->prev = NULL;
                /* remove PLCP & Broadcom tx descriptor header */
                skb_pull(p, D11_PHY_HDR_LEN);
                skb_pull(p, D11_TXH_LEN);
@@ -984,7 +996,7 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
 {
        bool morepending = false;
        struct brcms_c_info *wlc = wlc_hw->wlc;
-       struct d11regs __iomem *regs;
+       struct bcma_device *core;
        struct tx_status txstatus, *txs;
        u32 s1, s2;
        uint n = 0;
@@ -997,18 +1009,18 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
        BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
 
        txs = &txstatus;
-       regs = wlc_hw->regs;
+       core = wlc_hw->d11core;
        *fatal = false;
+       s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
        while (!(*fatal)
-              && (s1 = R_REG(&regs->frmtxstatus)) & TXS_V) {
+              && (s1 & TXS_V)) {
 
                if (s1 == 0xffffffff) {
                        wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n",
                                wlc_hw->unit, __func__);
                        return morepending;
                }
-
-               s2 = R_REG(&regs->frmtxstatus2);
+               s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
 
                txs->status = s1 & TXS_STATUS_MASK;
                txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT;
@@ -1021,6 +1033,7 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
                /* !give others some time to run! */
                if (++n >= max_tx_num)
                        break;
+               s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
        }
 
        if (*fatal)
@@ -1065,12 +1078,12 @@ brcms_c_mhfdef(struct brcms_c_info *wlc, u16 *mhfs, u16 mhf2_init)
        }
 }
 
-static struct dma64regs __iomem *
-dmareg(struct brcms_hardware *hw, uint direction, uint fifonum)
+static uint
+dmareg(uint direction, uint fifonum)
 {
        if (direction == DMA_TX)
-               return &(hw->regs->fifo64regs[fifonum].dmaxmt);
-       return &(hw->regs->fifo64regs[fifonum].dmarcv);
+               return offsetof(struct d11regs, fifo64regs[fifonum].dmaxmt);
+       return offsetof(struct d11regs, fifo64regs[fifonum].dmarcv);
 }
 
 static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
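dmareg() no longer hands out __iomem pointers; it returns plain byte offsets computed with offsetof(), which dma_attach() later adds in its bcma accessor calls. The D11REGOFFS() macro used elsewhere in the series is presumed to be the same offsetof() shorthand for single registers, e.g.:

static u16 d11_read_ifs_slot(struct bcma_device *core)
{
	/* assumed to be equivalent to bcma_read16(core, D11REGOFFS(ifs_slot)) */
	return bcma_read16(core, offsetof(struct d11regs, ifs_slot));
}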
@@ -1096,9 +1109,9 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
                 * TX: TX_AC_BK_FIFO (TX AC Background data packets)
                 * RX: RX_FIFO (RX data packets)
                 */
-               wlc_hw->di[0] = dma_attach(name, wlc_hw->sih,
-                                          (wme ? dmareg(wlc_hw, DMA_TX, 0) :
-                                           NULL), dmareg(wlc_hw, DMA_RX, 0),
+               wlc_hw->di[0] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+                                          (wme ? dmareg(DMA_TX, 0) : 0),
+                                          dmareg(DMA_RX, 0),
                                           (wme ? NTXD : 0), NRXD,
                                           RXBUFSZ, -1, NRXBUFPOST,
                                           BRCMS_HWRXOFF, &brcm_msg_level);
@@ -1110,8 +1123,8 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
                 *   (legacy) TX_DATA_FIFO (TX data packets)
                 * RX: UNUSED
                 */
-               wlc_hw->di[1] = dma_attach(name, wlc_hw->sih,
-                                          dmareg(wlc_hw, DMA_TX, 1), NULL,
+               wlc_hw->di[1] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+                                          dmareg(DMA_TX, 1), 0,
                                           NTXD, 0, 0, -1, 0, 0,
                                           &brcm_msg_level);
                dma_attach_err |= (NULL == wlc_hw->di[1]);
@@ -1121,8 +1134,8 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
                 * TX: TX_AC_VI_FIFO (TX AC Video data packets)
                 * RX: UNUSED
                 */
-               wlc_hw->di[2] = dma_attach(name, wlc_hw->sih,
-                                          dmareg(wlc_hw, DMA_TX, 2), NULL,
+               wlc_hw->di[2] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+                                          dmareg(DMA_TX, 2), 0,
                                           NTXD, 0, 0, -1, 0, 0,
                                           &brcm_msg_level);
                dma_attach_err |= (NULL == wlc_hw->di[2]);
@@ -1131,9 +1144,9 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
                 * TX: TX_AC_VO_FIFO (TX AC Voice data packets)
                 *   (legacy) TX_CTL_FIFO (TX control & mgmt packets)
                 */
-               wlc_hw->di[3] = dma_attach(name, wlc_hw->sih,
-                                          dmareg(wlc_hw, DMA_TX, 3),
-                                          NULL, NTXD, 0, 0, -1,
+               wlc_hw->di[3] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+                                          dmareg(DMA_TX, 3),
+                                          0, NTXD, 0, 0, -1,
                                           0, 0, &brcm_msg_level);
                dma_attach_err |= (NULL == wlc_hw->di[3]);
 /* Cleaner to leave this as if with AP defined */
@@ -1207,7 +1220,7 @@ static void brcms_b_wait_for_wake(struct brcms_hardware *wlc_hw)
 /* control chip clock to save power, enable dynamic clock or force fast clock */
 static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode)
 {
-       if (wlc_hw->sih->cccaps & CC_CAP_PMU) {
+       if (ai_get_cccaps(wlc_hw->sih) & CC_CAP_PMU) {
                /* new chips with PMU, CCS_FORCEHT will distribute the HT clock
                 * on backplane, but mac core will still run on ALP(not HT) when
                 * it enters powersave mode, which means the FCA bit may not be
@@ -1216,29 +1229,33 @@ static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode)
 
                if (wlc_hw->clk) {
                        if (mode == CLK_FAST) {
-                               OR_REG(&wlc_hw->regs->clk_ctl_st,
-                                      CCS_FORCEHT);
+                               bcma_set32(wlc_hw->d11core,
+                                          D11REGOFFS(clk_ctl_st),
+                                          CCS_FORCEHT);
 
                                udelay(64);
 
-                               SPINWAIT(((R_REG
-                                          (&wlc_hw->regs->
-                                           clk_ctl_st) & CCS_HTAVAIL) == 0),
-                                        PMU_MAX_TRANSITION_DLY);
-                               WARN_ON(!(R_REG
-                                         (&wlc_hw->regs->
-                                          clk_ctl_st) & CCS_HTAVAIL));
+                               SPINWAIT(
+                                   ((bcma_read32(wlc_hw->d11core,
+                                     D11REGOFFS(clk_ctl_st)) &
+                                     CCS_HTAVAIL) == 0),
+                                     PMU_MAX_TRANSITION_DLY);
+                               WARN_ON(!(bcma_read32(wlc_hw->d11core,
+                                       D11REGOFFS(clk_ctl_st)) &
+                                       CCS_HTAVAIL));
                        } else {
-                               if ((wlc_hw->sih->pmurev == 0) &&
-                                   (R_REG
-                                    (&wlc_hw->regs->
-                                     clk_ctl_st) & (CCS_FORCEHT | CCS_HTAREQ)))
-                                       SPINWAIT(((R_REG
-                                                  (&wlc_hw->regs->
-                                                   clk_ctl_st) & CCS_HTAVAIL)
-                                                 == 0),
-                                                PMU_MAX_TRANSITION_DLY);
-                               AND_REG(&wlc_hw->regs->clk_ctl_st,
+                               if ((ai_get_pmurev(wlc_hw->sih) == 0) &&
+                                   (bcma_read32(wlc_hw->d11core,
+                                       D11REGOFFS(clk_ctl_st)) &
+                                       (CCS_FORCEHT | CCS_HTAREQ)))
+                                       SPINWAIT(
+                                           ((bcma_read32(wlc_hw->d11core,
+                                             offsetof(struct d11regs,
+                                                      clk_ctl_st)) &
+                                             CCS_HTAVAIL) == 0),
+                                             PMU_MAX_TRANSITION_DLY);
+                               bcma_mask32(wlc_hw->d11core,
+                                       D11REGOFFS(clk_ctl_st),
                                        ~CCS_FORCEHT);
                        }
                }
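bcma_set32() and bcma_mask32() replace OR_REG() and AND_REG(); both are read-modify-write helpers on a register offset. A minimal sketch of the force-HT request and its removal as seen above, with a hypothetical helper name:

static void d11_force_ht(struct bcma_device *core, bool on)
{
	if (on)		/* OR the request bit in */
		bcma_set32(core, D11REGOFFS(clk_ctl_st), CCS_FORCEHT);
	else		/* AND it back out */
		bcma_mask32(core, D11REGOFFS(clk_ctl_st), ~CCS_FORCEHT);
}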
@@ -1253,7 +1270,7 @@ static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode)
 
                /* check fast clock is available (if core is not in reset) */
                if (wlc_hw->forcefastclk && wlc_hw->clk)
-                       WARN_ON(!(ai_core_sflags(wlc_hw->sih, 0, 0) &
+                       WARN_ON(!(bcma_aread32(wlc_hw->d11core, BCMA_IOST) &
                                  SISF_FCLKA));
 
                /*
@@ -1370,7 +1387,8 @@ static void brcms_c_mctrl_write(struct brcms_hardware *wlc_hw)
                maccontrol |= MCTL_INFRA;
        }
 
-       W_REG(&wlc_hw->regs->maccontrol, maccontrol);
+       bcma_write32(wlc_hw->d11core, D11REGOFFS(maccontrol),
+                    maccontrol);
 }
 
 /* set or clear maccontrol bits */
@@ -1464,7 +1482,7 @@ static void
 brcms_b_set_addrmatch(struct brcms_hardware *wlc_hw, int match_reg_offset,
                       const u8 *addr)
 {
-       struct d11regs __iomem *regs;
+       struct bcma_device *core = wlc_hw->d11core;
        u16 mac_l;
        u16 mac_m;
        u16 mac_h;
@@ -1472,38 +1490,36 @@ brcms_b_set_addrmatch(struct brcms_hardware *wlc_hw, int match_reg_offset,
        BCMMSG(wlc_hw->wlc->wiphy, "wl%d: brcms_b_set_addrmatch\n",
                 wlc_hw->unit);
 
-       regs = wlc_hw->regs;
        mac_l = addr[0] | (addr[1] << 8);
        mac_m = addr[2] | (addr[3] << 8);
        mac_h = addr[4] | (addr[5] << 8);
 
        /* enter the MAC addr into the RXE match registers */
-       W_REG(&regs->rcm_ctl, RCM_INC_DATA | match_reg_offset);
-       W_REG(&regs->rcm_mat_data, mac_l);
-       W_REG(&regs->rcm_mat_data, mac_m);
-       W_REG(&regs->rcm_mat_data, mac_h);
-
+       bcma_write16(core, D11REGOFFS(rcm_ctl),
+                    RCM_INC_DATA | match_reg_offset);
+       bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_l);
+       bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_m);
+       bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_h);
 }
 
 void
 brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset, int len,
                            void *buf)
 {
-       struct d11regs __iomem *regs;
+       struct bcma_device *core = wlc_hw->d11core;
        u32 word;
        __le32 word_le;
        __be32 word_be;
        bool be_bit;
        BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
 
-       regs = wlc_hw->regs;
-       W_REG(&regs->tplatewrptr, offset);
+       bcma_write32(core, D11REGOFFS(tplatewrptr), offset);
 
        /* if MCTL_BIGEND bit set in mac control register,
         * the chip swaps data in fifo, as well as data in
         * template ram
         */
-       be_bit = (R_REG(&regs->maccontrol) & MCTL_BIGEND) != 0;
+       be_bit = (bcma_read32(core, D11REGOFFS(maccontrol)) & MCTL_BIGEND) != 0;
 
        while (len > 0) {
                memcpy(&word, buf, sizeof(u32));
@@ -1516,7 +1532,7 @@ brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset, int len,
                        word = *(u32 *)&word_le;
                }
 
-               W_REG(&regs->tplatewrdata, word);
+               bcma_write32(core, D11REGOFFS(tplatewrdata), word);
 
                buf = (u8 *) buf + sizeof(u32);
                len -= sizeof(u32);
@@ -1527,18 +1543,20 @@ static void brcms_b_set_cwmin(struct brcms_hardware *wlc_hw, u16 newmin)
 {
        wlc_hw->band->CWmin = newmin;
 
-       W_REG(&wlc_hw->regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_CWMIN);
-       (void)R_REG(&wlc_hw->regs->objaddr);
-       W_REG(&wlc_hw->regs->objdata, newmin);
+       bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
+                    OBJADDR_SCR_SEL | S_DOT11_CWMIN);
+       (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
+       bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), newmin);
 }
 
 static void brcms_b_set_cwmax(struct brcms_hardware *wlc_hw, u16 newmax)
 {
        wlc_hw->band->CWmax = newmax;
 
-       W_REG(&wlc_hw->regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_CWMAX);
-       (void)R_REG(&wlc_hw->regs->objaddr);
-       W_REG(&wlc_hw->regs->objdata, newmax);
+       bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
+                    OBJADDR_SCR_SEL | S_DOT11_CWMAX);
+       (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
+       bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), newmax);
 }
 
 void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw)
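CWmin and CWmax live in ucode scratch memory and are reached through the objaddr/objdata window: select the address, read objaddr back to flush the posted write, then move the value through objdata. A hypothetical helper capturing the idiom used in both setters above:

static void d11_write_scr(struct bcma_device *core, u32 addr, u32 val)
{
	bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SCR_SEL | addr);
	(void)bcma_read32(core, D11REGOFFS(objaddr));	/* settle the window */
	bcma_write32(core, D11REGOFFS(objdata), val);
}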
@@ -1704,17 +1722,17 @@ void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw)
 {
        BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
 
-       ai_corereg(wlc_hw->sih, SI_CC_IDX,
-                  offsetof(struct chipcregs, chipcontrol_addr), ~0, 0);
+       ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_addr),
+                 ~0, 0);
        udelay(1);
-       ai_corereg(wlc_hw->sih, SI_CC_IDX,
-                  offsetof(struct chipcregs, chipcontrol_data), 0x4, 0);
+       ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data),
+                 0x4, 0);
        udelay(1);
-       ai_corereg(wlc_hw->sih, SI_CC_IDX,
-                  offsetof(struct chipcregs, chipcontrol_data), 0x4, 4);
+       ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data),
+                 0x4, 4);
        udelay(1);
-       ai_corereg(wlc_hw->sih, SI_CC_IDX,
-                  offsetof(struct chipcregs, chipcontrol_data), 0x4, 0);
+       ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data),
+                 0x4, 0);
        udelay(1);
 }
 
@@ -1728,18 +1746,18 @@ void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk)
                return;
 
        if (ON == clk)
-               ai_core_cflags(wlc_hw->sih, SICF_FGC, SICF_FGC);
+               brcms_b_core_ioctl(wlc_hw, SICF_FGC, SICF_FGC);
        else
-               ai_core_cflags(wlc_hw->sih, SICF_FGC, 0);
+               brcms_b_core_ioctl(wlc_hw, SICF_FGC, 0);
 
 }
 
 void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk)
 {
        if (ON == clk)
-               ai_core_cflags(wlc_hw->sih, SICF_MPCLKE, SICF_MPCLKE);
+               brcms_b_core_ioctl(wlc_hw, SICF_MPCLKE, SICF_MPCLKE);
        else
-               ai_core_cflags(wlc_hw->sih, SICF_MPCLKE, 0);
+               brcms_b_core_ioctl(wlc_hw, SICF_MPCLKE, 0);
 }
 
 void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
@@ -1759,7 +1777,7 @@ void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
        if (BRCMS_ISNPHY(wlc_hw->band) && NREV_GE(wlc_hw->band->phyrev, 3) &&
            NREV_LE(wlc_hw->band->phyrev, 4)) {
                /* Set the PHY bandwidth */
-               ai_core_cflags(wlc_hw->sih, SICF_BWMASK, phy_bw_clkbits);
+               brcms_b_core_ioctl(wlc_hw, SICF_BWMASK, phy_bw_clkbits);
 
                udelay(1);
 
@@ -1767,13 +1785,13 @@ void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
                brcms_b_core_phypll_reset(wlc_hw);
 
                /* reset the PHY */
-               ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_PCLKE),
-                              (SICF_PRST | SICF_PCLKE));
+               brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_PCLKE),
+                                  (SICF_PRST | SICF_PCLKE));
                phy_in_reset = true;
        } else {
-               ai_core_cflags(wlc_hw->sih,
-                              (SICF_PRST | SICF_PCLKE | SICF_BWMASK),
-                              (SICF_PRST | SICF_PCLKE | phy_bw_clkbits));
+               brcms_b_core_ioctl(wlc_hw,
+                                  (SICF_PRST | SICF_PCLKE | SICF_BWMASK),
+                                  (SICF_PRST | SICF_PCLKE | phy_bw_clkbits));
        }
 
        udelay(2);
@@ -1790,8 +1808,8 @@ static void brcms_b_setband(struct brcms_hardware *wlc_hw, uint bandunit,
        u32 macintmask;
 
        /* Enable the d11 core before accessing it */
-       if (!ai_iscoreup(wlc_hw->sih)) {
-               ai_core_reset(wlc_hw->sih, 0, 0);
+       if (!bcma_core_is_enabled(wlc_hw->d11core)) {
+               bcma_core_enable(wlc_hw->d11core, 0);
                brcms_c_mctrl_reset(wlc_hw);
        }
 
@@ -1817,7 +1835,8 @@ static void brcms_b_setband(struct brcms_hardware *wlc_hw, uint bandunit,
        brcms_intrsrestore(wlc->wl, macintmask);
 
        /* ucode should still be suspended.. */
-       WARN_ON((R_REG(&wlc_hw->regs->maccontrol) & MCTL_EN_MAC) != 0);
+       WARN_ON((bcma_read32(wlc_hw->d11core, D11REGOFFS(maccontrol)) &
+                MCTL_EN_MAC) != 0);
 }
 
 static bool brcms_c_isgoodchip(struct brcms_hardware *wlc_hw)
@@ -1845,7 +1864,7 @@ static bool brcms_c_validboardtype(struct brcms_hardware *wlc_hw)
        uint b2 = boardrev & 0xf;
 
        /* boards from other vendors are always considered valid */
-       if (wlc_hw->sih->boardvendor != PCI_VENDOR_ID_BROADCOM)
+       if (ai_get_boardvendor(wlc_hw->sih) != PCI_VENDOR_ID_BROADCOM)
                return true;
 
        /* do some boardrev sanity checks when boardvendor is Broadcom */
@@ -1917,7 +1936,7 @@ static void brcms_b_xtal(struct brcms_hardware *wlc_hw, bool want)
 static bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw)
 {
        bool v, clk, xtal;
-       u32 resetbits = 0, flags = 0;
+       u32 flags = 0;
 
        xtal = wlc_hw->sbclk;
        if (!xtal)
@@ -1934,22 +1953,22 @@ static bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw)
                flags |= SICF_PCLKE;
 
                /*
+                * TODO: test suspend/resume
+                *
                 * AI chip doesn't restore bar0win2 on
                 * hibernation/resume, need sw fixup
                 */
-               if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
-                   (wlc_hw->sih->chip == BCM43225_CHIP_ID))
-                       wlc_hw->regs = (struct d11regs __iomem *)
-                                       ai_setcore(wlc_hw->sih, D11_CORE_ID, 0);
-               ai_core_reset(wlc_hw->sih, flags, resetbits);
+
+               bcma_core_enable(wlc_hw->d11core, flags);
                brcms_c_mctrl_reset(wlc_hw);
        }
 
-       v = ((R_REG(&wlc_hw->regs->phydebug) & PDBG_RFD) != 0);
+       v = ((bcma_read32(wlc_hw->d11core,
+                         D11REGOFFS(phydebug)) & PDBG_RFD) != 0);
 
        /* put core back into reset */
        if (!clk)
-               ai_core_disable(wlc_hw->sih, 0);
+               bcma_core_disable(wlc_hw->d11core, 0);
 
        if (!xtal)
                brcms_b_xtal(wlc_hw, OFF);
@@ -1973,25 +1992,21 @@ static bool wlc_dma_rxreset(struct brcms_hardware *wlc_hw, uint fifo)
  */
 void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
 {
-       struct d11regs __iomem *regs;
        uint i;
        bool fastclk;
-       u32 resetbits = 0;
 
        if (flags == BRCMS_USE_COREFLAGS)
                flags = (wlc_hw->band->pi ? wlc_hw->band->core_flags : 0);
 
        BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
 
-       regs = wlc_hw->regs;
-
        /* request FAST clock if not on  */
        fastclk = wlc_hw->forcefastclk;
        if (!fastclk)
                brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
 
        /* reset the dma engines except first time thru */
-       if (ai_iscoreup(wlc_hw->sih)) {
+       if (bcma_core_is_enabled(wlc_hw->d11core)) {
                for (i = 0; i < NFIFO; i++)
                        if ((wlc_hw->di[i]) && (!dma_txreset(wlc_hw->di[i])))
                                wiphy_err(wlc_hw->wlc->wiphy, "wl%d: %s: "
@@ -2029,14 +2044,14 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
         * they may touch chipcommon as well.
         */
        wlc_hw->clk = false;
-       ai_core_reset(wlc_hw->sih, flags, resetbits);
+       bcma_core_enable(wlc_hw->d11core, flags);
        wlc_hw->clk = true;
        if (wlc_hw->band && wlc_hw->band->pi)
                wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, true);
 
        brcms_c_mctrl_reset(wlc_hw);
 
-       if (wlc_hw->sih->cccaps & CC_CAP_PMU)
+       if (ai_get_cccaps(wlc_hw->sih) & CC_CAP_PMU)
                brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
 
        brcms_b_phy_reset(wlc_hw);
@@ -2057,7 +2072,7 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
  */
 static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw)
 {
-       struct d11regs __iomem *regs = wlc_hw->regs;
+       struct bcma_device *core = wlc_hw->d11core;
        u16 fifo_nu;
        u16 txfifo_startblk = TXFIFO_START_BLK, txfifo_endblk;
        u16 txfifo_def, txfifo_def1;
@@ -2078,11 +2093,11 @@ static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw)
                txfifo_cmd =
                    TXFIFOCMD_RESET_MASK | (fifo_nu << TXFIFOCMD_FIFOSEL_SHIFT);
 
-               W_REG(&regs->xmtfifocmd, txfifo_cmd);
-               W_REG(&regs->xmtfifodef, txfifo_def);
-               W_REG(&regs->xmtfifodef1, txfifo_def1);
+               bcma_write16(core, D11REGOFFS(xmtfifocmd), txfifo_cmd);
+               bcma_write16(core, D11REGOFFS(xmtfifodef), txfifo_def);
+               bcma_write16(core, D11REGOFFS(xmtfifodef1), txfifo_def1);
 
-               W_REG(&regs->xmtfifocmd, txfifo_cmd);
+               bcma_write16(core, D11REGOFFS(xmtfifocmd), txfifo_cmd);
 
                txfifo_startblk += wlc_hw->xmtfifo_sz[fifo_nu];
        }
@@ -2117,27 +2132,27 @@ static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw)
 
 void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode)
 {
-       struct d11regs __iomem *regs = wlc_hw->regs;
+       struct bcma_device *core = wlc_hw->d11core;
 
-       if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
-           (wlc_hw->sih->chip == BCM43225_CHIP_ID)) {
+       if ((ai_get_chip_id(wlc_hw->sih) == BCM43224_CHIP_ID) ||
+           (ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID)) {
                if (spurmode == WL_SPURAVOID_ON2) {     /* 126Mhz */
-                       W_REG(&regs->tsf_clk_frac_l, 0x2082);
-                       W_REG(&regs->tsf_clk_frac_h, 0x8);
+                       bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x2082);
+                       bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
                } else if (spurmode == WL_SPURAVOID_ON1) {      /* 123Mhz */
-                       W_REG(&regs->tsf_clk_frac_l, 0x5341);
-                       W_REG(&regs->tsf_clk_frac_h, 0x8);
+                       bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x5341);
+                       bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
                } else {        /* 120Mhz */
-                       W_REG(&regs->tsf_clk_frac_l, 0x8889);
-                       W_REG(&regs->tsf_clk_frac_h, 0x8);
+                       bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x8889);
+                       bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
                }
        } else if (BRCMS_ISLCNPHY(wlc_hw->band)) {
                if (spurmode == WL_SPURAVOID_ON1) {     /* 82Mhz */
-                       W_REG(&regs->tsf_clk_frac_l, 0x7CE0);
-                       W_REG(&regs->tsf_clk_frac_h, 0xC);
+                       bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x7CE0);
+                       bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0xC);
                } else {        /* 80Mhz */
-                       W_REG(&regs->tsf_clk_frac_l, 0xCCCD);
-                       W_REG(&regs->tsf_clk_frac_h, 0xC);
+                       bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0xCCCD);
+                       bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0xC);
                }
        }
 }
@@ -2146,11 +2161,8 @@ void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode)
 static void brcms_c_gpio_init(struct brcms_c_info *wlc)
 {
        struct brcms_hardware *wlc_hw = wlc->hw;
-       struct d11regs __iomem *regs;
        u32 gc, gm;
 
-       regs = wlc_hw->regs;
-
        /* use GPIO select 0 to get all gpio signals from the gpio out reg */
        brcms_b_mctrl(wlc_hw, MCTL_GPOUT_SEL_MASK, 0);
 
@@ -2181,10 +2193,10 @@ static void brcms_c_gpio_init(struct brcms_c_info *wlc)
                 * The board itself is powered by these GPIOs
                 * (when not sending pattern) so set them high
                 */
-               OR_REG(&regs->psm_gpio_oe,
-                      (BOARD_GPIO_12 | BOARD_GPIO_13));
-               OR_REG(&regs->psm_gpio_out,
-                      (BOARD_GPIO_12 | BOARD_GPIO_13));
+               bcma_set16(wlc_hw->d11core, D11REGOFFS(psm_gpio_oe),
+                          (BOARD_GPIO_12 | BOARD_GPIO_13));
+               bcma_set16(wlc_hw->d11core, D11REGOFFS(psm_gpio_out),
+                          (BOARD_GPIO_12 | BOARD_GPIO_13));
 
                /* Enable antenna diversity, use 2x4 mode */
                brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_EN,
@@ -2211,7 +2223,7 @@ static void brcms_c_gpio_init(struct brcms_c_info *wlc)
 static void brcms_ucode_write(struct brcms_hardware *wlc_hw,
                              const __le32 ucode[], const size_t nbytes)
 {
-       struct d11regs __iomem *regs = wlc_hw->regs;
+       struct bcma_device *core = wlc_hw->d11core;
        uint i;
        uint count;
 
@@ -2219,10 +2231,11 @@ static void brcms_ucode_write(struct brcms_hardware *wlc_hw,
 
        count = (nbytes / sizeof(u32));
 
-       W_REG(&regs->objaddr, (OBJADDR_AUTO_INC | OBJADDR_UCM_SEL));
-       (void)R_REG(&regs->objaddr);
+       bcma_write32(core, D11REGOFFS(objaddr),
+                    OBJADDR_AUTO_INC | OBJADDR_UCM_SEL);
+       (void)bcma_read32(core, D11REGOFFS(objaddr));
        for (i = 0; i < count; i++)
-               W_REG(&regs->objdata, le32_to_cpu(ucode[i]));
+               bcma_write32(core, D11REGOFFS(objdata), le32_to_cpu(ucode[i]));
 
 }
 
@@ -2288,7 +2301,7 @@ static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
        bool fatal = false;
        uint unit;
        uint intstatus, idx;
-       struct d11regs __iomem *regs = wlc_hw->regs;
+       struct bcma_device *core = wlc_hw->d11core;
        struct wiphy *wiphy = wlc_hw->wlc->wiphy;
 
        unit = wlc_hw->unit;
@@ -2296,7 +2309,9 @@ static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
        for (idx = 0; idx < NFIFO; idx++) {
                /* read intstatus register and ignore any non-error bits */
                intstatus =
-                   R_REG(&regs->intctrlregs[idx].intstatus) & I_ERRORS;
+                       bcma_read32(core,
+                                   D11REGOFFS(intctrlregs[idx].intstatus)) &
+                       I_ERRORS;
                if (!intstatus)
                        continue;
 
@@ -2341,8 +2356,9 @@ static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
                        brcms_fatal_error(wlc_hw->wlc->wl); /* big hammer */
                        break;
                } else
-                       W_REG(&regs->intctrlregs[idx].intstatus,
-                             intstatus);
+                       bcma_write32(core,
+                                    D11REGOFFS(intctrlregs[idx].intstatus),
+                                    intstatus);
        }
 }
 
@@ -2350,28 +2366,7 @@ void brcms_c_intrson(struct brcms_c_info *wlc)
 {
        struct brcms_hardware *wlc_hw = wlc->hw;
        wlc->macintmask = wlc->defmacintmask;
-       W_REG(&wlc_hw->regs->macintmask, wlc->macintmask);
-}
-
-/*
- * callback for siutils.c, which has only wlc handler, no wl they both check
- * up, not only because there is no need to off/restore d11 interrupt but also
- * because per-port code may require sync with valid interrupt.
- */
-static u32 brcms_c_wlintrsoff(struct brcms_c_info *wlc)
-{
-       if (!wlc->hw->up)
-               return 0;
-
-       return brcms_intrsoff(wlc->wl);
-}
-
-static void brcms_c_wlintrsrestore(struct brcms_c_info *wlc, u32 macintmask)
-{
-       if (!wlc->hw->up)
-               return;
-
-       brcms_intrsrestore(wlc->wl, macintmask);
+       bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), wlc->macintmask);
 }
 
 u32 brcms_c_intrsoff(struct brcms_c_info *wlc)
@@ -2384,8 +2379,8 @@ u32 brcms_c_intrsoff(struct brcms_c_info *wlc)
 
        macintmask = wlc->macintmask;   /* isr can still happen */
 
-       W_REG(&wlc_hw->regs->macintmask, 0);
-       (void)R_REG(&wlc_hw->regs->macintmask); /* sync readback */
+       bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), 0);
+       (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(macintmask));
        udelay(1);              /* ensure int line is no longer driven */
        wlc->macintmask = 0;
 
@@ -2400,7 +2395,7 @@ void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask)
                return;
 
        wlc->macintmask = macintmask;
-       W_REG(&wlc_hw->regs->macintmask, wlc->macintmask);
+       bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), wlc->macintmask);
 }
 
 /* assumes that the d11 MAC is enabled */
@@ -2512,11 +2507,11 @@ brcms_c_mute(struct brcms_c_info *wlc, bool mute_tx)
 static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr)
 {
        struct brcms_hardware *wlc_hw = wlc->hw;
-       struct d11regs __iomem *regs = wlc_hw->regs;
+       struct bcma_device *core = wlc_hw->d11core;
        u32 macintstatus;
 
        /* macintstatus includes a DMA interrupt summary bit */
-       macintstatus = R_REG(&regs->macintstatus);
+       macintstatus = bcma_read32(core, D11REGOFFS(macintstatus));
 
        BCMMSG(wlc->wiphy, "wl%d: macintstatus: 0x%x\n", wlc_hw->unit,
                 macintstatus);
@@ -2543,12 +2538,12 @@ static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr)
         * consequences
         */
        /* turn off the interrupts */
-       W_REG(&regs->macintmask, 0);
-       (void)R_REG(&regs->macintmask); /* sync readback */
+       bcma_write32(core, D11REGOFFS(macintmask), 0);
+       (void)bcma_read32(core, D11REGOFFS(macintmask));
        wlc->macintmask = 0;
 
        /* clear device interrupts */
-       W_REG(&regs->macintstatus, macintstatus);
+       bcma_write32(core, D11REGOFFS(macintstatus), macintstatus);
 
        /* MI_DMAINT is indication of non-zero intstatus */
        if (macintstatus & MI_DMAINT)
@@ -2557,8 +2552,8 @@ static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr)
                 * RX_FIFO. If MI_DMAINT is set, assume it
                 * is set and clear the interrupt.
                 */
-               W_REG(&regs->intctrlregs[RX_FIFO].intstatus,
-                     DEF_RXINTMASK);
+               bcma_write32(core, D11REGOFFS(intctrlregs[RX_FIFO].intstatus),
+                            DEF_RXINTMASK);
 
        return macintstatus;
 }
@@ -2621,7 +2616,7 @@ bool brcms_c_isr(struct brcms_c_info *wlc, bool *wantdpc)
 void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
 {
        struct brcms_hardware *wlc_hw = wlc->hw;
-       struct d11regs __iomem *regs = wlc_hw->regs;
+       struct bcma_device *core = wlc_hw->d11core;
        u32 mc, mi;
        struct wiphy *wiphy = wlc->wiphy;
 
@@ -2638,7 +2633,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
        /* force the core awake */
        brcms_c_ucode_wake_override_set(wlc_hw, BRCMS_WAKE_OVERRIDE_MACSUSPEND);
 
-       mc = R_REG(&regs->maccontrol);
+       mc = bcma_read32(core, D11REGOFFS(maccontrol));
 
        if (mc == 0xffffffff) {
                wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
@@ -2650,7 +2645,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
        WARN_ON(!(mc & MCTL_PSM_RUN));
        WARN_ON(!(mc & MCTL_EN_MAC));
 
-       mi = R_REG(&regs->macintstatus);
+       mi = bcma_read32(core, D11REGOFFS(macintstatus));
        if (mi == 0xffffffff) {
                wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
                          __func__);
@@ -2661,21 +2656,21 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
 
        brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, 0);
 
-       SPINWAIT(!(R_REG(&regs->macintstatus) & MI_MACSSPNDD),
+       SPINWAIT(!(bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD),
                 BRCMS_MAX_MAC_SUSPEND);
 
-       if (!(R_REG(&regs->macintstatus) & MI_MACSSPNDD)) {
+       if (!(bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD)) {
                wiphy_err(wiphy, "wl%d: wlc_suspend_mac_and_wait: waited %d uS"
                          " and MI_MACSSPNDD is still not on.\n",
                          wlc_hw->unit, BRCMS_MAX_MAC_SUSPEND);
                wiphy_err(wiphy, "wl%d: psmdebug 0x%08x, phydebug 0x%08x, "
                          "psm_brc 0x%04x\n", wlc_hw->unit,
-                         R_REG(&regs->psmdebug),
-                         R_REG(&regs->phydebug),
-                         R_REG(&regs->psm_brc));
+                         bcma_read32(core, D11REGOFFS(psmdebug)),
+                         bcma_read32(core, D11REGOFFS(phydebug)),
+                         bcma_read16(core, D11REGOFFS(psm_brc)));
        }
 
-       mc = R_REG(&regs->maccontrol);
+       mc = bcma_read32(core, D11REGOFFS(maccontrol));
        if (mc == 0xffffffff) {
                wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
                          __func__);
@@ -2690,7 +2685,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
 void brcms_c_enable_mac(struct brcms_c_info *wlc)
 {
        struct brcms_hardware *wlc_hw = wlc->hw;
-       struct d11regs __iomem *regs = wlc_hw->regs;
+       struct bcma_device *core = wlc_hw->d11core;
        u32 mc, mi;
 
        BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit,
@@ -2703,20 +2698,20 @@ void brcms_c_enable_mac(struct brcms_c_info *wlc)
        if (wlc_hw->mac_suspend_depth > 0)
                return;
 
-       mc = R_REG(&regs->maccontrol);
+       mc = bcma_read32(core, D11REGOFFS(maccontrol));
        WARN_ON(mc & MCTL_PSM_JMP_0);
        WARN_ON(mc & MCTL_EN_MAC);
        WARN_ON(!(mc & MCTL_PSM_RUN));
 
        brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, MCTL_EN_MAC);
-       W_REG(&regs->macintstatus, MI_MACSSPNDD);
+       bcma_write32(core, D11REGOFFS(macintstatus), MI_MACSSPNDD);
 
-       mc = R_REG(&regs->maccontrol);
+       mc = bcma_read32(core, D11REGOFFS(maccontrol));
        WARN_ON(mc & MCTL_PSM_JMP_0);
        WARN_ON(!(mc & MCTL_EN_MAC));
        WARN_ON(!(mc & MCTL_PSM_RUN));
 
-       mi = R_REG(&regs->macintstatus);
+       mi = bcma_read32(core, D11REGOFFS(macintstatus));
        WARN_ON(mi & MI_MACSSPNDD);
 
        brcms_c_ucode_wake_override_clear(wlc_hw,
@@ -2733,55 +2728,53 @@ void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw, u8 stf_mode)
 
 static bool brcms_b_validate_chip_access(struct brcms_hardware *wlc_hw)
 {
-       struct d11regs __iomem *regs;
+       struct bcma_device *core = wlc_hw->d11core;
        u32 w, val;
        struct wiphy *wiphy = wlc_hw->wlc->wiphy;
 
        BCMMSG(wiphy, "wl%d\n", wlc_hw->unit);
 
-       regs = wlc_hw->regs;
-
        /* Validate dchip register access */
 
-       W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
-       (void)R_REG(&regs->objaddr);
-       w = R_REG(&regs->objdata);
+       bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+       (void)bcma_read32(core, D11REGOFFS(objaddr));
+       w = bcma_read32(core, D11REGOFFS(objdata));
 
        /* Can we write and read back a 32bit register? */
-       W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
-       (void)R_REG(&regs->objaddr);
-       W_REG(&regs->objdata, (u32) 0xaa5555aa);
+       bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+       (void)bcma_read32(core, D11REGOFFS(objaddr));
+       bcma_write32(core, D11REGOFFS(objdata), (u32) 0xaa5555aa);
 
-       W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
-       (void)R_REG(&regs->objaddr);
-       val = R_REG(&regs->objdata);
+       bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+       (void)bcma_read32(core, D11REGOFFS(objaddr));
+       val = bcma_read32(core, D11REGOFFS(objdata));
        if (val != (u32) 0xaa5555aa) {
                wiphy_err(wiphy, "wl%d: validate_chip_access: SHM = 0x%x, "
                          "expected 0xaa5555aa\n", wlc_hw->unit, val);
                return false;
        }
 
-       W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
-       (void)R_REG(&regs->objaddr);
-       W_REG(&regs->objdata, (u32) 0x55aaaa55);
+       bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+       (void)bcma_read32(core, D11REGOFFS(objaddr));
+       bcma_write32(core, D11REGOFFS(objdata), (u32) 0x55aaaa55);
 
-       W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
-       (void)R_REG(&regs->objaddr);
-       val = R_REG(&regs->objdata);
+       bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+       (void)bcma_read32(core, D11REGOFFS(objaddr));
+       val = bcma_read32(core, D11REGOFFS(objdata));
        if (val != (u32) 0x55aaaa55) {
                wiphy_err(wiphy, "wl%d: validate_chip_access: SHM = 0x%x, "
                          "expected 0x55aaaa55\n", wlc_hw->unit, val);
                return false;
        }
 
-       W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
-       (void)R_REG(&regs->objaddr);
-       W_REG(&regs->objdata, w);
+       bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+       (void)bcma_read32(core, D11REGOFFS(objaddr));
+       bcma_write32(core, D11REGOFFS(objdata), w);
 
        /* clear CFPStart */
-       W_REG(&regs->tsf_cfpstart, 0);
+       bcma_write32(core, D11REGOFFS(tsf_cfpstart), 0);
 
-       w = R_REG(&regs->maccontrol);
+       w = bcma_read32(core, D11REGOFFS(maccontrol));
        if ((w != (MCTL_IHR_EN | MCTL_WAKE)) &&
            (w != (MCTL_IHR_EN | MCTL_GMODE | MCTL_WAKE))) {
                wiphy_err(wiphy, "wl%d: validate_chip_access: maccontrol = "
@@ -2798,38 +2791,38 @@ static bool brcms_b_validate_chip_access(struct brcms_hardware *wlc_hw)
 
 void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on)
 {
-       struct d11regs __iomem *regs;
+       struct bcma_device *core = wlc_hw->d11core;
        u32 tmp;
 
        BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
 
        tmp = 0;
-       regs = wlc_hw->regs;
 
        if (on) {
-               if ((wlc_hw->sih->chip == BCM4313_CHIP_ID)) {
-                       OR_REG(&regs->clk_ctl_st,
-                              (CCS_ERSRC_REQ_HT | CCS_ERSRC_REQ_D11PLL |
-                               CCS_ERSRC_REQ_PHYPLL));
-                       SPINWAIT((R_REG(&regs->clk_ctl_st) &
-                                 (CCS_ERSRC_AVAIL_HT)) != (CCS_ERSRC_AVAIL_HT),
+               if ((ai_get_chip_id(wlc_hw->sih) == BCM4313_CHIP_ID)) {
+                       bcma_set32(core, D11REGOFFS(clk_ctl_st),
+                                  CCS_ERSRC_REQ_HT |
+                                  CCS_ERSRC_REQ_D11PLL |
+                                  CCS_ERSRC_REQ_PHYPLL);
+                       SPINWAIT((bcma_read32(core, D11REGOFFS(clk_ctl_st)) &
+                                 CCS_ERSRC_AVAIL_HT) != CCS_ERSRC_AVAIL_HT,
                                 PHYPLL_WAIT_US);
 
-                       tmp = R_REG(&regs->clk_ctl_st);
-                       if ((tmp & (CCS_ERSRC_AVAIL_HT)) !=
-                           (CCS_ERSRC_AVAIL_HT))
+                       tmp = bcma_read32(core, D11REGOFFS(clk_ctl_st));
+                       if ((tmp & CCS_ERSRC_AVAIL_HT) != CCS_ERSRC_AVAIL_HT)
                                wiphy_err(wlc_hw->wlc->wiphy, "%s: turn on PHY"
                                          " PLL failed\n", __func__);
                } else {
-                       OR_REG(&regs->clk_ctl_st,
-                              (CCS_ERSRC_REQ_D11PLL | CCS_ERSRC_REQ_PHYPLL));
-                       SPINWAIT((R_REG(&regs->clk_ctl_st) &
+                       bcma_set32(core, D11REGOFFS(clk_ctl_st),
+                                  tmp | CCS_ERSRC_REQ_D11PLL |
+                                  CCS_ERSRC_REQ_PHYPLL);
+                       SPINWAIT((bcma_read32(core, D11REGOFFS(clk_ctl_st)) &
                                  (CCS_ERSRC_AVAIL_D11PLL |
                                   CCS_ERSRC_AVAIL_PHYPLL)) !=
                                 (CCS_ERSRC_AVAIL_D11PLL |
                                  CCS_ERSRC_AVAIL_PHYPLL), PHYPLL_WAIT_US);
 
-                       tmp = R_REG(&regs->clk_ctl_st);
+                       tmp = bcma_read32(core, D11REGOFFS(clk_ctl_st));
                        if ((tmp &
                             (CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL))
                            !=
@@ -2843,8 +2836,9 @@ void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on)
                 * be requesting it; so we'll deassert the request but
                 * not wait for status to comply.
                 */
-               AND_REG(&regs->clk_ctl_st, ~CCS_ERSRC_REQ_PHYPLL);
-               tmp = R_REG(&regs->clk_ctl_st);
+               bcma_mask32(core, D11REGOFFS(clk_ctl_st),
+                           ~CCS_ERSRC_REQ_PHYPLL);
+               (void)bcma_read32(core, D11REGOFFS(clk_ctl_st));
        }
 }
 
@@ -2872,7 +2866,7 @@ static void brcms_c_coredisable(struct brcms_hardware *wlc_hw)
        brcms_b_core_phypll_ctl(wlc_hw, false);
 
        wlc_hw->clk = false;
-       ai_core_disable(wlc_hw->sih, 0);
+       bcma_core_disable(wlc_hw->d11core, 0);
        wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, false);
 }
 
@@ -2896,35 +2890,31 @@ static void brcms_c_flushqueues(struct brcms_c_info *wlc)
 static u16
 brcms_b_read_objmem(struct brcms_hardware *wlc_hw, uint offset, u32 sel)
 {
-       struct d11regs __iomem *regs = wlc_hw->regs;
-       u16 __iomem *objdata_lo = (u16 __iomem *)&regs->objdata;
-       u16 __iomem *objdata_hi = objdata_lo + 1;
-       u16 v;
+       struct bcma_device *core = wlc_hw->d11core;
+       u16 objoff = D11REGOFFS(objdata);
 
-       W_REG(&regs->objaddr, sel | (offset >> 2));
-       (void)R_REG(&regs->objaddr);
+       bcma_write32(core, D11REGOFFS(objaddr), sel | (offset >> 2));
+       (void)bcma_read32(core, D11REGOFFS(objaddr));
        if (offset & 2)
-               v = R_REG(objdata_hi);
-       else
-               v = R_REG(objdata_lo);
+               objoff += 2;
 
-       return v;
+       return bcma_read16(core, objoff);
 }
 
 static void
 brcms_b_write_objmem(struct brcms_hardware *wlc_hw, uint offset, u16 v,
                     u32 sel)
 {
-       struct d11regs __iomem *regs = wlc_hw->regs;
-       u16 __iomem *objdata_lo = (u16 __iomem *)&regs->objdata;
-       u16 __iomem *objdata_hi = objdata_lo + 1;
+       struct bcma_device *core = wlc_hw->d11core;
+       u16 objoff = D11REGOFFS(objdata);
 
-       W_REG(&regs->objaddr, sel | (offset >> 2));
-       (void)R_REG(&regs->objaddr);
+       bcma_write32(core, D11REGOFFS(objaddr), sel | (offset >> 2));
+       (void)bcma_read32(core, D11REGOFFS(objaddr));
        if (offset & 2)
-               W_REG(objdata_hi, v);
-       else
-               W_REG(objdata_lo, v);
+               objoff += 2;
+
+       bcma_write16(core, objoff, v);
 }
 
 /*
@@ -3010,14 +3000,14 @@ static void brcms_b_retrylimit_upd(struct brcms_hardware *wlc_hw,
 
        /* write retry limit to SCR, shouldn't need to suspend */
        if (wlc_hw->up) {
-               W_REG(&wlc_hw->regs->objaddr,
-                     OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
-               (void)R_REG(&wlc_hw->regs->objaddr);
-               W_REG(&wlc_hw->regs->objdata, wlc_hw->SRL);
-               W_REG(&wlc_hw->regs->objaddr,
-                     OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
-               (void)R_REG(&wlc_hw->regs->objaddr);
-               W_REG(&wlc_hw->regs->objdata, wlc_hw->LRL);
+               bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
+                            OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
+               (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
+               bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), wlc_hw->SRL);
+               bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
+                            OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
+               (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
+               bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), wlc_hw->LRL);
        }
 }
 
@@ -3064,7 +3054,7 @@ static bool brcms_c_ps_allowed(struct brcms_c_info *wlc)
                return false;
 
        /* disallow PS when one of these meets when not scanning */
-       if (wlc->monitor)
+       if (wlc->filter_flags & FIF_PROMISC_IN_BSS)
                return false;
 
        if (cfg->associated) {
@@ -3199,9 +3189,9 @@ void brcms_c_init_scb(struct scb *scb)
 static void brcms_b_coreinit(struct brcms_c_info *wlc)
 {
        struct brcms_hardware *wlc_hw = wlc->hw;
-       struct d11regs __iomem *regs;
+       struct bcma_device *core = wlc_hw->d11core;
        u32 sflags;
-       uint bcnint_us;
+       u32 bcnint_us;
        uint i = 0;
        bool fifosz_fixup = false;
        int err = 0;
@@ -3209,8 +3199,6 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
        struct wiphy *wiphy = wlc->wiphy;
        struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode;
 
-       regs = wlc_hw->regs;
-
        BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
 
        /* reset PSM */
@@ -3223,20 +3211,20 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
        fifosz_fixup = true;
 
        /* let the PSM run to the suspended state, set mode to BSS STA */
-       W_REG(&regs->macintstatus, -1);
+       bcma_write32(core, D11REGOFFS(macintstatus), -1);
        brcms_b_mctrl(wlc_hw, ~0,
                       (MCTL_IHR_EN | MCTL_INFRA | MCTL_PSM_RUN | MCTL_WAKE));
 
        /* wait for ucode to self-suspend after auto-init */
-       SPINWAIT(((R_REG(&regs->macintstatus) & MI_MACSSPNDD) == 0),
-                1000 * 1000);
-       if ((R_REG(&regs->macintstatus) & MI_MACSSPNDD) == 0)
+       SPINWAIT(((bcma_read32(core, D11REGOFFS(macintstatus)) &
+                  MI_MACSSPNDD) == 0), 1000 * 1000);
+       if ((bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD) == 0)
                wiphy_err(wiphy, "wl%d: wlc_coreinit: ucode did not self-"
                          "suspend!\n", wlc_hw->unit);
 
        brcms_c_gpio_init(wlc);
 
-       sflags = ai_core_sflags(wlc_hw->sih, 0, 0);
+       sflags = bcma_aread32(core, BCMA_IOST);
 
        if (D11REV_IS(wlc_hw->corerev, 23)) {
                if (BRCMS_ISNPHY(wlc_hw->band))
@@ -3300,7 +3288,7 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
                          wlc_hw->xmtfifo_sz[i], i);
 
        /* make sure we can still talk to the mac */
-       WARN_ON(R_REG(&regs->maccontrol) == 0xffffffff);
+       WARN_ON(bcma_read32(core, D11REGOFFS(maccontrol)) == 0xffffffff);
 
        /* band-specific inits done by wlc_bsinit() */
 
@@ -3309,7 +3297,7 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
        brcms_b_write_shm(wlc_hw, M_MAX_ANTCNT, ANTCNT);
 
        /* enable one rx interrupt per received frame */
-       W_REG(&regs->intrcvlazy[0], (1 << IRL_FC_SHIFT));
+       bcma_write32(core, D11REGOFFS(intrcvlazy[0]), (1 << IRL_FC_SHIFT));
 
        /* set the station mode (BSS STA) */
        brcms_b_mctrl(wlc_hw,
@@ -3318,19 +3306,21 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
 
        /* set up Beacon interval */
        bcnint_us = 0x8000 << 10;
-       W_REG(&regs->tsf_cfprep, (bcnint_us << CFPREP_CBI_SHIFT));
-       W_REG(&regs->tsf_cfpstart, bcnint_us);
-       W_REG(&regs->macintstatus, MI_GP1);
+       bcma_write32(core, D11REGOFFS(tsf_cfprep),
+                    (bcnint_us << CFPREP_CBI_SHIFT));
+       bcma_write32(core, D11REGOFFS(tsf_cfpstart), bcnint_us);
+       bcma_write32(core, D11REGOFFS(macintstatus), MI_GP1);
 
        /* write interrupt mask */
-       W_REG(&regs->intctrlregs[RX_FIFO].intmask, DEF_RXINTMASK);
+       bcma_write32(core, D11REGOFFS(intctrlregs[RX_FIFO].intmask),
+                    DEF_RXINTMASK);
 
        /* allow the MAC to control the PHY clock (dynamic on/off) */
        brcms_b_macphyclk_set(wlc_hw, ON);
 
        /* program dynamic clock control fast powerup delay register */
        wlc->fastpwrup_dly = ai_clkctl_fast_pwrup_delay(wlc_hw->sih);
-       W_REG(&regs->scc_fastpwrup_dly, wlc->fastpwrup_dly);
+       bcma_write16(core, D11REGOFFS(scc_fastpwrup_dly), wlc->fastpwrup_dly);
 
        /* tell the ucode the corerev */
        brcms_b_write_shm(wlc_hw, M_MACHW_VER, (u16) wlc_hw->corerev);
@@ -3343,19 +3333,21 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
                                      machwcap >> 16) & 0xffff));
 
        /* write retry limits to SCR, this done after PSM init */
-       W_REG(&regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
-       (void)R_REG(&regs->objaddr);
-       W_REG(&regs->objdata, wlc_hw->SRL);
-       W_REG(&regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
-       (void)R_REG(&regs->objaddr);
-       W_REG(&regs->objdata, wlc_hw->LRL);
+       bcma_write32(core, D11REGOFFS(objaddr),
+                    OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
+       (void)bcma_read32(core, D11REGOFFS(objaddr));
+       bcma_write32(core, D11REGOFFS(objdata), wlc_hw->SRL);
+       bcma_write32(core, D11REGOFFS(objaddr),
+                    OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
+       (void)bcma_read32(core, D11REGOFFS(objaddr));
+       bcma_write32(core, D11REGOFFS(objdata), wlc_hw->LRL);
 
        /* write rate fallback retry limits */
        brcms_b_write_shm(wlc_hw, M_SFRMTXCNTFBRTHSD, wlc_hw->SFBL);
        brcms_b_write_shm(wlc_hw, M_LFRMTXCNTFBRTHSD, wlc_hw->LFBL);
 
-       AND_REG(&regs->ifs_ctl, 0x0FFF);
-       W_REG(&regs->ifs_aifsn, EDCF_AIFSN_MIN);
+       bcma_mask16(core, D11REGOFFS(ifs_ctl), 0x0FFF);
+       bcma_write16(core, D11REGOFFS(ifs_aifsn), EDCF_AIFSN_MIN);
 
        /* init the tx dma engines */
        for (i = 0; i < NFIFO; i++) {
@@ -3584,29 +3576,31 @@ static void brcms_c_bandinit_ordered(struct brcms_c_info *wlc,
 }
 
 /*
- * Set or clear maccontrol bits MCTL_PROMISC, MCTL_BCNS_PROMISC and
- * MCTL_KEEPCONTROL
+ * Set or clear filtering related maccontrol bits based on
+ * specified filter flags
  */
-static void brcms_c_mac_promisc(struct brcms_c_info *wlc)
+void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags)
 {
        u32 promisc_bits = 0;
 
-       if (wlc->bcnmisc_monitor)
+       wlc->filter_flags = filter_flags;
+
+       if (filter_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS))
+               promisc_bits |= MCTL_PROMISC;
+
+       if (filter_flags & FIF_BCN_PRBRESP_PROMISC)
                promisc_bits |= MCTL_BCNS_PROMISC;
 
-       if (wlc->monitor)
-               promisc_bits |=
-                       MCTL_PROMISC | MCTL_BCNS_PROMISC | MCTL_KEEPCONTROL;
+       if (filter_flags & FIF_FCSFAIL)
+               promisc_bits |= MCTL_KEEPBADFCS;
 
-       brcms_b_mctrl(wlc->hw,
-                       MCTL_PROMISC | MCTL_BCNS_PROMISC | MCTL_KEEPCONTROL,
-                       promisc_bits);
-}
+       if (filter_flags & (FIF_CONTROL | FIF_PSPOLL))
+               promisc_bits |= MCTL_KEEPCONTROL;
 
-void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc, bool promisc)
-{
-       wlc->bcnmisc_monitor = promisc;
-       brcms_c_mac_promisc(wlc);
+       brcms_b_mctrl(wlc->hw,
+               MCTL_PROMISC | MCTL_BCNS_PROMISC |
+               MCTL_KEEPCONTROL | MCTL_KEEPBADFCS,
+               promisc_bits);
 }
 
 /*
@@ -3636,9 +3630,6 @@ static void brcms_c_ucode_mac_upd(struct brcms_c_info *wlc)
        } else {
                /* disable an active IBSS if we are not on the home channel */
        }
-
-       /* update the various promisc bits */
-       brcms_c_mac_promisc(wlc);
 }
 
 static void brcms_c_write_rate_shm(struct brcms_c_info *wlc, u8 rate,
@@ -3813,7 +3804,7 @@ static void brcms_c_set_ps_ctrl(struct brcms_c_info *wlc)
 
        BCMMSG(wlc->wiphy, "wl%d: hps %d\n", wlc->pub->unit, hps);
 
-       v1 = R_REG(&wlc->regs->maccontrol);
+       v1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(maccontrol));
        v2 = MCTL_WAKE;
        if (hps)
                v2 |= MCTL_HPS;
@@ -4132,7 +4123,8 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
                acp_shm.cwmax = params->cw_max;
                acp_shm.cwcur = acp_shm.cwmin;
                acp_shm.bslots =
-                   R_REG(&wlc->regs->tsf_random) & acp_shm.cwcur;
+                       bcma_read16(wlc->hw->d11core, D11REGOFFS(tsf_random)) &
+                       acp_shm.cwcur;
                acp_shm.reggap = acp_shm.bslots + acp_shm.aifs;
                /* Indicate the new params to the ucode */
                acp_shm.status = brcms_b_read_shm(wlc->hw, (M_EDCF_QINFO +
@@ -4440,21 +4432,21 @@ struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc)
  *    initialize software state for each core and band
  *    put the whole chip in reset(driver down state), no clock
  */
-static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
-                         uint unit, bool piomode, void __iomem *regsva,
-                         struct pci_dev *btparam)
+static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
+                         uint unit, bool piomode)
 {
        struct brcms_hardware *wlc_hw;
-       struct d11regs __iomem *regs;
        char *macaddr = NULL;
        uint err = 0;
        uint j;
        bool wme = false;
        struct shared_phy_params sha_params;
        struct wiphy *wiphy = wlc->wiphy;
+       struct pci_dev *pcidev = core->bus->host_pci;
 
-       BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit, vendor,
-               device);
+       BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit,
+              pcidev->vendor,
+              pcidev->device);
 
        wme = true;
 
@@ -4471,7 +4463,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
         * Do the hardware portion of the attach. Also initialize software
         * state that depends on the particular hardware we are running.
         */
-       wlc_hw->sih = ai_attach(regsva, btparam);
+       wlc_hw->sih = ai_attach(core->bus);
        if (wlc_hw->sih == NULL) {
                wiphy_err(wiphy, "wl%d: brcms_b_attach: si_attach failed\n",
                          unit);
@@ -4480,25 +4472,19 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
        }
 
        /* verify again the device is supported */
-       if (!brcms_c_chipmatch(vendor, device)) {
+       if (!brcms_c_chipmatch(pcidev->vendor, pcidev->device)) {
                wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported "
                        "vendor/device (0x%x/0x%x)\n",
-                        unit, vendor, device);
+                        unit, pcidev->vendor, pcidev->device);
                err = 12;
                goto fail;
        }
 
-       wlc_hw->vendorid = vendor;
-       wlc_hw->deviceid = device;
-
-       /* set bar0 window to point at D11 core */
-       wlc_hw->regs = (struct d11regs __iomem *)
-                               ai_setcore(wlc_hw->sih, D11_CORE_ID, 0);
-       wlc_hw->corerev = ai_corerev(wlc_hw->sih);
-
-       regs = wlc_hw->regs;
+       wlc_hw->vendorid = pcidev->vendor;
+       wlc_hw->deviceid = pcidev->device;
 
-       wlc->regs = wlc_hw->regs;
+       wlc_hw->d11core = core;
+       wlc_hw->corerev = core->id.rev;
 
        /* validate chip, chiprev and corerev */
        if (!brcms_c_isgoodchip(wlc_hw)) {
@@ -4533,8 +4519,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
        wlc_hw->boardrev = (u16) j;
        if (!brcms_c_validboardtype(wlc_hw)) {
                wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported Broadcom "
-                       "board type (0x%x)" " or revision level (0x%x)\n",
-                        unit, wlc_hw->sih->boardtype, wlc_hw->boardrev);
+                         "board type (0x%x)" " or revision level (0x%x)\n",
+                         unit, ai_get_boardtype(wlc_hw->sih),
+                         wlc_hw->boardrev);
                err = 15;
                goto fail;
        }
@@ -4555,7 +4542,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
        else
                wlc_hw->_nbands = 1;
 
-       if ((wlc_hw->sih->chip == BCM43225_CHIP_ID))
+       if ((ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID))
                wlc_hw->_nbands = 1;
 
        /* BMAC_NOTE: remove init of pub values when brcms_c_attach()
@@ -4587,16 +4574,14 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
        sha_params.corerev = wlc_hw->corerev;
        sha_params.vid = wlc_hw->vendorid;
        sha_params.did = wlc_hw->deviceid;
-       sha_params.chip = wlc_hw->sih->chip;
-       sha_params.chiprev = wlc_hw->sih->chiprev;
-       sha_params.chippkg = wlc_hw->sih->chippkg;
+       sha_params.chip = ai_get_chip_id(wlc_hw->sih);
+       sha_params.chiprev = ai_get_chiprev(wlc_hw->sih);
+       sha_params.chippkg = ai_get_chippkg(wlc_hw->sih);
        sha_params.sromrev = wlc_hw->sromrev;
-       sha_params.boardtype = wlc_hw->sih->boardtype;
+       sha_params.boardtype = ai_get_boardtype(wlc_hw->sih);
        sha_params.boardrev = wlc_hw->boardrev;
-       sha_params.boardvendor = wlc_hw->sih->boardvendor;
        sha_params.boardflags = wlc_hw->boardflags;
        sha_params.boardflags2 = wlc_hw->boardflags2;
-       sha_params.buscorerev = wlc_hw->sih->buscorerev;
 
        /* alloc and save pointer to shared phy state area */
        wlc_hw->phy_sh = wlc_phy_shared_attach(&sha_params);
@@ -4618,9 +4603,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
                wlc_hw->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G;
                wlc->band->bandunit = j;
                wlc->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G;
-               wlc->core->coreidx = ai_coreidx(wlc_hw->sih);
+               wlc->core->coreidx = core->core_index;
 
-               wlc_hw->machwcap = R_REG(&regs->machwcap);
+               wlc_hw->machwcap = bcma_read32(core, D11REGOFFS(machwcap));
                wlc_hw->machwcap_backup = wlc_hw->machwcap;
 
                /* init tx fifo size */
@@ -4629,7 +4614,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
 
                /* Get a phy for this band */
                wlc_hw->band->pi =
-                       wlc_phy_attach(wlc_hw->phy_sh, regs,
+                       wlc_phy_attach(wlc_hw->phy_sh, core,
                                       wlc_hw->band->bandtype,
                                       wlc->wiphy);
                if (wlc_hw->band->pi == NULL) {
@@ -4703,10 +4688,6 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
        /* Match driver "down" state */
        ai_pci_down(wlc_hw->sih);
 
-       /* register sb interrupt callback functions */
-       ai_register_intr_callback(wlc_hw->sih, (void *)brcms_c_wlintrsoff,
-                                 (void *)brcms_c_wlintrsrestore, NULL, wlc);
-
        /* turn off pll and xtal to match driver "down" state */
        brcms_b_xtal(wlc_hw, OFF);
 
@@ -4737,10 +4718,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
                goto fail;
        }
 
-       BCMMSG(wlc->wiphy,
-                "deviceid 0x%x nbands %d board 0x%x macaddr: %s\n",
-                wlc_hw->deviceid, wlc_hw->_nbands,
-                wlc_hw->sih->boardtype, macaddr);
+       BCMMSG(wlc->wiphy, "deviceid 0x%x nbands %d board 0x%x macaddr: %s\n",
+              wlc_hw->deviceid, wlc_hw->_nbands, ai_get_boardtype(wlc_hw->sih),
+              macaddr);
 
        return err;
 
@@ -4978,7 +4958,6 @@ static int brcms_b_detach(struct brcms_c_info *wlc)
                 * and per-port interrupt object may has been freed. this must
                 * be done before sb core switch
                 */
-               ai_deregister_intr_callback(wlc_hw->sih);
                ai_pci_sleep(wlc_hw->sih);
        }
 
@@ -5073,13 +5052,11 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
        ai_pci_fixcfg(wlc_hw->sih);
 
        /*
+        * TODO: test suspend/resume
+        *
         * AI chip doesn't restore bar0win2 on
         * hibernation/resume, need sw fixup
         */
-       if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
-           (wlc_hw->sih->chip == BCM43225_CHIP_ID))
-               wlc_hw->regs = (struct d11regs __iomem *)
-                               ai_setcore(wlc_hw->sih, D11_CORE_ID, 0);
 
        /*
         * Inform phy that a POR reset has occurred so
@@ -5091,7 +5068,7 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
        wlc_hw->wlc->pub->hw_up = true;
 
        if ((wlc_hw->boardflags & BFL_FEM)
-           && (wlc_hw->sih->chip == BCM4313_CHIP_ID)) {
+           && (ai_get_chip_id(wlc_hw->sih) == BCM4313_CHIP_ID)) {
                if (!
                    (wlc_hw->boardrev >= 0x1250
                     && (wlc_hw->boardflags & BFL_FEM_BT)))
@@ -5186,7 +5163,7 @@ int brcms_c_up(struct brcms_c_info *wlc)
        }
 
        if ((wlc->pub->boardflags & BFL_FEM)
-           && (wlc->pub->sih->chip == BCM4313_CHIP_ID)) {
+           && (ai_get_chip_id(wlc->hw->sih) == BCM4313_CHIP_ID)) {
                if (wlc->pub->boardrev >= 0x1250
                    && (wlc->pub->boardflags & BFL_FEM_BT))
                        brcms_b_mhf(wlc->hw, MHF5, MHF5_4313_GPIOCTRL,
@@ -5323,9 +5300,9 @@ static int brcms_b_down_finish(struct brcms_hardware *wlc_hw)
        } else {
 
                /* Reset and disable the core */
-               if (ai_iscoreup(wlc_hw->sih)) {
-                       if (R_REG(&wlc_hw->regs->maccontrol) &
-                           MCTL_EN_MAC)
+               if (bcma_core_is_enabled(wlc_hw->d11core)) {
+                       if (bcma_read32(wlc_hw->d11core,
+                                       D11REGOFFS(maccontrol)) & MCTL_EN_MAC)
                                brcms_c_suspend_mac_and_wait(wlc_hw->wlc);
                        callbacks += brcms_reset(wlc_hw->wlc->wl);
                        brcms_c_coredisable(wlc_hw);
@@ -7482,11 +7459,11 @@ static void
 brcms_b_read_tsf(struct brcms_hardware *wlc_hw, u32 *tsf_l_ptr,
                  u32 *tsf_h_ptr)
 {
-       struct d11regs __iomem *regs = wlc_hw->regs;
+       struct bcma_device *core = wlc_hw->d11core;
 
        /* read the tsf timer low, then high to get an atomic read */
-       *tsf_l_ptr = R_REG(&regs->tsf_timerlow);
-       *tsf_h_ptr = R_REG(&regs->tsf_timerhigh);
+       *tsf_l_ptr = bcma_read32(core, D11REGOFFS(tsf_timerlow));
+       *tsf_h_ptr = bcma_read32(core, D11REGOFFS(tsf_timerhigh));
 }
 
 /*
@@ -8074,14 +8051,8 @@ static void brcms_c_recv(struct brcms_c_info *wlc, struct sk_buff *p)
        len = p->len;
 
        if (rxh->RxStatus1 & RXS_FCSERR) {
-               if (wlc->pub->mac80211_state & MAC80211_PROMISC_BCNS) {
-                       wiphy_err(wlc->wiphy, "FCSERR while scanning******* -"
-                                 " tossing\n");
-                       goto toss;
-               } else {
-                       wiphy_err(wlc->wiphy, "RCSERR!!!\n");
+               if (!(wlc->filter_flags & FIF_FCSFAIL))
                        goto toss;
-               }
        }
 
        /* check received pkt has at least frame control field */
@@ -8165,7 +8136,7 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
 {
        u32 macintstatus;
        struct brcms_hardware *wlc_hw = wlc->hw;
-       struct d11regs __iomem *regs = wlc_hw->regs;
+       struct bcma_device *core = wlc_hw->d11core;
        struct wiphy *wiphy = wlc->wiphy;
 
        if (brcms_deviceremoved(wlc)) {
@@ -8201,7 +8172,7 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
        /* ATIM window end */
        if (macintstatus & MI_ATIMWINEND) {
                BCMMSG(wlc->wiphy, "end of ATIM window\n");
-               OR_REG(&regs->maccommand, wlc->qvalid);
+               bcma_set32(core, D11REGOFFS(maccommand), wlc->qvalid);
                wlc->qvalid = 0;
        }
 
@@ -8219,17 +8190,17 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
 
        if (macintstatus & MI_GP0) {
                wiphy_err(wiphy, "wl%d: PSM microcode watchdog fired at %d "
-                       "(seconds). Resetting.\n", wlc_hw->unit, wlc_hw->now);
+                         "(seconds). Resetting.\n", wlc_hw->unit, wlc_hw->now);
 
                printk_once("%s : PSM Watchdog, chipid 0x%x, chiprev 0x%x\n",
-                                       __func__, wlc_hw->sih->chip,
-                                       wlc_hw->sih->chiprev);
+                           __func__, ai_get_chip_id(wlc_hw->sih),
+                           ai_get_chiprev(wlc_hw->sih));
                brcms_fatal_error(wlc_hw->wlc->wl);
        }
 
        /* gptimer timeout */
        if (macintstatus & MI_TO)
-               W_REG(&regs->gptimer, 0);
+               bcma_write32(core, D11REGOFFS(gptimer), 0);
 
        if (macintstatus & MI_RFDISABLE) {
                BCMMSG(wlc->wiphy, "wl%d: BMAC Detected a change on the"
@@ -8251,13 +8222,11 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
 
 void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
 {
-       struct d11regs __iomem *regs;
+       struct bcma_device *core = wlc->hw->d11core;
        u16 chanspec;
 
        BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
 
-       regs = wlc->regs;
-
        /*
         * This will happen if a big-hammer was executed. In
         * that case, we want to go back to the channel that
@@ -8287,8 +8256,8 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
                 * update since init path would reset
                 * to default value
                 */
-               W_REG(&regs->tsf_cfprep,
-                     (bi << CFPREP_CBI_SHIFT));
+               bcma_write32(core, D11REGOFFS(tsf_cfprep),
+                            bi << CFPREP_CBI_SHIFT);
 
                /* Update maccontrol PM related bits */
                brcms_c_set_ps_ctrl(wlc);
@@ -8318,7 +8287,7 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
        brcms_c_bsinit(wlc);
 
        /* Enable EDCF mode (while the MAC is suspended) */
-       OR_REG(&regs->ifs_ctl, IFS_USEEDCF);
+       bcma_set16(core, D11REGOFFS(ifs_ctl), IFS_USEEDCF);
        brcms_c_edcf_setparams(wlc, false);
 
        /* Init precedence maps for empty FIFOs */
@@ -8342,7 +8311,7 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
        brcms_c_txflowcontrol_reset(wlc);
 
        /* enable the RF Disable Delay timer */
-       W_REG(&wlc->regs->rfdisabledly, RFDISABLE_DEFAULT);
+       bcma_write32(core, D11REGOFFS(rfdisabledly), RFDISABLE_DEFAULT);
 
        /*
         * Initialize WME parameters; if they haven't been set by some other
@@ -8362,9 +8331,8 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
  * The common driver entry routine. Error codes should be unique
  */
 struct brcms_c_info *
-brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
-              bool piomode, void __iomem *regsva, struct pci_dev *btparam,
-              uint *perr)
+brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
+              bool piomode, uint *perr)
 {
        struct brcms_c_info *wlc;
        uint err = 0;
@@ -8372,7 +8340,7 @@ brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
        struct brcms_pub *pub;
 
        /* allocate struct brcms_c_info state and its substructures */
-       wlc = (struct brcms_c_info *) brcms_c_attach_malloc(unit, &err, device);
+       wlc = (struct brcms_c_info *) brcms_c_attach_malloc(unit, &err, 0);
        if (wlc == NULL)
                goto fail;
        wlc->wiphy = wl->wiphy;
@@ -8399,8 +8367,7 @@ brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
         * low level attach steps(all hw accesses go
         * inside, no more in rest of the attach)
         */
-       err = brcms_b_attach(wlc, vendor, device, unit, piomode, regsva,
-                            btparam);
+       err = brcms_b_attach(wlc, core, unit, piomode);
        if (err)
                goto fail;
 
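The main.c changes above are a mechanical conversion: every direct dereference of the ioremap'ed struct d11regs (W_REG/R_REG/OR_REG/AND_REG) becomes an offset-based access through the bcma core handle (bcma_read32/bcma_write32/bcma_set32/bcma_mask32), with D11REGOFFS(field) supplying the byte offset. The minimal user-space sketch below only illustrates the offsetof idiom behind that macro; the toy struct layout and the _TOY names are assumptions for demonstration, not the driver's real definitions, though D11REGOFFS is presumably defined like the PCIREGOFFS/PCIEREGOFFS helpers added further down.

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Toy stand-in for struct d11regs; the real layout is much larger and
     * the field order here is illustrative only. */
    struct d11regs_toy {
            uint32_t maccontrol;
            uint32_t maccommand;
            uint32_t macintstatus;
            uint32_t macintmask;
    };

    /* Same shape as the PCIREGOFFS()/PCIEREGOFFS() helpers in this patch. */
    #define D11REGOFFS_TOY(field)   offsetof(struct d11regs_toy, field)

    int main(void)
    {
            /* bcma_write32(core, off, val) / bcma_read32(core, off) take a
             * byte offset like these instead of a pointer into an
             * ioremap'ed register struct. */
            printf("macintstatus at byte offset %zu\n",
                   D11REGOFFS_TOY(macintstatus));
            printf("macintmask   at byte offset %zu\n",
                   D11REGOFFS_TOY(macintmask));
            return 0;
    }

Working with byte offsets rather than mapped pointers lets the bcma bus layer route each access through whichever host bus backs the d11 core, which is presumably why the wlc_hw->regs and wlc->regs pointers can be dropped from the driver state in the header changes that follow.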
index 251c350..adb136e 100644 (file)
@@ -334,7 +334,7 @@ struct brcms_hardware {
        u32 machwcap_backup;    /* backup of machwcap */
 
        struct si_pub *sih;     /* SI handle (cookie for siutils calls) */
-       struct d11regs __iomem *regs;   /* pointer to device registers */
+       struct bcma_device *d11core;    /* pointer to 802.11 core */
        struct phy_shim_info *physhim; /* phy shim layer handler */
        struct shared_phy *phy_sh;      /* pointer to shared phy state */
        struct brcms_hw_band *band;/* pointer to active per-band state */
@@ -400,7 +400,6 @@ struct brcms_txq_info {
  *
  * pub: pointer to driver public state.
  * wl: pointer to specific private state.
- * regs: pointer to device registers.
  * hw: HW related state.
  * clkreq_override: setting for clkreq for PCIE : Auto, 0, 1.
  * fastpwrup_dly: time in us needed to bring up d11 fast clock.
@@ -477,7 +476,6 @@ struct brcms_txq_info {
 struct brcms_c_info {
        struct brcms_pub *pub;
        struct brcms_info *wl;
-       struct d11regs __iomem *regs;
        struct brcms_hardware *hw;
 
        /* clock */
@@ -519,8 +517,7 @@ struct brcms_c_info {
        struct brcms_timer *radio_timer;
 
        /* promiscuous */
-       bool monitor;
-       bool bcnmisc_monitor;
+       uint filter_flags;
 
        /* driver feature */
        bool _rifs;
@@ -658,8 +655,7 @@ extern void brcms_c_print_txdesc(struct d11txh *txh);
 #endif
 
 extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
-extern void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc,
-                                          bool promisc);
+extern void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags);
 extern void brcms_c_send_q(struct brcms_c_info *wlc);
 extern int brcms_c_prep_pdu(struct brcms_c_info *wlc, struct sk_buff *pdu,
                            uint *fifo);
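The promiscuous-mode rework in the header above replaces the monitor/bcnmisc_monitor booleans with a single filter_flags word mirroring mac80211's FIF_* filter flags, which the new brcms_c_mac_promisc() translates into maccontrol bits; brcms_c_ps_allowed() and the FCSERR path in brcms_c_recv() now key off FIF_PROMISC_IN_BSS and FIF_FCSFAIL respectively. The sketch below restates that mapping in isolation as a compilable example; the FIF_* and MCTL_* values are placeholders for illustration, not the real definitions from include/net/mac80211.h and the driver headers.

    #include <stdio.h>
    #include <stdint.h>

    /* Placeholder bit values -- illustrative only, not the real defines. */
    #define FIF_PROMISC_IN_BSS      0x01
    #define FIF_FCSFAIL             0x02
    #define FIF_BCN_PRBRESP_PROMISC 0x04
    #define FIF_CONTROL             0x08
    #define FIF_PSPOLL              0x10
    #define FIF_OTHER_BSS           0x20

    #define MCTL_PROMISC            0x0001
    #define MCTL_BCNS_PROMISC       0x0002
    #define MCTL_KEEPCONTROL        0x0004
    #define MCTL_KEEPBADFCS         0x0008

    /* Same mapping as the new brcms_c_mac_promisc() in the diff above. */
    static uint32_t promisc_bits(unsigned int filter_flags)
    {
            uint32_t bits = 0;

            if (filter_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS))
                    bits |= MCTL_PROMISC;
            if (filter_flags & FIF_BCN_PRBRESP_PROMISC)
                    bits |= MCTL_BCNS_PROMISC;
            if (filter_flags & FIF_FCSFAIL)
                    bits |= MCTL_KEEPBADFCS;
            if (filter_flags & (FIF_CONTROL | FIF_PSPOLL))
                    bits |= MCTL_KEEPCONTROL;

            return bits;
    }

    int main(void)
    {
            printf("monitor-style filter -> maccontrol bits 0x%x\n",
                   (unsigned)promisc_bits(FIF_PROMISC_IN_BSS | FIF_FCSFAIL |
                                          FIF_CONTROL));
            return 0;
    }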
index 0bcb267..7fad6dc 100644 (file)
 #define SRSH_PI_MASK   0xf000  /* bit 15:12 */
 #define SRSH_PI_SHIFT  12      /* bit 15:12 */
 
+#define PCIREGOFFS(field)      offsetof(struct sbpciregs, field)
+#define PCIEREGOFFS(field)     offsetof(struct sbpcieregs, field)
+
 /* Sonics side: PCI core and host control registers */
 struct sbpciregs {
        u32 control;            /* PCI control */
@@ -205,11 +208,7 @@ struct sbpcieregs {
 };
 
 struct pcicore_info {
-       union {
-               struct sbpcieregs __iomem *pcieregs;
-               struct sbpciregs __iomem *pciregs;
-       } regs;                 /* Memory mapped register to the core */
-
+       struct bcma_device *core;
        struct si_pub *sih;     /* System interconnect handle */
        struct pci_dev *dev;
        u8 pciecap_lcreg_offset;/* PCIE capability LCreg offset
@@ -224,9 +223,9 @@ struct pcicore_info {
 };
 
 #define PCIE_ASPM(sih)                                                 \
-       (((sih)->buscoretype == PCIE_CORE_ID) &&                        \
-        (((sih)->buscorerev >= 3) &&                                   \
-         ((sih)->buscorerev <= 5)))
+       ((ai_get_buscoretype(sih) == PCIE_CORE_ID) &&                   \
+        ((ai_get_buscorerev(sih) >= 3) &&                              \
+         (ai_get_buscorerev(sih) <= 5)))
 
 
 /* delay needed between the mdio control/ mdiodata register data access */
@@ -238,8 +237,7 @@ static void pr28829_delay(void)
 /* Initialize the PCI core.
  * It's caller's responsibility to make sure that this is done only once
  */
-struct pcicore_info *pcicore_init(struct si_pub *sih, struct pci_dev *pdev,
-                                 void __iomem *regs)
+struct pcicore_info *pcicore_init(struct si_pub *sih, struct bcma_device *core)
 {
        struct pcicore_info *pi;
 
@@ -249,17 +247,15 @@ struct pcicore_info *pcicore_init(struct si_pub *sih, struct pci_dev *pdev,
                return NULL;
 
        pi->sih = sih;
-       pi->dev = pdev;
+       pi->dev = core->bus->host_pci;
+       pi->core = core;
 
-       if (sih->buscoretype == PCIE_CORE_ID) {
+       if (core->id.id == PCIE_CORE_ID) {
                u8 cap_ptr;
-               pi->regs.pcieregs = regs;
                cap_ptr = pcicore_find_pci_capability(pi->dev, PCI_CAP_ID_EXP,
                                                      NULL, NULL);
                pi->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET;
-       } else
-               pi->regs.pciregs = regs;
-
+       }
        return pi;
 }
 
@@ -334,37 +330,37 @@ end:
 
 /* ***** Register Access API */
 static uint
-pcie_readreg(struct sbpcieregs __iomem *pcieregs, uint addrtype, uint offset)
+pcie_readreg(struct bcma_device *core, uint addrtype, uint offset)
 {
        uint retval = 0xFFFFFFFF;
 
        switch (addrtype) {
        case PCIE_CONFIGREGS:
-               W_REG(&pcieregs->configaddr, offset);
-               (void)R_REG((&pcieregs->configaddr));
-               retval = R_REG(&pcieregs->configdata);
+               bcma_write32(core, PCIEREGOFFS(configaddr), offset);
+               (void)bcma_read32(core, PCIEREGOFFS(configaddr));
+               retval = bcma_read32(core, PCIEREGOFFS(configdata));
                break;
        case PCIE_PCIEREGS:
-               W_REG(&pcieregs->pcieindaddr, offset);
-               (void)R_REG(&pcieregs->pcieindaddr);
-               retval = R_REG(&pcieregs->pcieinddata);
+               bcma_write32(core, PCIEREGOFFS(pcieindaddr), offset);
+               (void)bcma_read32(core, PCIEREGOFFS(pcieindaddr));
+               retval = bcma_read32(core, PCIEREGOFFS(pcieinddata));
                break;
        }
 
        return retval;
 }
 
-static uint pcie_writereg(struct sbpcieregs __iomem *pcieregs, uint addrtype,
+static uint pcie_writereg(struct bcma_device *core, uint addrtype,
                          uint offset, uint val)
 {
        switch (addrtype) {
        case PCIE_CONFIGREGS:
-               W_REG((&pcieregs->configaddr), offset);
-               W_REG((&pcieregs->configdata), val);
+               bcma_write32(core, PCIEREGOFFS(configaddr), offset);
+               bcma_write32(core, PCIEREGOFFS(configdata), val);
                break;
        case PCIE_PCIEREGS:
-               W_REG((&pcieregs->pcieindaddr), offset);
-               W_REG((&pcieregs->pcieinddata), val);
+               bcma_write32(core, PCIEREGOFFS(pcieindaddr), offset);
+               bcma_write32(core, PCIEREGOFFS(pcieinddata), val);
                break;
        default:
                break;
@@ -374,7 +370,6 @@ static uint pcie_writereg(struct sbpcieregs __iomem *pcieregs, uint addrtype,
 
 static bool pcie_mdiosetblock(struct pcicore_info *pi, uint blk)
 {
-       struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
        uint mdiodata, i = 0;
        uint pcie_serdes_spinwait = 200;
 
@@ -382,12 +377,13 @@ static bool pcie_mdiosetblock(struct pcicore_info *pi, uint blk)
                    (MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
                    (MDIODATA_BLK_ADDR << MDIODATA_REGADDR_SHF) |
                    (blk << 4));
-       W_REG(&pcieregs->mdiodata, mdiodata);
+       bcma_write32(pi->core, PCIEREGOFFS(mdiodata), mdiodata);
 
        pr28829_delay();
        /* retry till the transaction is complete */
        while (i < pcie_serdes_spinwait) {
-               if (R_REG(&pcieregs->mdiocontrol) & MDIOCTL_ACCESS_DONE)
+               if (bcma_read32(pi->core, PCIEREGOFFS(mdiocontrol)) &
+                   MDIOCTL_ACCESS_DONE)
                        break;
 
                udelay(1000);
@@ -404,15 +400,15 @@ static int
 pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write,
            uint *val)
 {
-       struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
        uint mdiodata;
        uint i = 0;
        uint pcie_serdes_spinwait = 10;
 
        /* enable mdio access to SERDES */
-       W_REG(&pcieregs->mdiocontrol, MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
+       bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol),
+                    MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
 
-       if (pi->sih->buscorerev >= 10) {
+       if (ai_get_buscorerev(pi->sih) >= 10) {
                /* new serdes is slower in rw,
                 * using two layers of reg address mapping
                 */
@@ -432,20 +428,22 @@ pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write,
                mdiodata |= (MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA |
                             *val);
 
-       W_REG(&pcieregs->mdiodata, mdiodata);
+       bcma_write32(pi->core, PCIEREGOFFS(mdiodata), mdiodata);
 
        pr28829_delay();
 
        /* retry till the transaction is complete */
        while (i < pcie_serdes_spinwait) {
-               if (R_REG(&pcieregs->mdiocontrol) & MDIOCTL_ACCESS_DONE) {
+               if (bcma_read32(pi->core, PCIEREGOFFS(mdiocontrol)) &
+                   MDIOCTL_ACCESS_DONE) {
                        if (!write) {
                                pr28829_delay();
-                               *val = (R_REG(&pcieregs->mdiodata) &
+                               *val = (bcma_read32(pi->core,
+                                                   PCIEREGOFFS(mdiodata)) &
                                        MDIODATA_MASK);
                        }
                        /* Disable mdio access to SERDES */
-                       W_REG(&pcieregs->mdiocontrol, 0);
+                       bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol), 0);
                        return 0;
                }
                udelay(1000);
@@ -453,7 +451,7 @@ pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write,
        }
 
        /* Timed out. Disable mdio access to SERDES. */
-       W_REG(&pcieregs->mdiocontrol, 0);
+       bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol), 0);
        return 1;
 }
 
@@ -502,18 +500,18 @@ static void pcie_extendL1timer(struct pcicore_info *pi, bool extend)
 {
        u32 w;
        struct si_pub *sih = pi->sih;
-       struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
 
-       if (sih->buscoretype != PCIE_CORE_ID || sih->buscorerev < 7)
+       if (ai_get_buscoretype(sih) != PCIE_CORE_ID ||
+           ai_get_buscorerev(sih) < 7)
                return;
 
-       w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
+       w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
        if (extend)
                w |= PCIE_ASPMTIMER_EXTEND;
        else
                w &= ~PCIE_ASPMTIMER_EXTEND;
-       pcie_writereg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w);
-       w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
+       pcie_writereg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w);
+       w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
 }
 
 /* centralized clkreq control policy */
@@ -527,25 +525,27 @@ static void pcie_clkreq_upd(struct pcicore_info *pi, uint state)
                        pcie_clkreq(pi, 1, 0);
                break;
        case SI_PCIDOWN:
-               if (sih->buscorerev == 6) {     /* turn on serdes PLL down */
-                       ai_corereg(sih, SI_CC_IDX,
-                                  offsetof(struct chipcregs, chipcontrol_addr),
-                                  ~0, 0);
-                       ai_corereg(sih, SI_CC_IDX,
-                                  offsetof(struct chipcregs, chipcontrol_data),
-                                  ~0x40, 0);
+               /* turn on serdes PLL down */
+               if (ai_get_buscorerev(sih) == 6) {
+                       ai_cc_reg(sih,
+                                 offsetof(struct chipcregs, chipcontrol_addr),
+                                 ~0, 0);
+                       ai_cc_reg(sih,
+                                 offsetof(struct chipcregs, chipcontrol_data),
+                                 ~0x40, 0);
                } else if (pi->pcie_pr42767) {
                        pcie_clkreq(pi, 1, 1);
                }
                break;
        case SI_PCIUP:
-               if (sih->buscorerev == 6) {     /* turn off serdes PLL down */
-                       ai_corereg(sih, SI_CC_IDX,
-                                  offsetof(struct chipcregs, chipcontrol_addr),
-                                  ~0, 0);
-                       ai_corereg(sih, SI_CC_IDX,
-                                  offsetof(struct chipcregs, chipcontrol_data),
-                                  ~0x40, 0x40);
+               /* turn off serdes PLL down */
+               if (ai_get_buscorerev(sih) == 6) {
+                       ai_cc_reg(sih,
+                                 offsetof(struct chipcregs, chipcontrol_addr),
+                                 ~0, 0);
+                       ai_cc_reg(sih,
+                                 offsetof(struct chipcregs, chipcontrol_data),
+                                 ~0x40, 0x40);
                } else if (PCIE_ASPM(sih)) {    /* disable clkreq */
                        pcie_clkreq(pi, 1, 0);
                }
@@ -562,7 +562,7 @@ static void pcie_war_polarity(struct pcicore_info *pi)
        if (pi->pcie_polarity != 0)
                return;
 
-       w = pcie_readreg(pi->regs.pcieregs, PCIE_PCIEREGS, PCIE_PLP_STATUSREG);
+       w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_PLP_STATUSREG);
 
        /* Detect the current polarity at attach and force that polarity and
         * disable changing the polarity
@@ -581,18 +581,15 @@ static void pcie_war_polarity(struct pcicore_info *pi)
  */
 static void pcie_war_aspm_clkreq(struct pcicore_info *pi)
 {
-       struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
        struct si_pub *sih = pi->sih;
        u16 val16;
-       u16 __iomem *reg16;
        u32 w;
 
        if (!PCIE_ASPM(sih))
                return;
 
        /* bypass this on QT or VSIM */
-       reg16 = &pcieregs->sprom[SRSH_ASPM_OFFSET];
-       val16 = R_REG(reg16);
+       val16 = bcma_read16(pi->core, PCIEREGOFFS(sprom[SRSH_ASPM_OFFSET]));
 
        val16 &= ~SRSH_ASPM_ENB;
        if (pi->pcie_war_aspm_ovr == PCIE_ASPM_ENAB)
@@ -602,15 +599,15 @@ static void pcie_war_aspm_clkreq(struct pcicore_info *pi)
        else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L0s_ENAB)
                val16 |= SRSH_ASPM_L0s_ENB;
 
-       W_REG(reg16, val16);
+       bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_ASPM_OFFSET]), val16);
 
        pci_read_config_dword(pi->dev, pi->pciecap_lcreg_offset, &w);
        w &= ~PCIE_ASPM_ENAB;
        w |= pi->pcie_war_aspm_ovr;
        pci_write_config_dword(pi->dev, pi->pciecap_lcreg_offset, w);
 
-       reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET_REV5];
-       val16 = R_REG(reg16);
+       val16 = bcma_read16(pi->core,
+                           PCIEREGOFFS(sprom[SRSH_CLKREQ_OFFSET_REV5]));
 
        if (pi->pcie_war_aspm_ovr != PCIE_ASPM_DISAB) {
                val16 |= SRSH_CLKREQ_ENB;
@@ -618,7 +615,8 @@ static void pcie_war_aspm_clkreq(struct pcicore_info *pi)
        } else
                val16 &= ~SRSH_CLKREQ_ENB;
 
-       W_REG(reg16, val16);
+       bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_CLKREQ_OFFSET_REV5]),
+                    val16);
 }
 
 /* Apply the polarity determined at the start */
@@ -642,16 +640,15 @@ static void pcie_war_serdes(struct pcicore_info *pi)
 /* Needs to happen when coming out of 'standby'/'hibernate' */
 static void pcie_misc_config_fixup(struct pcicore_info *pi)
 {
-       struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
        u16 val16;
-       u16 __iomem *reg16;
 
-       reg16 = &pcieregs->sprom[SRSH_PCIE_MISC_CONFIG];
-       val16 = R_REG(reg16);
+       val16 = bcma_read16(pi->core,
+                           PCIEREGOFFS(sprom[SRSH_PCIE_MISC_CONFIG]));
 
        if ((val16 & SRSH_L23READY_EXIT_NOPERST) == 0) {
                val16 |= SRSH_L23READY_EXIT_NOPERST;
-               W_REG(reg16, val16);
+               bcma_write16(pi->core,
+                            PCIEREGOFFS(sprom[SRSH_PCIE_MISC_CONFIG]), val16);
        }
 }
 
@@ -659,62 +656,57 @@ static void pcie_misc_config_fixup(struct pcicore_info *pi)
 /* Needs to happen when coming out of 'standby'/'hibernate' */
 static void pcie_war_noplldown(struct pcicore_info *pi)
 {
-       struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
-       u16 __iomem *reg16;
-
        /* turn off serdes PLL down */
-       ai_corereg(pi->sih, SI_CC_IDX, offsetof(struct chipcregs, chipcontrol),
-                  CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN);
+       ai_cc_reg(pi->sih, offsetof(struct chipcregs, chipcontrol),
+                 CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN);
 
        /* clear srom shadow backdoor */
-       reg16 = &pcieregs->sprom[SRSH_BD_OFFSET];
-       W_REG(reg16, 0);
+       bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_BD_OFFSET]), 0);
 }
 
 /* Needs to happen when coming out of 'standby'/'hibernate' */
 static void pcie_war_pci_setup(struct pcicore_info *pi)
 {
        struct si_pub *sih = pi->sih;
-       struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
        u32 w;
 
-       if (sih->buscorerev == 0 || sih->buscorerev == 1) {
-               w = pcie_readreg(pcieregs, PCIE_PCIEREGS,
+       if (ai_get_buscorerev(sih) == 0 || ai_get_buscorerev(sih) == 1) {
+               w = pcie_readreg(pi->core, PCIE_PCIEREGS,
                                 PCIE_TLP_WORKAROUNDSREG);
                w |= 0x8;
-               pcie_writereg(pcieregs, PCIE_PCIEREGS,
+               pcie_writereg(pi->core, PCIE_PCIEREGS,
                              PCIE_TLP_WORKAROUNDSREG, w);
        }
 
-       if (sih->buscorerev == 1) {
-               w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG);
+       if (ai_get_buscorerev(sih) == 1) {
+               w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_LCREG);
                w |= 0x40;
-               pcie_writereg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w);
+               pcie_writereg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w);
        }
 
-       if (sih->buscorerev == 0) {
+       if (ai_get_buscorerev(sih) == 0) {
                pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_TIMER1, 0x8128);
                pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDR, 0x0100);
                pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDRBW, 0x1466);
        } else if (PCIE_ASPM(sih)) {
                /* Change the L1 threshold for better performance */
-               w = pcie_readreg(pcieregs, PCIE_PCIEREGS,
+               w = pcie_readreg(pi->core, PCIE_PCIEREGS,
                                 PCIE_DLLP_PMTHRESHREG);
                w &= ~PCIE_L1THRESHOLDTIME_MASK;
                w |= PCIE_L1THRESHOLD_WARVAL << PCIE_L1THRESHOLDTIME_SHIFT;
-               pcie_writereg(pcieregs, PCIE_PCIEREGS,
+               pcie_writereg(pi->core, PCIE_PCIEREGS,
                              PCIE_DLLP_PMTHRESHREG, w);
 
                pcie_war_serdes(pi);
 
                pcie_war_aspm_clkreq(pi);
-       } else if (pi->sih->buscorerev == 7)
+       } else if (ai_get_buscorerev(pi->sih) == 7)
                pcie_war_noplldown(pi);
 
        /* Note that the fix is actually in the SROM,
         * that's why this is open-ended
         */
-       if (pi->sih->buscorerev >= 6)
+       if (ai_get_buscorerev(pi->sih) >= 6)
                pcie_misc_config_fixup(pi);
 }
 
@@ -745,7 +737,7 @@ void pcicore_attach(struct pcicore_info *pi, int state)
 
 void pcicore_hwup(struct pcicore_info *pi)
 {
-       if (!pi || pi->sih->buscoretype != PCIE_CORE_ID)
+       if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
                return;
 
        pcie_war_pci_setup(pi);
@@ -753,7 +745,7 @@ void pcicore_hwup(struct pcicore_info *pi)
 
 void pcicore_up(struct pcicore_info *pi, int state)
 {
-       if (!pi || pi->sih->buscoretype != PCIE_CORE_ID)
+       if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
                return;
 
        /* Restore L1 timer for better performance */
@@ -781,7 +773,7 @@ void pcicore_sleep(struct pcicore_info *pi)
 
 void pcicore_down(struct pcicore_info *pi, int state)
 {
-       if (!pi || pi->sih->buscoretype != PCIE_CORE_ID)
+       if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
                return;
 
        pcie_clkreq_upd(pi, state);
@@ -790,46 +782,45 @@ void pcicore_down(struct pcicore_info *pi, int state)
        pcie_extendL1timer(pi, false);
 }
 
-/* precondition: current core is sii->buscoretype */
-static void pcicore_fixcfg(struct pcicore_info *pi, u16 __iomem *reg16)
+void pcicore_fixcfg(struct pcicore_info *pi)
 {
-       struct si_info *sii = (struct si_info *)(pi->sih);
+       struct bcma_device *core = pi->core;
        u16 val16;
-       uint pciidx;
+       uint regoff;
 
-       pciidx = ai_coreidx(&sii->pub);
-       val16 = R_REG(reg16);
-       if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != (u16)pciidx) {
-               val16 = (u16)(pciidx << SRSH_PI_SHIFT) |
-                       (val16 & ~SRSH_PI_MASK);
-               W_REG(reg16, val16);
-       }
-}
+       switch (pi->core->id.id) {
+       case BCMA_CORE_PCI:
+               regoff = PCIREGOFFS(sprom[SRSH_PI_OFFSET]);
+               break;
 
-void
-pcicore_fixcfg_pci(struct pcicore_info *pi, struct sbpciregs __iomem *pciregs)
-{
-       pcicore_fixcfg(pi, &pciregs->sprom[SRSH_PI_OFFSET]);
-}
+       case BCMA_CORE_PCIE:
+               regoff = PCIEREGOFFS(sprom[SRSH_PI_OFFSET]);
+               break;
 
-void pcicore_fixcfg_pcie(struct pcicore_info *pi,
-                        struct sbpcieregs __iomem *pcieregs)
-{
-       pcicore_fixcfg(pi, &pcieregs->sprom[SRSH_PI_OFFSET]);
+       default:
+               return;
+       }
+
+       val16 = bcma_read16(pi->core, regoff);
+       if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) !=
+           (u16)core->core_index) {
+               val16 = ((u16)core->core_index << SRSH_PI_SHIFT) |
+                       (val16 & ~SRSH_PI_MASK);
+               bcma_write16(pi->core, regoff, val16);
+       }
 }
 
 /* precondition: current core is pci core */
 void
-pcicore_pci_setup(struct pcicore_info *pi, struct sbpciregs __iomem *pciregs)
+pcicore_pci_setup(struct pcicore_info *pi)
 {
-       u32 w;
-
-       OR_REG(&pciregs->sbtopci2, SBTOPCI_PREF | SBTOPCI_BURST);
-
-       if (((struct si_info *)(pi->sih))->pub.buscorerev >= 11) {
-               OR_REG(&pciregs->sbtopci2, SBTOPCI_RC_READMULTI);
-               w = R_REG(&pciregs->clkrun);
-               W_REG(&pciregs->clkrun, w | PCI_CLKRUN_DSBL);
-               w = R_REG(&pciregs->clkrun);
+       bcma_set32(pi->core, PCIREGOFFS(sbtopci2),
+                  SBTOPCI_PREF | SBTOPCI_BURST);
+
+       if (pi->core->id.rev >= 11) {
+               bcma_set32(pi->core, PCIREGOFFS(sbtopci2),
+                          SBTOPCI_RC_READMULTI);
+               bcma_set32(pi->core, PCIREGOFFS(clkrun), PCI_CLKRUN_DSBL);
+               (void)bcma_read32(pi->core, PCIREGOFFS(clkrun));
        }
 }
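
The hunks above follow one mechanical rule: every R_REG()/W_REG()/OR_REG() on an ioremapped sbpciregs/sbpcieregs field becomes a bcma accessor that takes the struct bcma_device plus a register offset, which is why the __iomem pointers (pi->regs.pcieregs and the reg16 locals) disappear from nicpci.c. A minimal sketch of the set-and-flush idiom used in pcicore_pci_setup(), with the offset passed in as a parameter; the real code uses PCIREGOFFS(sbtopci2)/PCIREGOFFS(clkrun) and the SBTOPCI_*/PCI_CLKRUN_DSBL bits shown above, and this helper itself is hypothetical:

#include <linux/bcma/bcma.h>

/* Illustrative only: OR bits into a 32-bit core register, then read it
 * back so the posted write reaches the hardware before we continue,
 * mirroring the bcma_set32()/bcma_read32() pair in pcicore_pci_setup().
 */
static void example_set_and_flush(struct bcma_device *core, u16 offset, u32 bits)
{
	bcma_set32(core, offset, bits);		/* reg |= bits, read-modify-write */
	(void)bcma_read32(core, offset);	/* dummy read flushes the write */
}

The same replacement explains why the sbpciregs/sbpcieregs pointer arguments vanish from the prototypes in the header hunk that follows.
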
index 58aa80d..9fc3ead 100644
@@ -62,8 +62,7 @@ struct sbpciregs;
 struct sbpcieregs;
 
 extern struct pcicore_info *pcicore_init(struct si_pub *sih,
-                                        struct pci_dev *pdev,
-                                        void __iomem *regs);
+                                        struct bcma_device *core);
 extern void pcicore_deinit(struct pcicore_info *pch);
 extern void pcicore_attach(struct pcicore_info *pch, int state);
 extern void pcicore_hwup(struct pcicore_info *pch);
@@ -72,11 +71,7 @@ extern void pcicore_sleep(struct pcicore_info *pch);
 extern void pcicore_down(struct pcicore_info *pch, int state);
 extern u8 pcicore_find_pci_capability(struct pci_dev *dev, u8 req_cap_id,
                                      unsigned char *buf, u32 *buflen);
-extern void pcicore_fixcfg_pci(struct pcicore_info *pch,
-                              struct sbpciregs __iomem *pciregs);
-extern void pcicore_fixcfg_pcie(struct pcicore_info *pch,
-                               struct sbpcieregs __iomem *pciregs);
-extern void pcicore_pci_setup(struct pcicore_info *pch,
-                             struct sbpciregs __iomem *pciregs);
+extern void pcicore_fixcfg(struct pcicore_info *pch);
+extern void pcicore_pci_setup(struct pcicore_info *pch);
 
 #endif /* _BRCM_NICPCI_H_ */
index edf5515..f1ca126 100644
@@ -77,7 +77,7 @@ struct otp_fn_s {
 };
 
 struct otpinfo {
-       uint ccrev;             /* chipc revision */
+       struct bcma_device *core; /* chipc core */
        const struct otp_fn_s *fn;      /* OTP functions */
        struct si_pub *sih;             /* Saved sb handle */
 
@@ -133,9 +133,10 @@ struct otpinfo {
 #define OTP_SZ_FU_144          (144/8) /* 144 bits */
 
 static u16
-ipxotp_otpr(struct otpinfo *oi, struct chipcregs __iomem *cc, uint wn)
+ipxotp_otpr(struct otpinfo *oi, uint wn)
 {
-       return R_REG(&cc->sromotp[wn]);
+       return bcma_read16(oi->core,
+                          CHIPCREGOFFS(sromotp[wn]));
 }
 
 /*
@@ -146,7 +147,7 @@ static int ipxotp_max_rgnsz(struct si_pub *sih, int osizew)
 {
        int ret = 0;
 
-       switch (sih->chip) {
+       switch (ai_get_chip_id(sih)) {
        case BCM43224_CHIP_ID:
        case BCM43225_CHIP_ID:
                ret = osizew * 2 - OTP_SZ_FU_72 - OTP_SZ_CHECKSUM;
@@ -161,19 +162,21 @@ static int ipxotp_max_rgnsz(struct si_pub *sih, int osizew)
        return ret;
 }
 
-static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
+static void _ipxotp_init(struct otpinfo *oi)
 {
        uint k;
        u32 otpp, st;
+       int ccrev = ai_get_ccrev(oi->sih);
+
 
        /*
         * record word offset of General Use Region
         * for various chipcommon revs
         */
-       if (oi->sih->ccrev == 21 || oi->sih->ccrev == 24
-           || oi->sih->ccrev == 27) {
+       if (ccrev == 21 || ccrev == 24
+           || ccrev == 27) {
                oi->otpgu_base = REVA4_OTPGU_BASE;
-       } else if (oi->sih->ccrev == 36) {
+       } else if (ccrev == 36) {
                /*
                 * OTP size greater than equal to 2KB (128 words),
                 * otpgu_base is similar to rev23
@@ -182,7 +185,7 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
                        oi->otpgu_base = REVB8_OTPGU_BASE;
                else
                        oi->otpgu_base = REV36_OTPGU_BASE;
-       } else if (oi->sih->ccrev == 23 || oi->sih->ccrev >= 25) {
+       } else if (ccrev == 23 || ccrev >= 25) {
                oi->otpgu_base = REVB8_OTPGU_BASE;
        }
 
@@ -190,24 +193,21 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
        otpp =
            OTPP_START_BUSY | ((OTPPOC_INIT << OTPP_OC_SHIFT) & OTPP_OC_MASK);
 
-       W_REG(&cc->otpprog, otpp);
-       for (k = 0;
-            ((st = R_REG(&cc->otpprog)) & OTPP_START_BUSY)
-            && (k < OTPP_TRIES); k++)
-               ;
+       bcma_write32(oi->core, CHIPCREGOFFS(otpprog), otpp);
+       st = bcma_read32(oi->core, CHIPCREGOFFS(otpprog));
+       for (k = 0; (st & OTPP_START_BUSY) && (k < OTPP_TRIES); k++)
+               st = bcma_read32(oi->core, CHIPCREGOFFS(otpprog));
        if (k >= OTPP_TRIES)
                return;
 
        /* Read OTP lock bits and subregion programmed indication bits */
-       oi->status = R_REG(&cc->otpstatus);
+       oi->status = bcma_read32(oi->core, CHIPCREGOFFS(otpstatus));
 
-       if ((oi->sih->chip == BCM43224_CHIP_ID)
-           || (oi->sih->chip == BCM43225_CHIP_ID)) {
+       if ((ai_get_chip_id(oi->sih) == BCM43224_CHIP_ID)
+           || (ai_get_chip_id(oi->sih) == BCM43225_CHIP_ID)) {
                u32 p_bits;
-               p_bits =
-                   (ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_P_OFF) &
-                    OTPGU_P_MSK)
-                   >> OTPGU_P_SHIFT;
+               p_bits = (ipxotp_otpr(oi, oi->otpgu_base + OTPGU_P_OFF) &
+                         OTPGU_P_MSK) >> OTPGU_P_SHIFT;
                oi->status |= (p_bits << OTPS_GUP_SHIFT);
        }
 
@@ -220,7 +220,7 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
        oi->hwlim = oi->wsize;
        if (oi->status & OTPS_GUP_HW) {
                oi->hwlim =
-                   ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_HSB_OFF) / 16;
+                   ipxotp_otpr(oi, oi->otpgu_base + OTPGU_HSB_OFF) / 16;
                oi->swbase = oi->hwlim;
        } else
                oi->swbase = oi->hwbase;
@@ -230,7 +230,7 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
 
        if (oi->status & OTPS_GUP_SW) {
                oi->swlim =
-                   ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_SFB_OFF) / 16;
+                   ipxotp_otpr(oi, oi->otpgu_base + OTPGU_SFB_OFF) / 16;
                oi->fbase = oi->swlim;
        } else
                oi->fbase = oi->swbase;
@@ -240,11 +240,8 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
 
 static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi)
 {
-       uint idx;
-       struct chipcregs __iomem *cc;
-
        /* Make sure we're running IPX OTP */
-       if (!OTPTYPE_IPX(sih->ccrev))
+       if (!OTPTYPE_IPX(ai_get_ccrev(sih)))
                return -EBADE;
 
        /* Make sure OTP is not disabled */
@@ -252,7 +249,7 @@ static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi)
                return -EBADE;
 
        /* Check for otp size */
-       switch ((sih->cccaps & CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT) {
+       switch ((ai_get_cccaps(sih) & CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT) {
        case 0:
                /* Nothing there */
                return -EBADE;
@@ -282,21 +279,13 @@ static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi)
        }
 
        /* Retrieve OTP region info */
-       idx = ai_coreidx(sih);
-       cc = ai_setcoreidx(sih, SI_CC_IDX);
-
-       _ipxotp_init(oi, cc);
-
-       ai_setcoreidx(sih, idx);
-
+       _ipxotp_init(oi);
        return 0;
 }
 
 static int
 ipxotp_read_region(struct otpinfo *oi, int region, u16 *data, uint *wlen)
 {
-       uint idx;
-       struct chipcregs __iomem *cc;
        uint base, i, sz;
 
        /* Validate region selection */
@@ -365,14 +354,10 @@ ipxotp_read_region(struct otpinfo *oi, int region, u16 *data, uint *wlen)
                return -EINVAL;
        }
 
-       idx = ai_coreidx(oi->sih);
-       cc = ai_setcoreidx(oi->sih, SI_CC_IDX);
-
        /* Read the data */
        for (i = 0; i < sz; i++)
-               data[i] = ipxotp_otpr(oi, cc, base + i);
+               data[i] = ipxotp_otpr(oi, base + i);
 
-       ai_setcoreidx(oi->sih, idx);
        *wlen = sz;
        return 0;
 }
@@ -384,14 +369,13 @@ static const struct otp_fn_s ipxotp_fn = {
 
 static int otp_init(struct si_pub *sih, struct otpinfo *oi)
 {
-
        int ret;
 
        memset(oi, 0, sizeof(struct otpinfo));
 
-       oi->ccrev = sih->ccrev;
+       oi->core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
 
-       if (OTPTYPE_IPX(oi->ccrev))
+       if (OTPTYPE_IPX(ai_get_ccrev(sih)))
                oi->fn = &ipxotp_fn;
 
        if (oi->fn == NULL)
@@ -399,7 +383,7 @@ static int otp_init(struct si_pub *sih, struct otpinfo *oi)
 
        oi->sih = sih;
 
-       ret = (oi->fn->init) (sih, oi);
+       ret = (oi->fn->init)(sih, oi);
 
        return ret;
 }
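
Because the chipcommon core is now looked up once with ai_findcore() and cached in oi->core, the OTP code above no longer brackets every access with ai_coreidx()/ai_setcoreidx(); it polls the controller directly through bcma reads. A sketch of that polling idiom, with the busy bit and retry budget passed in (the driver uses OTPP_START_BUSY and OTPP_TRIES); the helper is hypothetical:

#include <linux/bcma/bcma.h>

/* Illustrative only: issue an OTP command and spin until the controller
 * reports idle or the retry budget runs out, as _ipxotp_init() does
 * after writing OTPPOC_INIT to the otpprog register.
 */
static bool example_otp_cmd_wait(struct bcma_device *cc, u16 otpprog_off,
				 u32 cmd, u32 busy_bit, unsigned int tries)
{
	unsigned int k;

	bcma_write32(cc, otpprog_off, cmd);
	for (k = 0; k < tries; k++)
		if (!(bcma_read32(cc, otpprog_off) & busy_bit))
			return true;	/* controller idle again */
	return false;			/* still busy after 'tries' polls */
}
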
index e17edf7..264f8c4 100644
@@ -109,7 +109,7 @@ static const struct chan_info_basic chan_info_all[] = {
        {204, 5020},
        {208, 5040},
        {212, 5060},
-       {216, 50800}
+       {216, 5080}
 };
 
 static const u8 ofdm_rate_lookup[] = {
@@ -149,9 +149,8 @@ void wlc_radioreg_enter(struct brcms_phy_pub *pih)
 void wlc_radioreg_exit(struct brcms_phy_pub *pih)
 {
        struct brcms_phy *pi = (struct brcms_phy *) pih;
-       u16 dummy;
 
-       dummy = R_REG(&pi->regs->phyversion);
+       (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
        pi->phy_wreg = 0;
        wlapi_bmac_mctrl(pi->sh->physhim, MCTL_LOCK_RADIO, 0);
 }
@@ -186,11 +185,11 @@ u16 read_radio_reg(struct brcms_phy *pi, u16 addr)
        if ((D11REV_GE(pi->sh->corerev, 24)) ||
            (D11REV_IS(pi->sh->corerev, 22)
             && (pi->pubpi.phy_type != PHY_TYPE_SSN))) {
-               W_REG_FLUSH(&pi->regs->radioregaddr, addr);
-               data = R_REG(&pi->regs->radioregdata);
+               bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), addr);
+               data = bcma_read16(pi->d11core, D11REGOFFS(radioregdata));
        } else {
-               W_REG_FLUSH(&pi->regs->phy4waddr, addr);
-               data = R_REG(&pi->regs->phy4wdatalo);
+               bcma_wflush16(pi->d11core, D11REGOFFS(phy4waddr), addr);
+               data = bcma_read16(pi->d11core, D11REGOFFS(phy4wdatalo));
        }
        pi->phy_wreg = 0;
 
@@ -203,15 +202,15 @@ void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val)
            (D11REV_IS(pi->sh->corerev, 22)
             && (pi->pubpi.phy_type != PHY_TYPE_SSN))) {
 
-               W_REG_FLUSH(&pi->regs->radioregaddr, addr);
-               W_REG(&pi->regs->radioregdata, val);
+               bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), addr);
+               bcma_write16(pi->d11core, D11REGOFFS(radioregdata), val);
        } else {
-               W_REG_FLUSH(&pi->regs->phy4waddr, addr);
-               W_REG(&pi->regs->phy4wdatalo, val);
+               bcma_wflush16(pi->d11core, D11REGOFFS(phy4waddr), addr);
+               bcma_write16(pi->d11core, D11REGOFFS(phy4wdatalo), val);
        }
 
        if (++pi->phy_wreg >= pi->phy_wreg_limit) {
-               (void)R_REG(&pi->regs->maccontrol);
+               (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
                pi->phy_wreg = 0;
        }
 }
@@ -223,19 +222,20 @@ static u32 read_radio_id(struct brcms_phy *pi)
        if (D11REV_GE(pi->sh->corerev, 24)) {
                u32 b0, b1, b2;
 
-               W_REG_FLUSH(&pi->regs->radioregaddr, 0);
-               b0 = (u32) R_REG(&pi->regs->radioregdata);
-               W_REG_FLUSH(&pi->regs->radioregaddr, 1);
-               b1 = (u32) R_REG(&pi->regs->radioregdata);
-               W_REG_FLUSH(&pi->regs->radioregaddr, 2);
-               b2 = (u32) R_REG(&pi->regs->radioregdata);
+               bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), 0);
+               b0 = (u32) bcma_read16(pi->d11core, D11REGOFFS(radioregdata));
+               bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), 1);
+               b1 = (u32) bcma_read16(pi->d11core, D11REGOFFS(radioregdata));
+               bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), 2);
+               b2 = (u32) bcma_read16(pi->d11core, D11REGOFFS(radioregdata));
 
                id = ((b0 & 0xf) << 28) | (((b2 << 8) | b1) << 12) | ((b0 >> 4)
                                                                      & 0xf);
        } else {
-               W_REG_FLUSH(&pi->regs->phy4waddr, RADIO_IDCODE);
-               id = (u32) R_REG(&pi->regs->phy4wdatalo);
-               id |= (u32) R_REG(&pi->regs->phy4wdatahi) << 16;
+               bcma_wflush16(pi->d11core, D11REGOFFS(phy4waddr), RADIO_IDCODE);
+               id = (u32) bcma_read16(pi->d11core, D11REGOFFS(phy4wdatalo));
+               id |= (u32) bcma_read16(pi->d11core,
+                                       D11REGOFFS(phy4wdatahi)) << 16;
        }
        pi->phy_wreg = 0;
        return id;
@@ -275,75 +275,52 @@ void mod_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val)
 
 void write_phy_channel_reg(struct brcms_phy *pi, uint val)
 {
-       W_REG(&pi->regs->phychannel, val);
+       bcma_write16(pi->d11core, D11REGOFFS(phychannel), val);
 }
 
 u16 read_phy_reg(struct brcms_phy *pi, u16 addr)
 {
-       struct d11regs __iomem *regs;
-
-       regs = pi->regs;
-
-       W_REG_FLUSH(&regs->phyregaddr, addr);
+       bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
 
        pi->phy_wreg = 0;
-       return R_REG(&regs->phyregdata);
+       return bcma_read16(pi->d11core, D11REGOFFS(phyregdata));
 }
 
 void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
 {
-       struct d11regs __iomem *regs;
-
-       regs = pi->regs;
-
 #ifdef CONFIG_BCM47XX
-       W_REG_FLUSH(&regs->phyregaddr, addr);
-       W_REG(&regs->phyregdata, val);
+       bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
+       bcma_write16(pi->d11core, D11REGOFFS(phyregdata), val);
        if (addr == 0x72)
-               (void)R_REG(&regs->phyregdata);
+               (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
 #else
-       W_REG((u32 __iomem *)(&regs->phyregaddr), addr | (val << 16));
+       bcma_write32(pi->d11core, D11REGOFFS(phyregaddr), addr | (val << 16));
        if (++pi->phy_wreg >= pi->phy_wreg_limit) {
                pi->phy_wreg = 0;
-               (void)R_REG(&regs->phyversion);
+               (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
        }
 #endif
 }
 
 void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
 {
-       struct d11regs __iomem *regs;
-
-       regs = pi->regs;
-
-       W_REG_FLUSH(&regs->phyregaddr, addr);
-
-       W_REG(&regs->phyregdata, (R_REG(&regs->phyregdata) & val));
+       bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
+       bcma_mask16(pi->d11core, D11REGOFFS(phyregdata), val);
        pi->phy_wreg = 0;
 }
 
 void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
 {
-       struct d11regs __iomem *regs;
-
-       regs = pi->regs;
-
-       W_REG_FLUSH(&regs->phyregaddr, addr);
-
-       W_REG(&regs->phyregdata, (R_REG(&regs->phyregdata) | val));
+       bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
+       bcma_set16(pi->d11core, D11REGOFFS(phyregdata), val);
        pi->phy_wreg = 0;
 }
 
 void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val)
 {
-       struct d11regs __iomem *regs;
-
-       regs = pi->regs;
-
-       W_REG_FLUSH(&regs->phyregaddr, addr);
-
-       W_REG(&regs->phyregdata,
-             ((R_REG(&regs->phyregdata) & ~mask) | (val & mask)));
+       val &= mask;
+       bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
+       bcma_maskset16(pi->d11core, D11REGOFFS(phyregdata), ~mask, val);
        pi->phy_wreg = 0;
 }
 
@@ -404,10 +381,8 @@ struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp)
        sh->sromrev = shp->sromrev;
        sh->boardtype = shp->boardtype;
        sh->boardrev = shp->boardrev;
-       sh->boardvendor = shp->boardvendor;
        sh->boardflags = shp->boardflags;
        sh->boardflags2 = shp->boardflags2;
-       sh->buscorerev = shp->buscorerev;
 
        sh->fast_timer = PHY_SW_TIMER_FAST;
        sh->slow_timer = PHY_SW_TIMER_SLOW;
@@ -450,7 +425,7 @@ static u32 wlc_phy_get_radio_ver(struct brcms_phy *pi)
 }
 
 struct brcms_phy_pub *
-wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
+wlc_phy_attach(struct shared_phy *sh, struct bcma_device *d11core,
               int bandtype, struct wiphy *wiphy)
 {
        struct brcms_phy *pi;
@@ -462,7 +437,7 @@ wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
        if (D11REV_IS(sh->corerev, 4))
                sflags = SISF_2G_PHY | SISF_5G_PHY;
        else
-               sflags = ai_core_sflags(sh->sih, 0, 0);
+               sflags = bcma_aread32(d11core, BCMA_IOST);
 
        if (bandtype == BRCM_BAND_5G) {
                if ((sflags & (SISF_5G_PHY | SISF_DB_PHY)) == 0)
@@ -480,7 +455,7 @@ wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
        if (pi == NULL)
                return NULL;
        pi->wiphy = wiphy;
-       pi->regs = regs;
+       pi->d11core = d11core;
        pi->sh = sh;
        pi->phy_init_por = true;
        pi->phy_wreg_limit = PHY_WREG_LIMIT;
@@ -495,7 +470,7 @@ wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
                pi->pubpi.coreflags = SICF_GMODE;
 
        wlapi_bmac_corereset(pi->sh->physhim, pi->pubpi.coreflags);
-       phyversion = R_REG(&pi->regs->phyversion);
+       phyversion = bcma_read16(pi->d11core, D11REGOFFS(phyversion));
 
        pi->pubpi.phy_type = PHY_TYPE(phyversion);
        pi->pubpi.phy_rev = phyversion & PV_PV_MASK;
@@ -507,8 +482,8 @@ wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
        pi->pubpi.phy_corenum = PHY_CORE_NUM_2;
        pi->pubpi.ana_rev = (phyversion & PV_AV_MASK) >> PV_AV_SHIFT;
 
-       if (!pi->pubpi.phy_type == PHY_TYPE_N &&
-           !pi->pubpi.phy_type == PHY_TYPE_LCN)
+       if (pi->pubpi.phy_type != PHY_TYPE_N &&
+           pi->pubpi.phy_type != PHY_TYPE_LCN)
                goto err;
 
        if (bandtype == BRCM_BAND_5G) {
@@ -779,14 +754,14 @@ void wlc_phy_init(struct brcms_phy_pub *pih, u16 chanspec)
 
        pi->radio_chanspec = chanspec;
 
-       mc = R_REG(&pi->regs->maccontrol);
+       mc = bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
        if (WARN(mc & MCTL_EN_MAC, "HW error MAC running on init"))
                return;
 
        if (!(pi->measure_hold & PHY_HOLD_FOR_SCAN))
                pi->measure_hold |= PHY_HOLD_FOR_NOT_ASSOC;
 
-       if (WARN(!(ai_core_sflags(pi->sh->sih, 0, 0) & SISF_FCLKA),
+       if (WARN(!(bcma_aread32(pi->d11core, BCMA_IOST) & SISF_FCLKA),
                 "HW error SISF_FCLKA\n"))
                return;
 
@@ -825,8 +800,8 @@ void wlc_phy_cal_init(struct brcms_phy_pub *pih)
        struct brcms_phy *pi = (struct brcms_phy *) pih;
        void (*cal_init)(struct brcms_phy *) = NULL;
 
-       if (WARN((R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC) != 0,
-                "HW error: MAC enabled during phy cal\n"))
+       if (WARN((bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+                 MCTL_EN_MAC) != 0, "HW error: MAC enabled during phy cal\n"))
                return;
 
        if (!pi->initialized) {
@@ -1017,7 +992,7 @@ wlc_phy_init_radio_regs(struct brcms_phy *pi,
 void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on)
 {
 #define DUMMY_PKT_LEN   20
-       struct d11regs __iomem *regs = pi->regs;
+       struct bcma_device *core = pi->d11core;
        int i, count;
        u8 ofdmpkt[DUMMY_PKT_LEN] = {
                0xcc, 0x01, 0x02, 0x00, 0x00, 0x00, 0xd4, 0x00, 0x00, 0x00,
@@ -1033,26 +1008,28 @@ void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on)
        wlapi_bmac_write_template_ram(pi->sh->physhim, 0, DUMMY_PKT_LEN,
                                      dummypkt);
 
-       W_REG(&regs->xmtsel, 0);
+       bcma_write16(core, D11REGOFFS(xmtsel), 0);
 
        if (D11REV_GE(pi->sh->corerev, 11))
-               W_REG(&regs->wepctl, 0x100);
+               bcma_write16(core, D11REGOFFS(wepctl), 0x100);
        else
-               W_REG(&regs->wepctl, 0);
+               bcma_write16(core, D11REGOFFS(wepctl), 0);
 
-       W_REG(&regs->txe_phyctl, (ofdm ? 1 : 0) | PHY_TXC_ANT_0);
+       bcma_write16(core, D11REGOFFS(txe_phyctl),
+                    (ofdm ? 1 : 0) | PHY_TXC_ANT_0);
        if (ISNPHY(pi) || ISLCNPHY(pi))
-               W_REG(&regs->txe_phyctl1, 0x1A02);
+               bcma_write16(core, D11REGOFFS(txe_phyctl1), 0x1A02);
 
-       W_REG(&regs->txe_wm_0, 0);
-       W_REG(&regs->txe_wm_1, 0);
+       bcma_write16(core, D11REGOFFS(txe_wm_0), 0);
+       bcma_write16(core, D11REGOFFS(txe_wm_1), 0);
 
-       W_REG(&regs->xmttplatetxptr, 0);
-       W_REG(&regs->xmttxcnt, DUMMY_PKT_LEN);
+       bcma_write16(core, D11REGOFFS(xmttplatetxptr), 0);
+       bcma_write16(core, D11REGOFFS(xmttxcnt), DUMMY_PKT_LEN);
 
-       W_REG(&regs->xmtsel, ((8 << 8) | (1 << 5) | (1 << 2) | 2));
+       bcma_write16(core, D11REGOFFS(xmtsel),
+                    ((8 << 8) | (1 << 5) | (1 << 2) | 2));
 
-       W_REG(&regs->txe_ctl, 0);
+       bcma_write16(core, D11REGOFFS(txe_ctl), 0);
 
        if (!pa_on) {
                if (ISNPHY(pi))
@@ -1060,27 +1037,28 @@ void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on)
        }
 
        if (ISNPHY(pi) || ISLCNPHY(pi))
-               W_REG(&regs->txe_aux, 0xD0);
+               bcma_write16(core, D11REGOFFS(txe_aux), 0xD0);
        else
-               W_REG(&regs->txe_aux, ((1 << 5) | (1 << 4)));
+               bcma_write16(core, D11REGOFFS(txe_aux), ((1 << 5) | (1 << 4)));
 
-       (void)R_REG(&regs->txe_aux);
+       (void)bcma_read16(core, D11REGOFFS(txe_aux));
 
        i = 0;
        count = ofdm ? 30 : 250;
        while ((i++ < count)
-              && (R_REG(&regs->txe_status) & (1 << 7)))
+              && (bcma_read16(core, D11REGOFFS(txe_status)) & (1 << 7)))
                udelay(10);
 
        i = 0;
 
-       while ((i++ < 10)
-              && ((R_REG(&regs->txe_status) & (1 << 10)) == 0))
+       while ((i++ < 10) &&
+              ((bcma_read16(core, D11REGOFFS(txe_status)) & (1 << 10)) == 0))
                udelay(10);
 
        i = 0;
 
-       while ((i++ < 10) && ((R_REG(&regs->ifsstat) & (1 << 8))))
+       while ((i++ < 10) &&
+              ((bcma_read16(core, D11REGOFFS(ifsstat)) & (1 << 8))))
                udelay(10);
 
        if (!pa_on) {
@@ -1137,7 +1115,7 @@ static bool wlc_phy_cal_txpower_recalc_sw(struct brcms_phy *pi)
 void wlc_phy_switch_radio(struct brcms_phy_pub *pih, bool on)
 {
        struct brcms_phy *pi = (struct brcms_phy *) pih;
-       (void)R_REG(&pi->regs->maccontrol);
+       (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
 
        if (ISNPHY(pi)) {
                wlc_phy_switch_radio_nphy(pi, on);
@@ -1377,7 +1355,7 @@ void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi,
        memcpy(&pi->tx_user_target[TXP_FIRST_MCS_40_SDM],
               &txpwr->mcs_40_mimo[0], BRCMS_NUM_RATES_MCS_2_STREAM);
 
-       if (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)
+       if (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & MCTL_EN_MAC)
                mac_enabled = true;
 
        if (mac_enabled)
@@ -1407,7 +1385,8 @@ int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm, bool override)
                if (!SCAN_INPROG_PHY(pi)) {
                        bool suspend;
 
-                       suspend = (0 == (R_REG(&pi->regs->maccontrol) &
+                       suspend = (0 == (bcma_read32(pi->d11core,
+                                                    D11REGOFFS(maccontrol)) &
                                         MCTL_EN_MAC));
 
                        if (!suspend)
@@ -1860,18 +1839,17 @@ void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end)
 
                if (NREV_IS(pi->pubpi.phy_rev, 3)
                    || NREV_IS(pi->pubpi.phy_rev, 4)) {
-                       W_REG(&pi->regs->phyregaddr, 0xa0);
-                       (void)R_REG(&pi->regs->phyregaddr);
-                       rxc = R_REG(&pi->regs->phyregdata);
-                       W_REG(&pi->regs->phyregdata,
-                             (0x1 << 15) | rxc);
+                       bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr),
+                                     0xa0);
+                       bcma_set16(pi->d11core, D11REGOFFS(phyregdata),
+                                  0x1 << 15);
                }
        } else {
                if (NREV_IS(pi->pubpi.phy_rev, 3)
                    || NREV_IS(pi->pubpi.phy_rev, 4)) {
-                       W_REG(&pi->regs->phyregaddr, 0xa0);
-                       (void)R_REG(&pi->regs->phyregaddr);
-                       W_REG(&pi->regs->phyregdata, rxc);
+                       bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr),
+                                     0xa0);
+                       bcma_write16(pi->d11core, D11REGOFFS(phyregdata), rxc);
                }
 
                wlc_phy_por_inform(ppi);
@@ -1991,7 +1969,9 @@ void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi, bool hwpwrctrl)
        pi->txpwrctrl = hwpwrctrl;
 
        if (ISNPHY(pi)) {
-               suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+               suspend = (0 == (bcma_read32(pi->d11core,
+                                            D11REGOFFS(maccontrol)) &
+                                MCTL_EN_MAC));
                if (!suspend)
                        wlapi_suspend_mac_and_wait(pi->sh->physhim);
 
@@ -2193,7 +2173,8 @@ void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val)
        if (!pi->sh->clk)
                return;
 
-       suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+       suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+                        MCTL_EN_MAC));
        if (!suspend)
                wlapi_suspend_mac_and_wait(pi->sh->physhim);
 
@@ -2411,8 +2392,8 @@ wlc_phy_noise_sample_request(struct brcms_phy_pub *pih, u8 reason, u8 ch)
                        wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP2, 0);
                        wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP3, 0);
 
-                       OR_REG(&pi->regs->maccommand,
-                              MCMD_BG_NOISE);
+                       bcma_set32(pi->d11core, D11REGOFFS(maccommand),
+                                  MCMD_BG_NOISE);
                } else {
                        wlapi_suspend_mac_and_wait(pi->sh->physhim);
                        wlc_lcnphy_deaf_mode(pi, (bool) 0);
@@ -2430,8 +2411,8 @@ wlc_phy_noise_sample_request(struct brcms_phy_pub *pih, u8 reason, u8 ch)
                        wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP2, 0);
                        wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP3, 0);
 
-                       OR_REG(&pi->regs->maccommand,
-                              MCMD_BG_NOISE);
+                       bcma_set32(pi->d11core, D11REGOFFS(maccommand),
+                                  MCMD_BG_NOISE);
                } else {
                        struct phy_iq_est est[PHY_CORE_MAX];
                        u32 cmplx_pwr[PHY_CORE_MAX];
@@ -2924,29 +2905,29 @@ void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode)
                                mod_phy_reg(pi, 0x44c, (0x1 << 2), (1) << 2);
 
                        }
-                       ai_corereg(pi->sh->sih, SI_CC_IDX,
-                                  offsetof(struct chipcregs, gpiocontrol),
-                                  ~0x0, 0x0);
-                       ai_corereg(pi->sh->sih, SI_CC_IDX,
-                                  offsetof(struct chipcregs, gpioout), 0x40,
-                                  0x40);
-                       ai_corereg(pi->sh->sih, SI_CC_IDX,
-                                  offsetof(struct chipcregs, gpioouten), 0x40,
-                                  0x40);
+                       ai_cc_reg(pi->sh->sih,
+                                 offsetof(struct chipcregs, gpiocontrol),
+                                 ~0x0, 0x0);
+                       ai_cc_reg(pi->sh->sih,
+                                 offsetof(struct chipcregs, gpioout),
+                                 0x40, 0x40);
+                       ai_cc_reg(pi->sh->sih,
+                                 offsetof(struct chipcregs, gpioouten),
+                                 0x40, 0x40);
                } else {
                        mod_phy_reg(pi, 0x44c, (0x1 << 2), (0) << 2);
 
                        mod_phy_reg(pi, 0x44d, (0x1 << 2), (0) << 2);
 
-                       ai_corereg(pi->sh->sih, SI_CC_IDX,
-                                  offsetof(struct chipcregs, gpioout), 0x40,
-                                  0x00);
-                       ai_corereg(pi->sh->sih, SI_CC_IDX,
-                                  offsetof(struct chipcregs, gpioouten), 0x40,
-                                  0x0);
-                       ai_corereg(pi->sh->sih, SI_CC_IDX,
-                                  offsetof(struct chipcregs, gpiocontrol),
-                                  ~0x0, 0x40);
+                       ai_cc_reg(pi->sh->sih,
+                                 offsetof(struct chipcregs, gpioout),
+                                 0x40, 0x00);
+                       ai_cc_reg(pi->sh->sih,
+                                 offsetof(struct chipcregs, gpioouten),
+                                 0x40, 0x0);
+                       ai_cc_reg(pi->sh->sih,
+                                 offsetof(struct chipcregs, gpiocontrol),
+                                 ~0x0, 0x40);
                }
        }
 }
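
The phy register helpers above (read_phy_reg(), and_phy_reg(), or_phy_reg(), mod_phy_reg()) all use the same indirect scheme: write the phy register number to phyregaddr with a flushing 16-bit write, then operate on phyregdata, and the old open-coded read-modify-write collapses into a single bcma_mask16()/bcma_set16()/bcma_maskset16() call. A sketch of the mod case, assuming the driver's bcma_wflush16() wrapper is simply a write followed by a read-back (like the old W_REG_FLUSH()); the helper and its offset parameters are hypothetical stand-ins for D11REGOFFS(phyregaddr)/D11REGOFFS(phyregdata):

#include <linux/bcma/bcma.h>

/* Illustrative only: select an indirect phy register, then update it as
 * data = (data & ~mask) | (val & mask), the way mod_phy_reg() does above.
 */
static void example_mod_indirect_reg(struct bcma_device *d11core,
				     u16 addr_off, u16 data_off,
				     u16 phy_addr, u16 mask, u16 val)
{
	bcma_write16(d11core, addr_off, phy_addr);
	(void)bcma_read16(d11core, addr_off);	/* flush the address write */
	bcma_maskset16(d11core, data_off, ~mask, val & mask);
}
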
index 96e1516..e34a71e 100644
@@ -166,7 +166,6 @@ struct shared_phy_params {
        struct phy_shim_info *physhim;
        uint unit;
        uint corerev;
-       uint buscorerev;
        u16 vid;
        u16 did;
        uint chip;
@@ -175,7 +174,6 @@ struct shared_phy_params {
        uint sromrev;
        uint boardtype;
        uint boardrev;
-       uint boardvendor;
        u32 boardflags;
        u32 boardflags2;
 };
@@ -183,7 +181,7 @@ struct shared_phy_params {
 
 extern struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp);
 extern struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh,
-                                           struct d11regs __iomem *regs,
+                                           struct bcma_device *d11core,
                                            int bandtype, struct wiphy *wiphy);
 extern void wlc_phy_detach(struct brcms_phy_pub *ppi);
 
index 5f9478b..af00e2c 100644
@@ -503,10 +503,8 @@ struct shared_phy {
        uint sromrev;
        uint boardtype;
        uint boardrev;
-       uint boardvendor;
        u32 boardflags;
        u32 boardflags2;
-       uint buscorerev;
        uint fast_timer;
        uint slow_timer;
        uint glacial_timer;
@@ -559,7 +557,7 @@ struct brcms_phy {
        } u;
        bool user_txpwr_at_rfport;
 
-       struct d11regs __iomem *regs;
+       struct bcma_device *d11core;
        struct brcms_phy *next;
        struct brcms_phy_pub pubpi;
 
@@ -1090,7 +1088,7 @@ extern void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32,
 
 #define BRCMS_PHY_WAR_PR51571(pi) \
        if (NREV_LT((pi)->pubpi.phy_rev, 3)) \
-               (void)R_REG(&(pi)->regs->maccontrol)
+               (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol))
 
 extern void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype);
 extern void wlc_phy_aci_reset_nphy(struct brcms_phy *pi);
index a63aa99..ce8562a 100644
@@ -1603,7 +1603,7 @@ wlc_lcnphy_set_chanspec_tweaks(struct brcms_phy *pi, u16 chanspec)
                si_pmu_pllupd(pi->sh->sih);
                write_phy_reg(pi, 0x942, 0);
                wlc_lcnphy_txrx_spur_avoidance_mode(pi, false);
-               pi_lcn->lcnphy_spurmod = 0;
+               pi_lcn->lcnphy_spurmod = false;
                mod_phy_reg(pi, 0x424, (0xff << 8), (0x1b) << 8);
 
                write_phy_reg(pi, 0x425, 0x5907);
@@ -1616,7 +1616,7 @@ wlc_lcnphy_set_chanspec_tweaks(struct brcms_phy *pi, u16 chanspec)
                write_phy_reg(pi, 0x942, 0);
                wlc_lcnphy_txrx_spur_avoidance_mode(pi, true);
 
-               pi_lcn->lcnphy_spurmod = 0;
+               pi_lcn->lcnphy_spurmod = false;
                mod_phy_reg(pi, 0x424, (0xff << 8), (0x1f) << 8);
 
                write_phy_reg(pi, 0x425, 0x590a);
@@ -2325,7 +2325,7 @@ static s8 wlc_lcnphy_tempcompensated_txpwrctrl(struct brcms_phy *pi)
 {
        s8 index, delta_brd, delta_temp, new_index, tempcorrx;
        s16 manp, meas_temp, temp_diff;
-       bool neg = 0;
+       bool neg = false;
        u16 temp;
        struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
 
@@ -2348,7 +2348,7 @@ static s8 wlc_lcnphy_tempcompensated_txpwrctrl(struct brcms_phy *pi)
        manp = LCNPHY_TEMPSENSE(pi_lcn->lcnphy_rawtempsense);
        temp_diff = manp - meas_temp;
        if (temp_diff < 0) {
-               neg = 1;
+               neg = true;
                temp_diff = -temp_diff;
        }
 
@@ -2813,10 +2813,8 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
        u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10;
        u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4;
        idleTssi = read_phy_reg(pi, 0x4ab);
-       suspend =
-               (0 ==
-                (R_REG(&((struct brcms_phy *) pi)->regs->maccontrol) &
-                 MCTL_EN_MAC));
+       suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+                        MCTL_EN_MAC));
        if (!suspend)
                wlapi_suspend_mac_and_wait(pi->sh->physhim);
        wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
@@ -2890,7 +2888,8 @@ static void wlc_lcnphy_vbat_temp_sense_setup(struct brcms_phy *pi, u8 mode)
 
        for (i = 0; i < 14; i++)
                values_to_save[i] = read_phy_reg(pi, tempsense_phy_regs[i]);
-       suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+       suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+                        MCTL_EN_MAC));
        if (!suspend)
                wlapi_suspend_mac_and_wait(pi->sh->physhim);
        save_txpwrCtrlEn = read_radio_reg(pi, 0x4a4);
@@ -3016,8 +3015,8 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
        bool suspend;
        struct brcms_phy *pi = (struct brcms_phy *) ppi;
 
-       suspend =
-               (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+       suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+                        MCTL_EN_MAC));
        if (!suspend)
                wlapi_suspend_mac_and_wait(pi->sh->physhim);
 
@@ -3535,15 +3534,17 @@ wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo, u16 thresh,
        timer = 0;
        old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
 
-       curval1 = R_REG(&pi->regs->psm_corectlsts);
+       curval1 = bcma_read16(pi->d11core, D11REGOFFS(psm_corectlsts));
        ptr[130] = 0;
-       W_REG(&pi->regs->psm_corectlsts, ((1 << 6) | curval1));
+       bcma_write16(pi->d11core, D11REGOFFS(psm_corectlsts),
+                    ((1 << 6) | curval1));
 
-       W_REG(&pi->regs->smpl_clct_strptr, 0x7E00);
-       W_REG(&pi->regs->smpl_clct_stpptr, 0x8000);
+       bcma_write16(pi->d11core, D11REGOFFS(smpl_clct_strptr), 0x7E00);
+       bcma_write16(pi->d11core, D11REGOFFS(smpl_clct_stpptr), 0x8000);
        udelay(20);
-       curval2 = R_REG(&pi->regs->psm_phy_hdr_param);
-       W_REG(&pi->regs->psm_phy_hdr_param, curval2 | 0x30);
+       curval2 = bcma_read16(pi->d11core, D11REGOFFS(psm_phy_hdr_param));
+       bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param),
+                    curval2 | 0x30);
 
        write_phy_reg(pi, 0x555, 0x0);
        write_phy_reg(pi, 0x5a6, 0x5);
@@ -3560,19 +3561,19 @@ wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo, u16 thresh,
 
        sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
        write_phy_reg(pi, 0x6da, (u32) (sslpnCalibClkEnCtrl | 0x2008));
-       stpptr = R_REG(&pi->regs->smpl_clct_stpptr);
-       curptr = R_REG(&pi->regs->smpl_clct_curptr);
+       stpptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_stpptr));
+       curptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_curptr));
        do {
                udelay(10);
-               curptr = R_REG(&pi->regs->smpl_clct_curptr);
+               curptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_curptr));
                timer++;
        } while ((curptr != stpptr) && (timer < 500));
 
-       W_REG(&pi->regs->psm_phy_hdr_param, 0x2);
+       bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), 0x2);
        strptr = 0x7E00;
-       W_REG(&pi->regs->tplatewrptr, strptr);
+       bcma_write32(pi->d11core, D11REGOFFS(tplatewrptr), strptr);
        while (strptr < 0x8000) {
-               val = R_REG(&pi->regs->tplatewrdata);
+               val = bcma_read32(pi->d11core, D11REGOFFS(tplatewrdata));
                imag = ((val >> 16) & 0x3ff);
                real = ((val) & 0x3ff);
                if (imag > 511)
@@ -3597,8 +3598,8 @@ wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo, u16 thresh,
        }
 
        write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
-       W_REG(&pi->regs->psm_phy_hdr_param, curval2);
-       W_REG(&pi->regs->psm_corectlsts, curval1);
+       bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), curval2);
+       bcma_write16(pi->d11core, D11REGOFFS(psm_corectlsts), curval1);
 }
 
 static void
@@ -3681,8 +3682,8 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels,
        wlc_lcnphy_set_cc(pi, cal_type, phy_c15, phy_c16);
        udelay(20);
        for (phy_c8 = 0; phy_c7 != 0 && phy_c8 < num_levels; phy_c8++) {
-               phy_c23 = 1;
-               phy_c22 = 0;
+               phy_c23 = true;
+               phy_c22 = false;
                switch (cal_type) {
                case 0:
                        phy_c10 = 511;
@@ -3700,18 +3701,18 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels,
 
                phy_c9 = read_phy_reg(pi, 0x93d);
                phy_c9 = 2 * phy_c9;
-               phy_c24 = 0;
+               phy_c24 = false;
                phy_c5 = 7;
-               phy_c25 = 1;
+               phy_c25 = true;
                while (1) {
                        write_radio_reg(pi, RADIO_2064_REG026,
                                        (phy_c5 & 0x7) | ((phy_c5 & 0x7) << 4));
                        udelay(50);
-                       phy_c22 = 0;
+                       phy_c22 = false;
                        ptr[130] = 0;
                        wlc_lcnphy_samp_cap(pi, 1, phy_c9, &ptr[0], 2);
                        if (ptr[130] == 1)
-                               phy_c22 = 1;
+                               phy_c22 = true;
                        if (phy_c22)
                                phy_c5 -= 1;
                        if ((phy_c22 != phy_c24) && (!phy_c25))
@@ -3721,7 +3722,7 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels,
                        if (phy_c5 <= 0 || phy_c5 >= 7)
                                break;
                        phy_c24 = phy_c22;
-                       phy_c25 = 0;
+                       phy_c25 = false;
                }
 
                if (phy_c5 < 0)
@@ -3772,10 +3773,10 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels,
                                        phy_c13 = phy_c11;
                                        phy_c14 = phy_c12;
                                }
-                               phy_c23 = 0;
+                               phy_c23 = false;
                        }
                }
-               phy_c23 = 1;
+               phy_c23 = true;
                phy_c15 = phy_c13;
                phy_c16 = phy_c14;
                phy_c7 = phy_c7 >> 1;
@@ -3965,12 +3966,12 @@ s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode)
 {
        u16 tempsenseval1, tempsenseval2;
        s16 avg = 0;
-       bool suspend = 0;
+       bool suspend = false;
 
        if (mode == 1) {
-               suspend =
-                       (0 ==
-                        (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+               suspend = (0 == (bcma_read32(pi->d11core,
+                                            D11REGOFFS(maccontrol)) &
+                                MCTL_EN_MAC));
                if (!suspend)
                        wlapi_suspend_mac_and_wait(pi->sh->physhim);
                wlc_lcnphy_vbat_temp_sense_setup(pi, TEMPSENSE);
@@ -4007,14 +4008,14 @@ u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode)
 {
        u16 tempsenseval1, tempsenseval2;
        s32 avg = 0;
-       bool suspend = 0;
+       bool suspend = false;
        u16 SAVE_txpwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
        struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
 
        if (mode == 1) {
-               suspend =
-                       (0 ==
-                        (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+               suspend = (0 == (bcma_read32(pi->d11core,
+                                            D11REGOFFS(maccontrol)) &
+                                MCTL_EN_MAC));
                if (!suspend)
                        wlapi_suspend_mac_and_wait(pi->sh->physhim);
                wlc_lcnphy_vbat_temp_sense_setup(pi, TEMPSENSE);
@@ -4075,12 +4076,12 @@ s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode)
 {
        u16 vbatsenseval;
        s32 avg = 0;
-       bool suspend = 0;
+       bool suspend = false;
 
        if (mode == 1) {
-               suspend =
-                       (0 ==
-                        (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+               suspend = (0 == (bcma_read32(pi->d11core,
+                                            D11REGOFFS(maccontrol)) &
+                                MCTL_EN_MAC));
                if (!suspend)
                        wlapi_suspend_mac_and_wait(pi->sh->physhim);
                wlc_lcnphy_vbat_temp_sense_setup(pi, VBATSENSE);
@@ -4127,8 +4128,8 @@ static void wlc_lcnphy_glacial_timer_based_cal(struct brcms_phy *pi)
        s8 index;
        u16 SAVE_pwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
        struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
-       suspend =
-               (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+       suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+                        MCTL_EN_MAC));
        if (!suspend)
                wlapi_suspend_mac_and_wait(pi->sh->physhim);
        wlc_lcnphy_deaf_mode(pi, true);
@@ -4166,8 +4167,8 @@ static void wlc_lcnphy_periodic_cal(struct brcms_phy *pi)
        pi_lcn->lcnphy_full_cal_channel = CHSPEC_CHANNEL(pi->radio_chanspec);
        index = pi_lcn->lcnphy_current_index;
 
-       suspend =
-               (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+       suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+                        MCTL_EN_MAC));
        if (!suspend) {
                wlapi_bmac_write_shm(pi->sh->physhim, M_CTS_DURATION, 10000);
                wlapi_suspend_mac_and_wait(pi->sh->physhim);
index ec9b566..a16f1ab 100644
@@ -17802,7 +17802,7 @@ static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi)
 
        if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) {
                wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK);
-               (void)R_REG(&pi->regs->maccontrol);
+               (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
                udelay(1);
        }
 
@@ -17953,7 +17953,7 @@ static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi)
 
        if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) {
                wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK);
-               (void)R_REG(&pi->regs->maccontrol);
+               (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
                udelay(1);
        }
 
@@ -19447,8 +19447,6 @@ void wlc_phy_init_nphy(struct brcms_phy *pi)
        u8 tx_pwr_ctrl_state;
        bool do_nphy_cal = false;
        uint core;
-       uint origidx, intr_val;
-       struct d11regs __iomem *regs;
        u32 d11_clk_ctl_st;
        bool do_rssi_cal = false;
 
@@ -19462,25 +19460,21 @@ void wlc_phy_init_nphy(struct brcms_phy *pi)
             (pi->sh->chippkg == BCM4718_PKG_ID))) {
                if ((pi->sh->boardflags & BFL_EXTLNA) &&
                    (CHSPEC_IS2G(pi->radio_chanspec)))
-                       ai_corereg(pi->sh->sih, SI_CC_IDX,
-                                  offsetof(struct chipcregs, chipcontrol),
-                                  0x40, 0x40);
+                       ai_cc_reg(pi->sh->sih,
+                                 offsetof(struct chipcregs, chipcontrol),
+                                 0x40, 0x40);
        }
 
        if ((pi->nphy_gband_spurwar2_en) && CHSPEC_IS2G(pi->radio_chanspec) &&
            CHSPEC_IS40(pi->radio_chanspec)) {
 
-               regs = (struct d11regs __iomem *)
-                               ai_switch_core(pi->sh->sih,
-                                              D11_CORE_ID, &origidx,
-                                              &intr_val);
-               d11_clk_ctl_st = R_REG(&regs->clk_ctl_st);
-               AND_REG(&regs->clk_ctl_st,
-                       ~(CCS_FORCEHT | CCS_HTAREQ));
+               d11_clk_ctl_st = bcma_read32(pi->d11core,
+                                            D11REGOFFS(clk_ctl_st));
+               bcma_mask32(pi->d11core, D11REGOFFS(clk_ctl_st),
+                           ~(CCS_FORCEHT | CCS_HTAREQ));
 
-               W_REG(&regs->clk_ctl_st, d11_clk_ctl_st);
-
-               ai_restore_core(pi->sh->sih, origidx, intr_val);
+               bcma_write32(pi->d11core, D11REGOFFS(clk_ctl_st),
+                            d11_clk_ctl_st);
        }
 
        pi->use_int_tx_iqlo_cal_nphy =
@@ -19885,7 +19879,8 @@ void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih, u8 rxcore_bitmask)
        if (!pi->sh->clk)
                return;
 
-       suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+       suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+                        MCTL_EN_MAC));
        if (!suspend)
                wlapi_suspend_mac_and_wait(pi->sh->physhim);
 
@@ -21263,28 +21258,28 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
        val = read_phy_reg(pi, 0x09) & NPHY_BandControl_currentBand;
        if (CHSPEC_IS5G(chanspec) && !val) {
 
-               val = R_REG(&pi->regs->psm_phy_hdr_param);
-               W_REG(&pi->regs->psm_phy_hdr_param,
+               val = bcma_read16(pi->d11core, D11REGOFFS(psm_phy_hdr_param));
+               bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param),
                      (val | MAC_PHY_FORCE_CLK));
 
                or_phy_reg(pi, (NPHY_TO_BPHY_OFF + BPHY_BB_CONFIG),
                           (BBCFG_RESETCCA | BBCFG_RESETRX));
 
-               W_REG(&pi->regs->psm_phy_hdr_param, val);
+               bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), val);
 
                or_phy_reg(pi, 0x09, NPHY_BandControl_currentBand);
        } else if (!CHSPEC_IS5G(chanspec) && val) {
 
                and_phy_reg(pi, 0x09, ~NPHY_BandControl_currentBand);
 
-               val = R_REG(&pi->regs->psm_phy_hdr_param);
-               W_REG(&pi->regs->psm_phy_hdr_param,
+               val = bcma_read16(pi->d11core, D11REGOFFS(psm_phy_hdr_param));
+               bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param),
                      (val | MAC_PHY_FORCE_CLK));
 
                and_phy_reg(pi, (NPHY_TO_BPHY_OFF + BPHY_BB_CONFIG),
                            (u16) (~(BBCFG_RESETCCA | BBCFG_RESETRX)));
 
-               W_REG(&pi->regs->psm_phy_hdr_param, val);
+               bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), val);
        }
 
        write_phy_reg(pi, 0x1ce, ci->PHY_BW1a);
@@ -21342,24 +21337,23 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
                        spuravoid = 1;
 
                wlapi_bmac_core_phypll_ctl(pi->sh->physhim, false);
-               si_pmu_spuravoid(pi->sh->sih, spuravoid);
+               si_pmu_spuravoid_pllupdate(pi->sh->sih, spuravoid);
                wlapi_bmac_core_phypll_ctl(pi->sh->physhim, true);
 
                if ((pi->sh->chip == BCM43224_CHIP_ID) ||
                    (pi->sh->chip == BCM43225_CHIP_ID)) {
-
                        if (spuravoid == 1) {
-
-                               W_REG(&pi->regs->tsf_clk_frac_l,
-                                     0x5341);
-                               W_REG(&pi->regs->tsf_clk_frac_h,
-                                     0x8);
+                               bcma_write16(pi->d11core,
+                                            D11REGOFFS(tsf_clk_frac_l),
+                                            0x5341);
+                               bcma_write16(pi->d11core,
+                                            D11REGOFFS(tsf_clk_frac_h), 0x8);
                        } else {
-
-                               W_REG(&pi->regs->tsf_clk_frac_l,
-                                     0x8889);
-                               W_REG(&pi->regs->tsf_clk_frac_h,
-                                     0x8);
+                               bcma_write16(pi->d11core,
+                                            D11REGOFFS(tsf_clk_frac_l),
+                                            0x8889);
+                               bcma_write16(pi->d11core,
+                                            D11REGOFFS(tsf_clk_frac_h), 0x8);
                        }
                }
 
@@ -21499,13 +21493,13 @@ void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init)
 
                ai_gpiocontrol(pi->sh->sih, mask, mask, GPIO_DRV_PRIORITY);
 
-               mc = R_REG(&pi->regs->maccontrol);
+               mc = bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
                mc &= ~MCTL_GPOUT_SEL_MASK;
-               W_REG(&pi->regs->maccontrol, mc);
+               bcma_write32(pi->d11core, D11REGOFFS(maccontrol), mc);
 
-               OR_REG(&pi->regs->psm_gpio_oe, mask);
+               bcma_set16(pi->d11core, D11REGOFFS(psm_gpio_oe), mask);
 
-               AND_REG(&pi->regs->psm_gpio_out, ~mask);
+               bcma_mask16(pi->d11core, D11REGOFFS(psm_gpio_out), ~mask);
 
                if (lut_init) {
                        write_phy_reg(pi, 0xf8, 0x02d8);
@@ -21522,9 +21516,8 @@ u16 wlc_phy_classifier_nphy(struct brcms_phy *pi, u16 mask, u16 val)
        bool suspended = false;
 
        if (D11REV_IS(pi->sh->corerev, 16)) {
-               suspended =
-                       (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC) ?
-                       false : true;
+               suspended = (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+                            MCTL_EN_MAC) ? false : true;
                if (!suspended)
                        wlapi_suspend_mac_and_wait(pi->sh->physhim);
        }
@@ -25383,7 +25376,8 @@ static void wlc_phy_a4(struct brcms_phy *pi, bool full_cal)
        if (pi->nphy_papd_skip == 1)
                return;
 
-       phy_b3 = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+       phy_b3 = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+                       MCTL_EN_MAC));
        if (!phy_b3)
                wlapi_suspend_mac_and_wait(pi->sh->physhim);
 
@@ -28357,7 +28351,7 @@ void wlc_phy_txpower_recalc_target_nphy(struct brcms_phy *pi)
 
        if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) {
                wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK);
-               (void)R_REG(&pi->regs->maccontrol);
+               (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
                udelay(1);
        }
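
wlc_phy_init_nphy() above also shows the save/clear/restore variant of offset-based access: clk_ctl_st is read once, the force-HT bits are cleared with bcma_mask32(), and the saved value is written back on pi->d11core, which is what lets the old ai_switch_core()/ai_restore_core() dance around a borrowed d11regs mapping go away. A sketch of the save-and-clear half; the CCS_* bits and D11REGOFFS(clk_ctl_st) are the driver's names, the helper is hypothetical:

#include <linux/bcma/bcma.h>

/* Illustrative only: save a 32-bit register, clear the requested bits,
 * and return the old value so the caller can restore it later with
 * bcma_write32(), as the clk_ctl_st handling above does.
 */
static u32 example_save_and_clear(struct bcma_device *d11core,
				  u16 offset, u32 clear_bits)
{
	u32 saved = bcma_read32(d11core, offset);

	bcma_mask32(d11core, offset, ~clear_bits);	/* reg &= ~clear_bits */
	return saved;
}
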
 
index 12ba575..4931d29 100644
@@ -115,10 +115,10 @@ static void si_pmu_res_masks(struct si_pub *sih, u32 * pmin, u32 * pmax)
        uint rsrcs;
 
        /* # resources */
-       rsrcs = (sih->pmucaps & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
+       rsrcs = (ai_get_pmucaps(sih) & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
 
        /* determine min/max rsrc masks */
-       switch (sih->chip) {
+       switch (ai_get_chip_id(sih)) {
        case BCM43224_CHIP_ID:
        case BCM43225_CHIP_ID:
                /* ??? */
@@ -139,75 +139,84 @@ static void si_pmu_res_masks(struct si_pub *sih, u32 * pmin, u32 * pmax)
        *pmax = max_mask;
 }
 
-static void
-si_pmu_spuravoid_pllupdate(struct si_pub *sih, struct chipcregs __iomem *cc,
-                          u8 spuravoid)
+void si_pmu_spuravoid_pllupdate(struct si_pub *sih, u8 spuravoid)
 {
        u32 tmp = 0;
+       struct bcma_device *core;
 
-       switch (sih->chip) {
+       /* switch to chipc */
+       core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
+
+       switch (ai_get_chip_id(sih)) {
        case BCM43224_CHIP_ID:
        case BCM43225_CHIP_ID:
                if (spuravoid == 1) {
-                       W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
-                       W_REG(&cc->pllcontrol_data, 0x11500010);
-                       W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
-                       W_REG(&cc->pllcontrol_data, 0x000C0C06);
-                       W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
-                       W_REG(&cc->pllcontrol_data, 0x0F600a08);
-                       W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
-                       W_REG(&cc->pllcontrol_data, 0x00000000);
-                       W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
-                       W_REG(&cc->pllcontrol_data, 0x2001E920);
-                       W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
-                       W_REG(&cc->pllcontrol_data, 0x88888815);
+                       bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+                                    PMU1_PLL0_PLLCTL0);
+                       bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+                                    0x11500010);
+                       bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+                                    PMU1_PLL0_PLLCTL1);
+                       bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+                                    0x000C0C06);
+                       bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+                                    PMU1_PLL0_PLLCTL2);
+                       bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+                                    0x0F600a08);
+                       bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+                                    PMU1_PLL0_PLLCTL3);
+                       bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+                                    0x00000000);
+                       bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+                                    PMU1_PLL0_PLLCTL4);
+                       bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+                                    0x2001E920);
+                       bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+                                    PMU1_PLL0_PLLCTL5);
+                       bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+                                    0x88888815);
                } else {
-                       W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
-                       W_REG(&cc->pllcontrol_data, 0x11100010);
-                       W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
-                       W_REG(&cc->pllcontrol_data, 0x000c0c06);
-                       W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
-                       W_REG(&cc->pllcontrol_data, 0x03000a08);
-                       W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
-                       W_REG(&cc->pllcontrol_data, 0x00000000);
-                       W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
-                       W_REG(&cc->pllcontrol_data, 0x200005c0);
-                       W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
-                       W_REG(&cc->pllcontrol_data, 0x88888815);
+                       bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+                                    PMU1_PLL0_PLLCTL0);
+                       bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+                                    0x11100010);
+                       bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+                                    PMU1_PLL0_PLLCTL1);
+                       bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+                                    0x000c0c06);
+                       bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+                                    PMU1_PLL0_PLLCTL2);
+                       bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+                                    0x03000a08);
+                       bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+                                    PMU1_PLL0_PLLCTL3);
+                       bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+                                    0x00000000);
+                       bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+                                    PMU1_PLL0_PLLCTL4);
+                       bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+                                    0x200005c0);
+                       bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+                                    PMU1_PLL0_PLLCTL5);
+                       bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+                                    0x88888815);
                }
                tmp = 1 << 10;
                break;
 
-               W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
-               W_REG(&cc->pllcontrol_data, 0x11100008);
-               W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
-               W_REG(&cc->pllcontrol_data, 0x0c000c06);
-               W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
-               W_REG(&cc->pllcontrol_data, 0x03000a08);
-               W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
-               W_REG(&cc->pllcontrol_data, 0x00000000);
-               W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
-               W_REG(&cc->pllcontrol_data, 0x200005c0);
-               W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
-               W_REG(&cc->pllcontrol_data, 0x88888855);
-
-               tmp = 1 << 10;
-               break;
-
        default:
                /* bail out */
                return;
        }
 
-       tmp |= R_REG(&cc->pmucontrol);
-       W_REG(&cc->pmucontrol, tmp);
+       bcma_set32(core, CHIPCREGOFFS(pmucontrol), tmp);
 }
 
 u16 si_pmu_fast_pwrup_delay(struct si_pub *sih)
 {
        uint delay = PMU_MAX_TRANSITION_DLY;
 
-       switch (sih->chip) {
+       switch (ai_get_chip_id(sih)) {
        case BCM43224_CHIP_ID:
        case BCM43225_CHIP_ID:
        case BCM4313_CHIP_ID:
@@ -220,54 +229,35 @@ u16 si_pmu_fast_pwrup_delay(struct si_pub *sih)
        return (u16) delay;
 }
 
-void si_pmu_sprom_enable(struct si_pub *sih, bool enable)
-{
-       struct chipcregs __iomem *cc;
-       uint origidx;
-
-       /* Remember original core before switch to chipc */
-       origidx = ai_coreidx(sih);
-       cc = ai_setcoreidx(sih, SI_CC_IDX);
-
-       /* Return to original core */
-       ai_setcoreidx(sih, origidx);
-}
-
 /* Read/write a chipcontrol reg */
 u32 si_pmu_chipcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
 {
-       ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, chipcontrol_addr),
-                  ~0, reg);
-       return ai_corereg(sih, SI_CC_IDX,
-                         offsetof(struct chipcregs, chipcontrol_data), mask,
-                         val);
+       ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol_addr), ~0, reg);
+       return ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol_data),
+                        mask, val);
 }
 
 /* Read/write a regcontrol reg */
 u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
 {
-       ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, regcontrol_addr),
-                  ~0, reg);
-       return ai_corereg(sih, SI_CC_IDX,
-                         offsetof(struct chipcregs, regcontrol_data), mask,
-                         val);
+       ai_cc_reg(sih, offsetof(struct chipcregs, regcontrol_addr), ~0, reg);
+       return ai_cc_reg(sih, offsetof(struct chipcregs, regcontrol_data),
+                        mask, val);
 }
 
 /* Read/write a pllcontrol reg */
 u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
 {
-       ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, pllcontrol_addr),
-                  ~0, reg);
-       return ai_corereg(sih, SI_CC_IDX,
-                         offsetof(struct chipcregs, pllcontrol_data), mask,
-                         val);
+       ai_cc_reg(sih, offsetof(struct chipcregs, pllcontrol_addr), ~0, reg);
+       return ai_cc_reg(sih, offsetof(struct chipcregs, pllcontrol_data),
+                        mask, val);
 }
 
 /* PMU PLL update */
 void si_pmu_pllupd(struct si_pub *sih)
 {
-       ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, pmucontrol),
-                  PCTL_PLL_PLLCTL_UPD, PCTL_PLL_PLLCTL_UPD);
+       ai_cc_reg(sih, offsetof(struct chipcregs, pmucontrol),
+                 PCTL_PLL_PLLCTL_UPD, PCTL_PLL_PLLCTL_UPD);
 }
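
Editorial note: the chipcontrol/regcontrol/pllcontrol accessors above all use the same indirect addr/data scheme — the register index is written through the *_addr register, then the payload is read-modify-written through the matching *_data register via ai_cc_reg(). A minimal sketch of that pattern (the helper name is hypothetical; ai_cc_reg() is assumed to apply mask/val to the chipcommon register at the given offset and return the result, as the calls above suggest):

	/* hypothetical helper, not part of the patch */
	static u32 cc_indirect_rw(struct si_pub *sih, uint addr_off,
				  uint data_off, uint reg, u32 mask, u32 val)
	{
		/* select which indirect register the data port will hit */
		ai_cc_reg(sih, addr_off, ~0, reg);
		/* masked read-modify-write of the selected register */
		return ai_cc_reg(sih, data_off, mask, val);
	}

	/* e.g. si_pmu_pllcontrol(sih, reg, mask, val) is equivalent to:
	 *   cc_indirect_rw(sih, offsetof(struct chipcregs, pllcontrol_addr),
	 *                  offsetof(struct chipcregs, pllcontrol_data),
	 *                  reg, mask, val);
	 */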
 
 /* query alp/xtal clock frequency */
@@ -276,10 +266,10 @@ u32 si_pmu_alp_clock(struct si_pub *sih)
        u32 clock = ALP_CLOCK;
 
        /* bail out with default */
-       if (!(sih->cccaps & CC_CAP_PMU))
+       if (!(ai_get_cccaps(sih) & CC_CAP_PMU))
                return clock;
 
-       switch (sih->chip) {
+       switch (ai_get_chip_id(sih)) {
        case BCM43224_CHIP_ID:
        case BCM43225_CHIP_ID:
        case BCM4313_CHIP_ID:
@@ -293,95 +283,29 @@ u32 si_pmu_alp_clock(struct si_pub *sih)
        return clock;
 }
 
-void si_pmu_spuravoid(struct si_pub *sih, u8 spuravoid)
-{
-       struct chipcregs __iomem *cc;
-       uint origidx, intr_val;
-
-       /* Remember original core before switch to chipc */
-       cc = (struct chipcregs __iomem *)
-                       ai_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
-
-       /* update the pll changes */
-       si_pmu_spuravoid_pllupdate(sih, cc, spuravoid);
-
-       /* Return to original core */
-       ai_restore_core(sih, origidx, intr_val);
-}
-
 /* initialize PMU */
 void si_pmu_init(struct si_pub *sih)
 {
-       struct chipcregs __iomem *cc;
-       uint origidx;
+       struct bcma_device *core;
 
-       /* Remember original core before switch to chipc */
-       origidx = ai_coreidx(sih);
-       cc = ai_setcoreidx(sih, SI_CC_IDX);
-
-       if (sih->pmurev == 1)
-               AND_REG(&cc->pmucontrol, ~PCTL_NOILP_ON_WAIT);
-       else if (sih->pmurev >= 2)
-               OR_REG(&cc->pmucontrol, PCTL_NOILP_ON_WAIT);
+       /* select chipc */
+       core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
 
-       /* Return to original core */
-       ai_setcoreidx(sih, origidx);
-}
-
-/* initialize PMU chip controls and other chip level stuff */
-void si_pmu_chip_init(struct si_pub *sih)
-{
-       uint origidx;
-
-       /* Gate off SPROM clock and chip select signals */
-       si_pmu_sprom_enable(sih, false);
-
-       /* Remember original core */
-       origidx = ai_coreidx(sih);
-
-       /* Return to original core */
-       ai_setcoreidx(sih, origidx);
-}
-
-/* initialize PMU switch/regulators */
-void si_pmu_swreg_init(struct si_pub *sih)
-{
-}
-
-/* initialize PLL */
-void si_pmu_pll_init(struct si_pub *sih, uint xtalfreq)
-{
-       struct chipcregs __iomem *cc;
-       uint origidx;
-
-       /* Remember original core before switch to chipc */
-       origidx = ai_coreidx(sih);
-       cc = ai_setcoreidx(sih, SI_CC_IDX);
-
-       switch (sih->chip) {
-       case BCM4313_CHIP_ID:
-       case BCM43224_CHIP_ID:
-       case BCM43225_CHIP_ID:
-               /* ??? */
-               break;
-       default:
-               break;
-       }
-
-       /* Return to original core */
-       ai_setcoreidx(sih, origidx);
+       if (ai_get_pmurev(sih) == 1)
+               bcma_mask32(core, CHIPCREGOFFS(pmucontrol),
+                           ~PCTL_NOILP_ON_WAIT);
+       else if (ai_get_pmurev(sih) >= 2)
+               bcma_set32(core, CHIPCREGOFFS(pmucontrol), PCTL_NOILP_ON_WAIT);
 }
 
 /* initialize PMU resources */
 void si_pmu_res_init(struct si_pub *sih)
 {
-       struct chipcregs __iomem *cc;
-       uint origidx;
+       struct bcma_device *core;
        u32 min_mask = 0, max_mask = 0;
 
-       /* Remember original core before switch to chipc */
-       origidx = ai_coreidx(sih);
-       cc = ai_setcoreidx(sih, SI_CC_IDX);
+       /* select chipc */
+       core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
 
        /* Determine min/max rsrc masks */
        si_pmu_res_masks(sih, &min_mask, &max_mask);
@@ -391,55 +315,50 @@ void si_pmu_res_init(struct si_pub *sih)
        /* Program max resource mask */
 
        if (max_mask)
-               W_REG(&cc->max_res_mask, max_mask);
+               bcma_write32(core, CHIPCREGOFFS(max_res_mask), max_mask);
 
        /* Program min resource mask */
 
        if (min_mask)
-               W_REG(&cc->min_res_mask, min_mask);
+               bcma_write32(core, CHIPCREGOFFS(min_res_mask), min_mask);
 
        /* Add some delay; allow resources to come up and settle. */
        mdelay(2);
-
-       /* Return to original core */
-       ai_setcoreidx(sih, origidx);
 }
 
 u32 si_pmu_measure_alpclk(struct si_pub *sih)
 {
-       struct chipcregs __iomem *cc;
-       uint origidx;
+       struct bcma_device *core;
        u32 alp_khz;
 
-       if (sih->pmurev < 10)
+       if (ai_get_pmurev(sih) < 10)
                return 0;
 
        /* Remember original core before switch to chipc */
-       origidx = ai_coreidx(sih);
-       cc = ai_setcoreidx(sih, SI_CC_IDX);
+       core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
 
-       if (R_REG(&cc->pmustatus) & PST_EXTLPOAVAIL) {
+       if (bcma_read32(core, CHIPCREGOFFS(pmustatus)) & PST_EXTLPOAVAIL) {
                u32 ilp_ctr, alp_hz;
 
                /*
                 * Enable the reg to measure the freq,
                 * in case it was disabled before
                 */
-               W_REG(&cc->pmu_xtalfreq,
-                     1U << PMU_XTALFREQ_REG_MEASURE_SHIFT);
+               bcma_write32(core, CHIPCREGOFFS(pmu_xtalfreq),
+                           1U << PMU_XTALFREQ_REG_MEASURE_SHIFT);
 
                /* Delay for well over 4 ILP clocks */
                udelay(1000);
 
                /* Read the latched number of ALP ticks per 4 ILP ticks */
-               ilp_ctr =
-                   R_REG(&cc->pmu_xtalfreq) & PMU_XTALFREQ_REG_ILPCTR_MASK;
+               ilp_ctr = bcma_read32(core, CHIPCREGOFFS(pmu_xtalfreq)) &
+                         PMU_XTALFREQ_REG_ILPCTR_MASK;
 
                /*
                 * Turn off the PMU_XTALFREQ_REG_MEASURE_SHIFT
                 * bit to save power
                 */
-               W_REG(&cc->pmu_xtalfreq, 0);
+               bcma_write32(core, CHIPCREGOFFS(pmu_xtalfreq), 0);
 
                /* Calculate ALP frequency */
                alp_hz = (ilp_ctr * EXT_ILP_HZ) / 4;
@@ -452,8 +371,5 @@ u32 si_pmu_measure_alpclk(struct si_pub *sih)
        } else
                alp_khz = 0;
 
-       /* Return to original core */
-       ai_setcoreidx(sih, origidx);
-
        return alp_khz;
 }
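
Editorial note: to make the ALP measurement arithmetic above concrete, here is a stand-alone sketch of the same calculation. The EXT_ILP_HZ value and the final rounding step (which falls outside the hunk) are assumptions chosen for illustration only:

	#include <stdio.h>

	#define EXT_ILP_HZ 32768U	/* assumed external LPO frequency */

	int main(void)
	{
		unsigned int ilp_ctr = 2441;	/* latched ALP ticks per 4 ILP ticks */
		unsigned int alp_hz  = (ilp_ctr * EXT_ILP_HZ) / 4;	/* ~19.99 MHz */
		unsigned int alp_khz = (alp_hz + 50000) / 100000 * 100;	/* assumed rounding to 0.1 MHz */

		printf("alp_hz=%u alp_khz=%u\n", alp_hz, alp_khz);	/* -> 19996672, 20000 */
		return 0;
	}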
index 3a08c62..3e39c5e 100644 (file)
@@ -26,13 +26,10 @@ extern u32 si_pmu_chipcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
 extern u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
 extern u32 si_pmu_alp_clock(struct si_pub *sih);
 extern void si_pmu_pllupd(struct si_pub *sih);
-extern void si_pmu_spuravoid(struct si_pub *sih, u8 spuravoid);
+extern void si_pmu_spuravoid_pllupdate(struct si_pub *sih, u8 spuravoid);
 extern u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
 extern void si_pmu_init(struct si_pub *sih);
-extern void si_pmu_chip_init(struct si_pub *sih);
-extern void si_pmu_pll_init(struct si_pub *sih, u32 xtalfreq);
 extern void si_pmu_res_init(struct si_pub *sih);
-extern void si_pmu_swreg_init(struct si_pub *sih);
 extern u32 si_pmu_measure_alpclk(struct si_pub *sih);
 
 #endif /* _BRCM_PMU_H_ */
index 21ccf3a..f0038ad 100644 (file)
@@ -17,6 +17,7 @@
 #ifndef _BRCM_PUB_H_
 #define _BRCM_PUB_H_
 
+#include <linux/bcma/bcma.h>
 #include <brcmu_wifi.h>
 #include "types.h"
 #include "defs.h"
@@ -530,9 +531,8 @@ struct brcms_antselcfg {
 
 /* common functions for every port */
 extern struct brcms_c_info *
-brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
-              bool piomode, void __iomem *regsva, struct pci_dev *btparam,
-              uint *perr);
+brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
+              bool piomode, uint *perr);
 extern uint brcms_c_detach(struct brcms_c_info *wlc);
 extern int brcms_c_up(struct brcms_c_info *wlc);
 extern uint brcms_c_down(struct brcms_c_info *wlc);
index b6987ea..6109215 100644 (file)
@@ -586,17 +586,6 @@ static const struct brcms_sromvar perpath_pci_sromvars[] = {
  * shared between devices. */
 static u8 brcms_srom_crc8_table[CRC8_TABLE_SIZE];
 
-static u8 __iomem *
-srom_window_address(struct si_pub *sih, u8 __iomem *curmap)
-{
-       if (sih->ccrev < 32)
-               return curmap + PCI_BAR0_SPROM_OFFSET;
-       if (sih->cccaps & CC_CAP_SROM)
-               return curmap + PCI_16KB0_CCREGS_OFFSET + CC_SROM_OTP;
-
-       return NULL;
-}
-
 static uint mask_shift(u16 mask)
 {
        uint i;
@@ -779,17 +768,27 @@ _initvars_srom_pci(u8 sromrev, u16 *srom, struct list_head *var_list)
  * Return 0 on success, nonzero on error.
  */
 static int
-sprom_read_pci(struct si_pub *sih, u8 __iomem *sprom, uint wordoff,
-              u16 *buf, uint nwords, bool check_crc)
+sprom_read_pci(struct si_pub *sih, u16 *buf, uint nwords, bool check_crc)
 {
        int err = 0;
        uint i;
        u8 *bbuf = (u8 *)buf; /* byte buffer */
        uint nbytes = nwords << 1;
+       struct bcma_device *core;
+       uint sprom_offset;
+
+       /* determine core to read */
+       if (ai_get_ccrev(sih) < 32) {
+               core = ai_findcore(sih, BCMA_CORE_80211, 0);
+               sprom_offset = PCI_BAR0_SPROM_OFFSET;
+       } else {
+               core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
+               sprom_offset = CHIPCREGOFFS(sromotp);
+       }
 
        /* read the sprom in bytes */
        for (i = 0; i < nbytes; i++)
-               bbuf[i] = readb(sprom+i);
+               bbuf[i] = bcma_read8(core, sprom_offset+i);
 
        if (buf[0] == 0xffff)
                /*
@@ -851,10 +850,9 @@ static int otp_read_pci(struct si_pub *sih, u16 *buf, uint nwords)
  * Initialize nonvolatile variable table from sprom.
  * Return 0 on success, nonzero on error.
  */
-static int initvars_srom_pci(struct si_pub *sih, void __iomem *curmap)
+int srom_var_init(struct si_pub *sih)
 {
        u16 *srom;
-       u8 __iomem *sromwindow;
        u8 sromrev = 0;
        u32 sr;
        int err = 0;
@@ -866,12 +864,9 @@ static int initvars_srom_pci(struct si_pub *sih, void __iomem *curmap)
        if (!srom)
                return -ENOMEM;
 
-       sromwindow = srom_window_address(sih, curmap);
-
        crc8_populate_lsb(brcms_srom_crc8_table, SROM_CRC8_POLY);
        if (ai_is_sprom_available(sih)) {
-               err = sprom_read_pci(sih, sromwindow, 0, srom,
-                                    SROM4_WORDS, true);
+               err = sprom_read_pci(sih, srom, SROM4_WORDS, true);
 
                if (err == 0)
                        /* srom read and passed crc */
@@ -921,21 +916,6 @@ void srom_free_vars(struct si_pub *sih)
                kfree(entry);
        }
 }
-/*
- * Initialize local vars from the right source for this platform.
- * Return 0 on success, nonzero on error.
- */
-int srom_var_init(struct si_pub *sih, void __iomem *curmap)
-{
-       uint len;
-
-       len = 0;
-
-       if (curmap != NULL)
-               return initvars_srom_pci(sih, curmap);
-
-       return -EINVAL;
-}
 
 /*
  * Search the name=value vars for a specific one and return its value.
index c81df97..f2a58f2 100644 (file)
@@ -20,7 +20,7 @@
 #include "types.h"
 
 /* Prototypes */
-extern int srom_var_init(struct si_pub *sih, void __iomem *curmap);
+extern int srom_var_init(struct si_pub *sih);
 extern void srom_free_vars(struct si_pub *sih);
 
 extern int srom_read(struct si_pub *sih, uint bus, void *curmap,
index 27a814b..e11ae83 100644 (file)
@@ -250,66 +250,18 @@ do {                                              \
                wiphy_err(dev, "%s: " fmt, __func__, ##args);   \
 } while (0)
 
-/*
- * Register access macros.
- *
- * These macro's take a pointer to the address to read as one of their
- * arguments. The macro itself deduces the size of the IO transaction (u8, u16
- * or u32). Advantage of this approach in combination with using a struct to
- * define the registers in a register block, is that access size and access
- * location are defined in only one spot. This reduces the risk of the
- * programmer trying to use an unsupported transaction size on a register.
- *
- */
-
-#define R_REG(r) \
-       ({ \
-               __typeof(*(r)) __osl_v; \
-               switch (sizeof(*(r))) { \
-               case sizeof(u8): \
-                       __osl_v = readb((u8 __iomem *)(r)); \
-                       break; \
-               case sizeof(u16): \
-                       __osl_v = readw((u16 __iomem *)(r)); \
-                       break; \
-               case sizeof(u32): \
-                       __osl_v = readl((u32 __iomem *)(r)); \
-                       break; \
-               } \
-               __osl_v; \
-       })
-
-#define W_REG(r, v) do { \
-               switch (sizeof(*(r))) { \
-               case sizeof(u8):        \
-                       writeb((u8)((v) & 0xFF), (u8 __iomem *)(r)); \
-                       break; \
-               case sizeof(u16):       \
-                       writew((u16)((v) & 0xFFFF), (u16 __iomem *)(r)); \
-                       break; \
-               case sizeof(u32):       \
-                       writel((u32)(v), (u32 __iomem *)(r)); \
-                       break; \
-               } \
-       } while (0)
-
 #ifdef CONFIG_BCM47XX
 /*
  * bcm4716 (which includes 4717 & 4718), plus 4706 on PCIe can reorder
  * transactions. As a fix, a read after write is performed on certain places
  * in the code. Older chips and the newer 5357 family don't require this fix.
  */
-#define W_REG_FLUSH(r, v)      ({ W_REG((r), (v)); (void)R_REG(r); })
+#define bcma_wflush16(c, o, v) \
+       ({ bcma_write16(c, o, v); (void)bcma_read16(c, o); })
 #else
-#define W_REG_FLUSH(r, v)      W_REG((r), (v))
+#define bcma_wflush16(c, o, v) bcma_write16(c, o, v)
 #endif                         /* CONFIG_BCM47XX */
 
-#define AND_REG(r, v)  W_REG((r), R_REG(r) & (v))
-#define OR_REG(r, v)   W_REG((r), R_REG(r) | (v))
-
-#define SET_REG(r, mask, val) \
-               W_REG((r), ((R_REG(r) & ~(mask)) | (val)))
-
 /* multi-bool data type: set of bools, mbool is true if any is set */
 
 /* set one bool */
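
Editorial note: bcma_wflush16() above captures the standard fix for posted or reordered MMIO writes — a dummy read from the same register forces the preceding write to reach the core before execution continues. A hedged usage sketch (the function name, core and offset arguments are placeholders, not from the patch):

	/* sketch only: make sure the control write has landed before any
	 * time-critical sequence that follows */
	static void example_flushed_write(struct bcma_device *core, u16 offset,
					  u16 val)
	{
		bcma_wflush16(core, offset, val);
		/* on non-BCM47XX builds this degenerates to a plain bcma_write16() */
	}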
index fefabc3..f96834a 100644 (file)
@@ -19,6 +19,8 @@
 
 #include "defs.h"              /* for PAD macro */
 
+#define CHIPCREGOFFS(field)    offsetof(struct chipcregs, field)
+
 struct chipcregs {
        u32 chipid;             /* 0x0 */
        u32 capabilities;
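
Editorial note: with this change struct chipcregs doubles as a pure offset map — CHIPCREGOFFS(field) expands to offsetof(struct chipcregs, field), so the familiar field names feed the bcma accessors without any __iomem mapping. The resulting access pattern, mirroring the si_pmu_init() hunk above (declarations omitted):

	/* sketch: set a bit in chipcommon's pmucontrol register by offset */
	core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
	bcma_set32(core, CHIPCREGOFFS(pmucontrol), PCTL_NOILP_ON_WAIT);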
index 5441ad1..89e9d3a 100644 (file)
@@ -655,6 +655,9 @@ static const struct pcmcia_device_id hostap_cs_ids[] = {
        PCMCIA_DEVICE_PROD_ID123(
                "Addtron", "AWP-100 Wireless PCMCIA", "Version 01.02",
                0xe6ec52ce, 0x08649af2, 0x4b74baa0),
+       PCMCIA_DEVICE_PROD_ID123(
+               "Canon", "Wireless LAN CF Card K30225", "Version 01.00",
+               0x96ef6fe2, 0x263fcbab, 0xa57adb8c),
        PCMCIA_DEVICE_PROD_ID123(
                "D", "Link DWL-650 11Mbps WLAN Card", "Version 01.02",
                0x71b18589, 0xb6f1b0ab, 0x4b74baa0),
index 7e2924f..881ba04 100644 (file)
@@ -2786,9 +2786,8 @@ il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
        /* Driver ilate data, only for Tx (not command) queues,
         * not shared with device. */
        if (id != il->cmd_queue) {
-               txq->txb =
-                   kzalloc(sizeof(txq->txb[0]) * TFD_QUEUE_SIZE_MAX,
-                           GFP_KERNEL);
+               txq->txb = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->txb[0]),
+                                  GFP_KERNEL);
                if (!txq->txb) {
                        IL_ERR("kmalloc for auxiliary BD "
                               "structures failed\n");
index 57703d5..ae08498 100644 (file)
@@ -102,12 +102,28 @@ config IWLWIFI_DEVICE_TRACING
          occur.
 endmenu
 
-config IWLWIFI_DEVICE_SVTOOL
-       bool "iwlwifi device svtool support"
+config IWLWIFI_DEVICE_TESTMODE
+       def_bool y
        depends on IWLWIFI
-       select NL80211_TESTMODE
+       depends on NL80211_TESTMODE
        help
-         This option enables the svtool support for iwlwifi device through
-         NL80211_TESTMODE. svtool is a software validation tool that runs in
-         the user space and interacts with the device in the kernel space
-         through the generic netlink message via NL80211_TESTMODE channel.
+         This option enables testmode support for iwlwifi devices through
+         NL80211_TESTMODE. It allows user-space validation applications to
+         interact with the device via generic netlink messages on the
+         NL80211_TESTMODE channel.
+
+config IWLWIFI_P2P
+       bool "iwlwifi experimental P2P support"
+       depends on IWLWIFI
+       help
+         This option enables experimental P2P support for some devices,
+         depending on microcode support. Since P2P support is still under
+         development, this option may enable it now on devices that later
+         turn out not to support it, due to microcode restrictions.
+
+         To determine if your microcode supports the experimental P2P
+         offered by this option, check if the driver advertises AP
+         support when it is loaded.
+
+         Say Y only if you want to experiment with P2P.
index a7ab280..9dc84a7 100644 (file)
@@ -1,7 +1,7 @@
 # WIFI
 obj-$(CONFIG_IWLWIFI)  += iwlwifi.o
 iwlwifi-objs           := iwl-agn.o iwl-agn-rs.o iwl-mac80211.o
-iwlwifi-objs           += iwl-agn-ucode.o iwl-agn-tx.o
+iwlwifi-objs           += iwl-ucode.o iwl-agn-tx.o
 iwlwifi-objs           += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o
 iwlwifi-objs           += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-rx.o
 
@@ -18,7 +18,7 @@ iwlwifi-objs          += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o
 
 iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
 iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
-iwlwifi-$(CONFIG_IWLWIFI_DEVICE_SVTOOL) += iwl-sv-open.o
+iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o
 
 CFLAGS_iwl-devtrace.o := -I$(src)
 
index e12b48c..8d3bad7 100644 (file)
@@ -147,16 +147,7 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
        iwl1000_set_ct_threshold(priv);
 
        /* Set initial sensitivity parameters */
-       /* Set initial calibration set */
        hw_params(priv).sens = &iwl1000_sensitivity;
-       hw_params(priv).calib_init_cfg =
-                       BIT(IWL_CALIB_XTAL)             |
-                       BIT(IWL_CALIB_LO)               |
-                       BIT(IWL_CALIB_TX_IQ)            |
-                       BIT(IWL_CALIB_TX_IQ_PERD)       |
-                       BIT(IWL_CALIB_BASE_BAND);
-       if (priv->cfg->need_dc_calib)
-               hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC);
 
        return 0;
 }
@@ -191,6 +182,7 @@ static struct iwl_base_params iwl1000_base_params = {
        .chain_noise_scale = 1000,
        .wd_timeout = IWL_DEF_WD_TIMEOUT,
        .max_event_log_size = 128,
+       .wd_disable = true,
 };
 static struct iwl_ht_params iwl1000_ht_params = {
        .ht_greenfield_support = true,
index b319357..0c4688d 100644 (file)
@@ -143,17 +143,7 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
        iwl2000_set_ct_threshold(priv);
 
        /* Set initial sensitivity parameters */
-       /* Set initial calibration set */
        hw_params(priv).sens = &iwl2000_sensitivity;
-       hw_params(priv).calib_init_cfg =
-               BIT(IWL_CALIB_XTAL)             |
-               BIT(IWL_CALIB_LO)               |
-               BIT(IWL_CALIB_TX_IQ)            |
-               BIT(IWL_CALIB_BASE_BAND);
-       if (priv->cfg->need_dc_calib)
-               hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
-       if (priv->cfg->need_temp_offset_calib)
-               hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
 
        return 0;
 }
@@ -258,7 +248,6 @@ static struct iwl_bt_params iwl2030_bt_params = {
        .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION,       \
        .lib = &iwl2000_lib,                                    \
        .base_params = &iwl2000_base_params,                    \
-       .need_dc_calib = true,                                  \
        .need_temp_offset_calib = true,                         \
        .temp_offset_v2 = true,                                 \
        .led_mode = IWL_LED_RF_STATE,                           \
@@ -286,7 +275,6 @@ struct iwl_cfg iwl2000_2bgn_d_cfg = {
        .lib = &iwl2030_lib,                                    \
        .base_params = &iwl2030_base_params,                    \
        .bt_params = &iwl2030_bt_params,                        \
-       .need_dc_calib = true,                                  \
        .need_temp_offset_calib = true,                         \
        .temp_offset_v2 = true,                                 \
        .led_mode = IWL_LED_RF_STATE,                           \
@@ -308,7 +296,6 @@ struct iwl_cfg iwl2030_2bgn_cfg = {
        .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION,       \
        .lib = &iwl2000_lib,                                    \
        .base_params = &iwl2000_base_params,                    \
-       .need_dc_calib = true,                                  \
        .need_temp_offset_calib = true,                         \
        .temp_offset_v2 = true,                                 \
        .led_mode = IWL_LED_RF_STATE,                           \
@@ -338,7 +325,6 @@ struct iwl_cfg iwl105_bgn_d_cfg = {
        .lib = &iwl2030_lib,                                    \
        .base_params = &iwl2030_base_params,                    \
        .bt_params = &iwl2030_bt_params,                        \
-       .need_dc_calib = true,                                  \
        .need_temp_offset_calib = true,                         \
        .temp_offset_v2 = true,                                 \
        .led_mode = IWL_LED_RF_STATE,                           \
index c511c98..6706d7c 100644 (file)
@@ -134,10 +134,10 @@ static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
 
 #define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF  (-5)
 
-static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
+static s32 iwl_temp_calib_to_offset(struct iwl_shared *shrd)
 {
        u16 temperature, voltage;
-       __le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(priv,
+       __le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(shrd,
                                EEPROM_KELVIN_TEMPERATURE);
 
        temperature = le16_to_cpu(temp_calib[0]);
@@ -151,7 +151,7 @@ static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
 {
        const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
        s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) -
-                       iwl_temp_calib_to_offset(priv);
+                       iwl_temp_calib_to_offset(priv->shrd);
 
        hw_params(priv).ct_kill_threshold = threshold * volt2temp_coef;
 }
@@ -186,14 +186,7 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
        iwl5000_set_ct_threshold(priv);
 
        /* Set initial sensitivity parameters */
-       /* Set initial calibration set */
        hw_params(priv).sens = &iwl5000_sensitivity;
-       hw_params(priv).calib_init_cfg =
-               BIT(IWL_CALIB_XTAL)             |
-               BIT(IWL_CALIB_LO)               |
-               BIT(IWL_CALIB_TX_IQ)            |
-               BIT(IWL_CALIB_TX_IQ_PERD)       |
-               BIT(IWL_CALIB_BASE_BAND);
 
        return 0;
 }
@@ -222,14 +215,7 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
        iwl5150_set_ct_threshold(priv);
 
        /* Set initial sensitivity parameters */
-       /* Set initial calibration set */
        hw_params(priv).sens = &iwl5150_sensitivity;
-       hw_params(priv).calib_init_cfg =
-               BIT(IWL_CALIB_LO)               |
-               BIT(IWL_CALIB_TX_IQ)            |
-               BIT(IWL_CALIB_BASE_BAND);
-       if (priv->cfg->need_dc_calib)
-               hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC);
 
        return 0;
 }
@@ -237,7 +223,7 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
 static void iwl5150_temperature(struct iwl_priv *priv)
 {
        u32 vt = 0;
-       s32 offset =  iwl_temp_calib_to_offset(priv);
+       s32 offset =  iwl_temp_calib_to_offset(priv->shrd);
 
        vt = le32_to_cpu(priv->statistics.common.temperature);
        vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset;
@@ -364,6 +350,7 @@ static struct iwl_base_params iwl5000_base_params = {
        .wd_timeout = IWL_LONG_WD_TIMEOUT,
        .max_event_log_size = 512,
        .no_idle_support = true,
+       .wd_disable = true,
 };
 static struct iwl_ht_params iwl5000_ht_params = {
        .ht_greenfield_support = true,
@@ -433,7 +420,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
        .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,       \
        .lib = &iwl5150_lib,                                    \
        .base_params = &iwl5000_base_params,                    \
-       .need_dc_calib = true,                                  \
+       .no_xtal_calib = true,                                  \
        .led_mode = IWL_LED_BLINK,                              \
        .internal_wimax_coex = true
 
index ee3363f..3e277b6 100644 (file)
 #include "iwl-cfg.h"
 
 /* Highest firmware API version supported */
-#define IWL6000_UCODE_API_MAX 4
+#define IWL6000_UCODE_API_MAX 6
 #define IWL6050_UCODE_API_MAX 5
 #define IWL6000G2_UCODE_API_MAX 6
 
 /* Oldest version we won't warn about */
+#define IWL6000_UCODE_API_OK 4
 #define IWL6000G2_UCODE_API_OK 5
 
 /* Lowest firmware API version supported */
@@ -80,7 +81,7 @@ static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
 static void iwl6050_additional_nic_config(struct iwl_priv *priv)
 {
        /* Indicate calibration version to uCode. */
-       if (iwlagn_eeprom_calib_version(priv) >= 6)
+       if (iwl_eeprom_calib_version(priv->shrd) >= 6)
                iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
                                CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
 }
@@ -88,7 +89,7 @@ static void iwl6050_additional_nic_config(struct iwl_priv *priv)
 static void iwl6150_additional_nic_config(struct iwl_priv *priv)
 {
        /* Indicate calibration version to uCode. */
-       if (iwlagn_eeprom_calib_version(priv) >= 6)
+       if (iwl_eeprom_calib_version(priv->shrd) >= 6)
                iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
                                CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
        iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
@@ -164,17 +165,7 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
        iwl6000_set_ct_threshold(priv);
 
        /* Set initial sensitivity parameters */
-       /* Set initial calibration set */
        hw_params(priv).sens = &iwl6000_sensitivity;
-       hw_params(priv).calib_init_cfg =
-               BIT(IWL_CALIB_XTAL)             |
-               BIT(IWL_CALIB_LO)               |
-               BIT(IWL_CALIB_TX_IQ)            |
-               BIT(IWL_CALIB_BASE_BAND);
-       if (priv->cfg->need_dc_calib)
-               hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
-       if (priv->cfg->need_temp_offset_calib)
-               hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
 
        return 0;
 }
@@ -364,7 +355,6 @@ static struct iwl_bt_params iwl6000_bt_params = {
        .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION,       \
        .lib = &iwl6000_lib,                                    \
        .base_params = &iwl6000_g2_base_params,                 \
-       .need_dc_calib = true,                                  \
        .need_temp_offset_calib = true,                         \
        .led_mode = IWL_LED_RF_STATE
 
@@ -406,7 +396,6 @@ struct iwl_cfg iwl6005_2agn_d_cfg = {
        .lib = &iwl6030_lib,                                    \
        .base_params = &iwl6000_g2_base_params,                 \
        .bt_params = &iwl6000_bt_params,                        \
-       .need_dc_calib = true,                                  \
        .need_temp_offset_calib = true,                         \
        .led_mode = IWL_LED_RF_STATE,                           \
        .adv_pm = true                                          \
@@ -469,6 +458,7 @@ struct iwl_cfg iwl130_bg_cfg = {
 #define IWL_DEVICE_6000i                                       \
        .fw_name_pre = IWL6000_FW_PRE,                          \
        .ucode_api_max = IWL6000_UCODE_API_MAX,                 \
+       .ucode_api_ok = IWL6000_UCODE_API_OK,                   \
        .ucode_api_min = IWL6000_UCODE_API_MIN,                 \
        .valid_tx_ant = ANT_BC,         /* .cfg overwrite */    \
        .valid_rx_ant = ANT_BC,         /* .cfg overwrite */    \
@@ -506,7 +496,6 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
        .eeprom_ver = EEPROM_6050_EEPROM_VERSION,               \
        .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,       \
        .base_params = &iwl6050_base_params,                    \
-       .need_dc_calib = true,                                  \
        .led_mode = IWL_LED_BLINK,                              \
        .internal_wimax_coex = true
 
@@ -530,7 +519,6 @@ struct iwl_cfg iwl6050_2abg_cfg = {
        .eeprom_ver = EEPROM_6150_EEPROM_VERSION,               \
        .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION,       \
        .base_params = &iwl6050_base_params,                    \
-       .need_dc_calib = true,                                  \
        .led_mode = IWL_LED_BLINK,                              \
        .internal_wimax_coex = true
 
@@ -549,17 +537,17 @@ struct iwl_cfg iwl6000_3agn_cfg = {
        .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
        .fw_name_pre = IWL6000_FW_PRE,
        .ucode_api_max = IWL6000_UCODE_API_MAX,
+       .ucode_api_ok = IWL6000_UCODE_API_OK,
        .ucode_api_min = IWL6000_UCODE_API_MIN,
        .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
        .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
        .lib = &iwl6000_lib,
        .base_params = &iwl6000_base_params,
        .ht_params = &iwl6000_ht_params,
-       .need_dc_calib = true,
        .led_mode = IWL_LED_BLINK,
 };
 
-MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK));
 MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
index 03bac48..16971a0 100644 (file)
@@ -82,56 +82,64 @@ struct statistics_general_data {
        u32 beacon_energy_c;
 };
 
-int iwl_send_calib_results(struct iwl_priv *priv)
+int iwl_send_calib_results(struct iwl_trans *trans)
 {
-       int ret = 0;
-       int i = 0;
-
        struct iwl_host_cmd hcmd = {
                .id = REPLY_PHY_CALIBRATION_CMD,
                .flags = CMD_SYNC,
        };
-
-       for (i = 0; i < IWL_CALIB_MAX; i++) {
-               if ((BIT(i) & hw_params(priv).calib_init_cfg) &&
-                   priv->calib_results[i].buf) {
-                       hcmd.len[0] = priv->calib_results[i].buf_len;
-                       hcmd.data[0] = priv->calib_results[i].buf;
-                       hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
-                       ret = iwl_trans_send_cmd(trans(priv), &hcmd);
-                       if (ret) {
-                               IWL_ERR(priv, "Error %d iteration %d\n",
-                                       ret, i);
-                               break;
-                       }
+       struct iwl_calib_result *res;
+
+       list_for_each_entry(res, &trans->calib_results, list) {
+               int ret;
+
+               hcmd.len[0] = res->cmd_len;
+               hcmd.data[0] = &res->hdr;
+               hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
+               ret = iwl_trans_send_cmd(trans, &hcmd);
+               if (ret) {
+                       IWL_ERR(trans, "Error %d on calib cmd %d\n",
+                               ret, res->hdr.op_code);
+                       return ret;
                }
        }
 
-       return ret;
+       return 0;
 }
 
-int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len)
+int iwl_calib_set(struct iwl_trans *trans,
+                 const struct iwl_calib_hdr *cmd, int len)
 {
-       if (res->buf_len != len) {
-               kfree(res->buf);
-               res->buf = kzalloc(len, GFP_ATOMIC);
-       }
-       if (unlikely(res->buf == NULL))
+       struct iwl_calib_result *res, *tmp;
+
+       res = kmalloc(sizeof(*res) + len - sizeof(struct iwl_calib_hdr),
+                     GFP_ATOMIC);
+       if (!res)
                return -ENOMEM;
+       memcpy(&res->hdr, cmd, len);
+       res->cmd_len = len;
+
+       list_for_each_entry(tmp, &trans->calib_results, list) {
+               if (tmp->hdr.op_code == res->hdr.op_code) {
+                       list_replace(&tmp->list, &res->list);
+                       kfree(tmp);
+                       return 0;
+               }
+       }
+
+       /* wasn't in list already */
+       list_add_tail(&res->list, &trans->calib_results);
 
-       res->buf_len = len;
-       memcpy(res->buf, buf, len);
        return 0;
 }
 
-void iwl_calib_free_results(struct iwl_priv *priv)
+void iwl_calib_free_results(struct iwl_trans *trans)
 {
-       int i;
+       struct iwl_calib_result *res, *tmp;
 
-       for (i = 0; i < IWL_CALIB_MAX; i++) {
-               kfree(priv->calib_results[i].buf);
-               priv->calib_results[i].buf = NULL;
-               priv->calib_results[i].buf_len = 0;
+       list_for_each_entry_safe(res, tmp, &trans->calib_results, list) {
+               list_del(&res->list);
+               kfree(res);
        }
 }
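
Editorial note: the new iwl_calib_set() keeps at most one result per calibration op-code — an entry with a matching hdr.op_code is replaced in place, otherwise the new result is appended to trans->calib_results. A small user-space sketch of that replace-or-append pattern (types and names here are illustrative stand-ins, not the driver's):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* illustrative stand-in for the driver's calibration result entries */
	struct calib_result {
		struct calib_result *next;
		int op_code;
		size_t len;
		unsigned char data[];
	};

	/* replace an existing entry with the same op_code, or append a new one */
	static int calib_set(struct calib_result **head, int op_code,
			     const void *buf, size_t len)
	{
		struct calib_result *res = malloc(sizeof(*res) + len);
		struct calib_result **p;

		if (!res)
			return -1;
		res->op_code = op_code;
		res->len = len;
		memcpy(res->data, buf, len);

		for (p = head; *p; p = &(*p)->next) {
			if ((*p)->op_code == op_code) {
				res->next = (*p)->next;	/* take over the old slot */
				free(*p);
				*p = res;
				return 0;
			}
		}
		res->next = NULL;			/* wasn't in the list already */
		*p = res;
		return 0;
	}

	int main(void)
	{
		struct calib_result *head = NULL, *r;

		calib_set(&head, 1, "aa", 2);
		calib_set(&head, 2, "bb", 2);
		calib_set(&head, 1, "cc", 2);		/* replaces the op_code 1 entry */

		for (r = head; r; r = r->next)
			printf("op %d: %.2s\n", r->op_code, (char *)r->data);
		return 0;
	}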
 
index a869fc9..10275ce 100644 (file)
@@ -72,8 +72,4 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv);
 void iwl_init_sensitivity(struct iwl_priv *priv);
 void iwl_reset_run_time_calib(struct iwl_priv *priv);
 
-int iwl_send_calib_results(struct iwl_priv *priv);
-int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len);
-void iwl_calib_free_results(struct iwl_priv *priv);
-
 #endif /* __iwl_calib_h__ */
index 0bc9622..057f952 100644 (file)
@@ -92,11 +92,11 @@ void iwlagn_temperature(struct iwl_priv *priv)
        iwl_tt_handler(priv);
 }
 
-u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
+u16 iwl_eeprom_calib_version(struct iwl_shared *shrd)
 {
        struct iwl_eeprom_calib_hdr *hdr;
 
-       hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
+       hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(shrd,
                                                        EEPROM_CALIB_ALL);
        return hdr->version;
 
@@ -105,7 +105,7 @@ u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
 /*
  * EEPROM
  */
-static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
+static u32 eeprom_indirect_address(const struct iwl_shared *shrd, u32 address)
 {
        u16 offset = 0;
 
@@ -114,31 +114,31 @@ static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
 
        switch (address & INDIRECT_TYPE_MSK) {
        case INDIRECT_HOST:
-               offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST);
+               offset = iwl_eeprom_query16(shrd, EEPROM_LINK_HOST);
                break;
        case INDIRECT_GENERAL:
-               offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL);
+               offset = iwl_eeprom_query16(shrd, EEPROM_LINK_GENERAL);
                break;
        case INDIRECT_REGULATORY:
-               offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
+               offset = iwl_eeprom_query16(shrd, EEPROM_LINK_REGULATORY);
                break;
        case INDIRECT_TXP_LIMIT:
-               offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT);
+               offset = iwl_eeprom_query16(shrd, EEPROM_LINK_TXP_LIMIT);
                break;
        case INDIRECT_TXP_LIMIT_SIZE:
-               offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE);
+               offset = iwl_eeprom_query16(shrd, EEPROM_LINK_TXP_LIMIT_SIZE);
                break;
        case INDIRECT_CALIBRATION:
-               offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
+               offset = iwl_eeprom_query16(shrd, EEPROM_LINK_CALIBRATION);
                break;
        case INDIRECT_PROCESS_ADJST:
-               offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST);
+               offset = iwl_eeprom_query16(shrd, EEPROM_LINK_PROCESS_ADJST);
                break;
        case INDIRECT_OTHERS:
-               offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);
+               offset = iwl_eeprom_query16(shrd, EEPROM_LINK_OTHERS);
                break;
        default:
-               IWL_ERR(priv, "illegal indirect type: 0x%X\n",
+               IWL_ERR(shrd->trans, "illegal indirect type: 0x%X\n",
                address & INDIRECT_TYPE_MSK);
                break;
        }
@@ -147,11 +147,11 @@ static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
        return (address & ADDRESS_MSK) + (offset << 1);
 }
 
-const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
+const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset)
 {
-       u32 address = eeprom_indirect_address(priv, offset);
-       BUG_ON(address >= priv->cfg->base_params->eeprom_size);
-       return &priv->eeprom[address];
+       u32 address = eeprom_indirect_address(shrd, offset);
+       BUG_ON(address >= shrd->priv->cfg->base_params->eeprom_size);
+       return &shrd->eeprom[address];
 }
 
 struct iwl_mod_params iwlagn_mod_params = {
@@ -934,57 +934,6 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
        return ant;
 }
 
-/* notification wait support */
-void iwlagn_init_notification_wait(struct iwl_priv *priv,
-                                  struct iwl_notification_wait *wait_entry,
-                                  u8 cmd,
-                                  void (*fn)(struct iwl_priv *priv,
-                                             struct iwl_rx_packet *pkt,
-                                             void *data),
-                                  void *fn_data)
-{
-       wait_entry->fn = fn;
-       wait_entry->fn_data = fn_data;
-       wait_entry->cmd = cmd;
-       wait_entry->triggered = false;
-       wait_entry->aborted = false;
-
-       spin_lock_bh(&priv->notif_wait_lock);
-       list_add(&wait_entry->list, &priv->notif_waits);
-       spin_unlock_bh(&priv->notif_wait_lock);
-}
-
-int iwlagn_wait_notification(struct iwl_priv *priv,
-                            struct iwl_notification_wait *wait_entry,
-                            unsigned long timeout)
-{
-       int ret;
-
-       ret = wait_event_timeout(priv->notif_waitq,
-                                wait_entry->triggered || wait_entry->aborted,
-                                timeout);
-
-       spin_lock_bh(&priv->notif_wait_lock);
-       list_del(&wait_entry->list);
-       spin_unlock_bh(&priv->notif_wait_lock);
-
-       if (wait_entry->aborted)
-               return -EIO;
-
-       /* return value is always >= 0 */
-       if (ret <= 0)
-               return -ETIMEDOUT;
-       return 0;
-}
-
-void iwlagn_remove_notification(struct iwl_priv *priv,
-                               struct iwl_notification_wait *wait_entry)
-{
-       spin_lock_bh(&priv->notif_wait_lock);
-       list_del(&wait_entry->list);
-       spin_unlock_bh(&priv->notif_wait_lock);
-}
-
 #ifdef CONFIG_PM_SLEEP
 static void iwlagn_convert_p1k(u16 *p1k, __le16 *out)
 {
@@ -1208,7 +1157,7 @@ int iwlagn_suspend(struct iwl_priv *priv,
         * For QoS counters, we store the one to use next, so subtract 0x10
         * since the uCode will add 0x10 before using the value.
         */
-       for (i = 0; i < 8; i++) {
+       for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
                seq = priv->shrd->tid_data[IWL_AP_ID][i].seq_number;
                seq -= 0x10;
                wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq);
index 359c47a..a23835a 100644 (file)
@@ -298,7 +298,7 @@ static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
        } else
                return IWL_MAX_TID_COUNT;
 
-       if (unlikely(tid >= TID_MAX_LOAD_COUNT))
+       if (unlikely(tid >= IWL_MAX_TID_COUNT))
                return IWL_MAX_TID_COUNT;
 
        tl = &lq_data->load[tid];
@@ -352,7 +352,7 @@ static void rs_program_fix_rate(struct iwl_priv *priv,
        lq_sta->active_mimo2_rate  = 0x1FD0;    /* 6 - 60 MBits, no 9, no CCK */
        lq_sta->active_mimo3_rate  = 0x1FD0;    /* 6 - 60 MBits, no 9, no CCK */
 
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
        /* testmode has higher priority to overwirte the fixed rate */
        if (priv->tm_fixed_rate)
                lq_sta->dbg_fixed_rate = priv->tm_fixed_rate;
@@ -379,7 +379,7 @@ static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
        s32 index;
        struct iwl_traffic_load *tl = NULL;
 
-       if (tid >= TID_MAX_LOAD_COUNT)
+       if (tid >= IWL_MAX_TID_COUNT)
                return 0;
 
        tl = &(lq_data->load[tid]);
@@ -444,11 +444,11 @@ static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
                              struct iwl_lq_sta *lq_data,
                              struct ieee80211_sta *sta)
 {
-       if (tid < TID_MAX_LOAD_COUNT)
+       if (tid < IWL_MAX_TID_COUNT)
                rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
        else
-               IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
-                       tid, TID_MAX_LOAD_COUNT);
+               IWL_ERR(priv, "tid exceeds max TID count: %d/%d\n",
+                       tid, IWL_MAX_TID_COUNT);
 }
 
 static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
@@ -1081,7 +1081,7 @@ done:
        if (sta && sta->supp_rates[sband->band])
                rs_rate_scale_perform(priv, skb, sta, lq_sta);
 
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_SVTOOL)
+#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_TESTMODE)
        if ((priv->tm_fixed_rate) &&
            (priv->tm_fixed_rate != lq_sta->dbg_fixed_rate))
                rs_program_fix_rate(priv, lq_sta);
@@ -2904,7 +2904,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
        if (sband->band == IEEE80211_BAND_5GHZ)
                lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
        lq_sta->is_agg = 0;
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
        priv->tm_fixed_rate = 0;
 #endif
 #ifdef CONFIG_MAC80211_DEBUGFS
index f4f6deb..6675b3c 100644 (file)
@@ -281,7 +281,6 @@ enum {
 #define TID_QUEUE_CELL_SPACING 50      /*mS */
 #define TID_QUEUE_MAX_SIZE     20
 #define TID_ROUND_VALUE                5       /* mS */
-#define TID_MAX_LOAD_COUNT     8
 
 #define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
 #define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
@@ -402,7 +401,7 @@ struct iwl_lq_sta {
 
        struct iwl_link_quality_cmd lq;
        struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
-       struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
+       struct iwl_traffic_load load[IWL_MAX_TID_COUNT];
        u8 tx_agg_tid_en;
 #ifdef CONFIG_MAC80211_DEBUGFS
        struct dentry *rs_sta_dbgfs_scale_table_file;
index fdb4c37..9001c23 100644 (file)
@@ -117,6 +117,7 @@ const char *get_cmd_string(u8 cmd)
                IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS);
                IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL);
                IWL_CMD(REPLY_WOWLAN_GET_STATUS);
+               IWL_CMD(REPLY_D3_CONFIG);
        default:
                return "UNKNOWN";
 
@@ -1130,9 +1131,9 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
        priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
 
        /* set up notification wait support */
-       spin_lock_init(&priv->notif_wait_lock);
-       INIT_LIST_HEAD(&priv->notif_waits);
-       init_waitqueue_head(&priv->notif_waitq);
+       spin_lock_init(&priv->shrd->notif_wait_lock);
+       INIT_LIST_HEAD(&priv->shrd->notif_waits);
+       init_waitqueue_head(&priv->shrd->notif_waitq);
 
        /* Set up BT Rx handlers */
        if (priv->cfg->lib->bt_rx_handler_setup)
@@ -1151,11 +1152,11 @@ int iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
         * even if the RX handler consumes the RXB we have
         * access to it in the notification wait entry.
         */
-       if (!list_empty(&priv->notif_waits)) {
+       if (!list_empty(&priv->shrd->notif_waits)) {
                struct iwl_notification_wait *w;
 
-               spin_lock(&priv->notif_wait_lock);
-               list_for_each_entry(w, &priv->notif_waits, list) {
+               spin_lock(&priv->shrd->notif_wait_lock);
+               list_for_each_entry(w, &priv->shrd->notif_waits, list) {
                        if (w->cmd != pkt->hdr.cmd)
                                continue;
                        IWL_DEBUG_RX(priv,
@@ -1164,11 +1165,11 @@ int iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
                                pkt->hdr.cmd);
                        w->triggered = true;
                        if (w->fn)
-                               w->fn(priv, pkt, w->fn_data);
+                               w->fn(trans(priv), pkt, w->fn_data);
                }
-               spin_unlock(&priv->notif_wait_lock);
+               spin_unlock(&priv->shrd->notif_wait_lock);
 
-               wake_up_all(&priv->notif_waitq);
+               wake_up_all(&priv->shrd->notif_waitq);
        }
 
        if (priv->pre_rx_handler)
index 8de97f5..d21f535 100644 (file)
@@ -60,7 +60,7 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
        u8 old_dev_type = send->dev_type;
        int ret;
 
-       iwlagn_init_notification_wait(priv, &disable_wait,
+       iwl_init_notification_wait(priv->shrd, &disable_wait,
                                      REPLY_WIPAN_DEACTIVATION_COMPLETE,
                                      NULL, NULL);
 
@@ -74,9 +74,9 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
 
        if (ret) {
                IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
-               iwlagn_remove_notification(priv, &disable_wait);
+               iwl_remove_notification(priv->shrd, &disable_wait);
        } else {
-               ret = iwlagn_wait_notification(priv, &disable_wait, HZ);
+               ret = iwl_wait_notification(priv->shrd, &disable_wait, HZ);
                if (ret)
                        IWL_ERR(priv, "Timed out waiting for PAN disable\n");
        }
@@ -529,6 +529,24 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
        return 0;
 }
 
+void iwlagn_config_ht40(struct ieee80211_conf *conf,
+       struct iwl_rxon_context *ctx)
+{
+       if (conf_is_ht40_minus(conf)) {
+               ctx->ht.extension_chan_offset =
+                       IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+               ctx->ht.is_40mhz = true;
+       } else if (conf_is_ht40_plus(conf)) {
+               ctx->ht.extension_chan_offset =
+                       IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+               ctx->ht.is_40mhz = true;
+       } else {
+               ctx->ht.extension_chan_offset =
+                       IEEE80211_HT_PARAM_CHA_SEC_NONE;
+               ctx->ht.is_40mhz = false;
+       }
+}
+
 int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
 {
        struct iwl_priv *priv = hw->priv;
@@ -590,19 +608,11 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
                                ctx->ht.enabled = conf_is_ht(conf);
 
                        if (ctx->ht.enabled) {
-                               if (conf_is_ht40_minus(conf)) {
-                                       ctx->ht.extension_chan_offset =
-                                               IEEE80211_HT_PARAM_CHA_SEC_BELOW;
-                                       ctx->ht.is_40mhz = true;
-                               } else if (conf_is_ht40_plus(conf)) {
-                                       ctx->ht.extension_chan_offset =
-                                               IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
-                                       ctx->ht.is_40mhz = true;
-                               } else {
-                                       ctx->ht.extension_chan_offset =
-                                               IEEE80211_HT_PARAM_CHA_SEC_NONE;
-                                       ctx->ht.is_40mhz = false;
-                               }
+                               /* if HT40 is used, it should not change
+                                * after association except on channel switch */
+                               if (!ctx->ht.is_40mhz ||
+                                               !iwl_is_associated_ctx(ctx))
+                                       iwlagn_config_ht40(conf, ctx);
                        } else
                                ctx->ht.is_40mhz = false;
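The change above factors the HT40 decision into iwlagn_config_ht40() and, in iwlagn_mac_config(), only re-runs it when the context is not already on a 40 MHz channel or is not yet associated, so an established HT40 setup is no longer rewritten outside of a channel switch. A minimal sketch of that guard, assuming hypothetical stand-in types rather than the mac80211/iwlwifi structures:

#include <stdbool.h>
#include <stdio.h>

enum sec_chan { SEC_NONE, SEC_ABOVE, SEC_BELOW };	/* extension channel offset */
struct ht_conf  { bool ht40_plus, ht40_minus; };	/* what mac80211 asks for */
struct ht_state { enum sec_chan offset; bool is_40mhz; bool associated; };

/* same shape as iwlagn_config_ht40() in the hunk above */
static void config_ht40(const struct ht_conf *conf, struct ht_state *ht)
{
	if (conf->ht40_minus) {
		ht->offset = SEC_BELOW;
		ht->is_40mhz = true;
	} else if (conf->ht40_plus) {
		ht->offset = SEC_ABOVE;
		ht->is_40mhz = true;
	} else {
		ht->offset = SEC_NONE;
		ht->is_40mhz = false;
	}
}

int main(void)
{
	struct ht_conf conf = { .ht40_plus = false, .ht40_minus = false };
	struct ht_state ht = { .offset = SEC_ABOVE, .is_40mhz = true,
			       .associated = true };

	/* the guard: once associated on 40 MHz, a transient 20 MHz config
	 * no longer clears the extension channel offset */
	if (!ht.is_40mhz || !ht.associated)
		config_ht40(&conf, &ht);

	printf("is_40mhz=%d offset=%d\n", ht.is_40mhz, (int)ht.offset);
	return 0;
}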
 
index 901fd94..63d948d 100644 (file)
@@ -135,8 +135,8 @@ static u16 iwlagn_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
        u16 size = (u16)sizeof(struct iwl_addsta_cmd);
        struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data;
        memcpy(addsta, cmd, size);
-       /* resrved in 5000 */
-       addsta->rate_n_flags = cpu_to_le16(0);
+       /* reserved in agn */
+       addsta->legacy_reserved = cpu_to_le16(0);
        return size;
 }
 
@@ -1250,9 +1250,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
 
        switch (keyconf->cipher) {
        case WLAN_CIPHER_SUITE_TKIP:
-               keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
-               keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-
                if (sta)
                        addr = sta->addr;
                else /* station mode case only */
@@ -1265,8 +1262,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
                                          seq.tkip.iv32, p1k, CMD_SYNC);
                break;
        case WLAN_CIPHER_SUITE_CCMP:
-               keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-               /* fall through */
        case WLAN_CIPHER_SUITE_WEP40:
        case WLAN_CIPHER_SUITE_WEP104:
                ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
index a1a95d5..81754cd 100644 (file)
@@ -91,7 +91,10 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
                tx_cmd->tid_tspec = qc[0] & 0xf;
                tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
        } else {
-               tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+               if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+                       tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+               else
+                       tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
        }
 
        iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);
@@ -148,7 +151,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
        if (ieee80211_is_data(fc)) {
                tx_cmd->initial_rate_index = 0;
                tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
                if (priv->tm_fixed_rate) {
                        /*
                         * rate overwrite by testmode
@@ -161,7 +164,8 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
                }
 #endif
                return;
-       }
+       } else if (ieee80211_is_back_req(fc))
+               tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
 
        /**
         * If the current TX rate stored in mac80211 has the MCS bit set, it's
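Two behavioural points in this hunk are easy to miss: for frames without a QoS control field, the sequence-control flag is now set only when mac80211 asked the driver to assign the sequence number (IEEE80211_TX_CTL_ASSIGN_SEQ), and block-ack request frames keep the station-rate flag just like data frames. A small self-contained sketch of that flag logic; the mask values and build_tx_flags() are illustrative stand-ins, not the firmware interface:

#include <stdint.h>
#include <stdio.h>

#define FLG_SEQ_CTL	(1u << 13)	/* "device assigns sequence number" */
#define FLG_STA_RATE	(1u << 4)	/* "use the station's rate scaling" */

static uint32_t build_tx_flags(int is_data, int is_bar, int assign_seq)
{
	uint32_t flags = 0;

	/* mirror of the else-branch above: honour ASSIGN_SEQ explicitly */
	if (assign_seq)
		flags |= FLG_SEQ_CTL;
	else
		flags &= ~FLG_SEQ_CTL;

	/* data frames and BAR frames both use station rate scaling now */
	if (is_data || is_bar)
		flags |= FLG_STA_RATE;

	return flags;
}

int main(void)
{
	printf("BAR frame:  0x%08x\n", build_tx_flags(0, 1, 0));
	printf("data frame: 0x%08x\n", build_tx_flags(1, 0, 1));
	return 0;
}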
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
deleted file mode 100644 (file)
index 9ec315b..0000000
+++ /dev/null
@@ -1,712 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/dma-mapping.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-agn-hw.h"
-#include "iwl-agn.h"
-#include "iwl-agn-calib.h"
-#include "iwl-trans.h"
-#include "iwl-fh.h"
-
-static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
-       {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
-        0, COEX_UNASSOC_IDLE_FLAGS},
-       {COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
-        0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
-       {COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
-        0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
-       {COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
-        0, COEX_CALIBRATION_FLAGS},
-       {COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
-        0, COEX_PERIODIC_CALIBRATION_FLAGS},
-       {COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
-        0, COEX_CONNECTION_ESTAB_FLAGS},
-       {COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
-        0, COEX_ASSOCIATED_IDLE_FLAGS},
-       {COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
-        0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
-       {COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
-        0, COEX_ASSOC_AUTO_SCAN_FLAGS},
-       {COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
-        0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
-       {COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
-       {COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
-       {COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
-        0, COEX_STAND_ALONE_DEBUG_FLAGS},
-       {COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
-        0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
-       {COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
-       {COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
-};
-
-/******************************************************************************
- *
- * uCode download functions
- *
- ******************************************************************************/
-
-static void iwl_free_fw_desc(struct iwl_bus *bus, struct fw_desc *desc)
-{
-       if (desc->v_addr)
-               dma_free_coherent(bus->dev, desc->len,
-                                 desc->v_addr, desc->p_addr);
-       desc->v_addr = NULL;
-       desc->len = 0;
-}
-
-static void iwl_free_fw_img(struct iwl_bus *bus, struct fw_img *img)
-{
-       iwl_free_fw_desc(bus, &img->code);
-       iwl_free_fw_desc(bus, &img->data);
-}
-
-void iwl_dealloc_ucode(struct iwl_trans *trans)
-{
-       iwl_free_fw_img(bus(trans), &trans->ucode_rt);
-       iwl_free_fw_img(bus(trans), &trans->ucode_init);
-       iwl_free_fw_img(bus(trans), &trans->ucode_wowlan);
-}
-
-int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc,
-                     const void *data, size_t len)
-{
-       if (!len) {
-               desc->v_addr = NULL;
-               return -EINVAL;
-       }
-
-       desc->v_addr = dma_alloc_coherent(bus->dev, len,
-                                         &desc->p_addr, GFP_KERNEL);
-       if (!desc->v_addr)
-               return -ENOMEM;
-
-       desc->len = len;
-       memcpy(desc->v_addr, data, len);
-       return 0;
-}
-
-/*
- * ucode
- */
-static int iwlagn_load_section(struct iwl_trans *trans, const char *name,
-                               struct fw_desc *image, u32 dst_addr)
-{
-       struct iwl_bus *bus = bus(trans);
-       dma_addr_t phy_addr = image->p_addr;
-       u32 byte_cnt = image->len;
-       int ret;
-
-       trans->ucode_write_complete = 0;
-
-       iwl_write_direct32(bus,
-               FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
-               FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
-
-       iwl_write_direct32(bus,
-               FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
-
-       iwl_write_direct32(bus,
-               FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
-               phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
-
-       iwl_write_direct32(bus,
-               FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
-               (iwl_get_dma_hi_addr(phy_addr)
-                       << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
-
-       iwl_write_direct32(bus,
-               FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
-               1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
-               1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
-               FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
-
-       iwl_write_direct32(bus,
-               FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
-               FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE       |
-               FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE    |
-               FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
-
-       IWL_DEBUG_FW(bus, "%s uCode section being loaded...\n", name);
-       ret = wait_event_timeout(trans->shrd->wait_command_queue,
-                                trans->ucode_write_complete, 5 * HZ);
-       if (!ret) {
-               IWL_ERR(trans, "Could not load the %s uCode section\n",
-                       name);
-               return -ETIMEDOUT;
-       }
-
-       return 0;
-}
-
-static inline struct fw_img *iwl_get_ucode_image(struct iwl_trans *trans,
-                                       enum iwl_ucode_type ucode_type)
-{
-       switch (ucode_type) {
-       case IWL_UCODE_INIT:
-               return &trans->ucode_init;
-       case IWL_UCODE_WOWLAN:
-               return &trans->ucode_wowlan;
-       case IWL_UCODE_REGULAR:
-               return &trans->ucode_rt;
-       case IWL_UCODE_NONE:
-               break;
-       }
-       return NULL;
-}
-
-static int iwlagn_load_given_ucode(struct iwl_trans *trans,
-                                  enum iwl_ucode_type ucode_type)
-{
-       int ret = 0;
-       struct fw_img *image = iwl_get_ucode_image(trans, ucode_type);
-
-
-       if (!image) {
-               IWL_ERR(trans, "Invalid ucode requested (%d)\n",
-                       ucode_type);
-               return -EINVAL;
-       }
-
-       ret = iwlagn_load_section(trans, "INST", &image->code,
-                                  IWLAGN_RTC_INST_LOWER_BOUND);
-       if (ret)
-               return ret;
-
-       return iwlagn_load_section(trans, "DATA", &image->data,
-                                   IWLAGN_RTC_DATA_LOWER_BOUND);
-}
-
-/*
- *  Calibration
- */
-static int iwlagn_set_Xtal_calib(struct iwl_priv *priv)
-{
-       struct iwl_calib_xtal_freq_cmd cmd;
-       __le16 *xtal_calib =
-               (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL);
-
-       iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD);
-       cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
-       cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
-       return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
-                            (u8 *)&cmd, sizeof(cmd));
-}
-
-static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
-{
-       struct iwl_calib_temperature_offset_cmd cmd;
-       __le16 *offset_calib =
-               (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
-
-       memset(&cmd, 0, sizeof(cmd));
-       iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
-       memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(*offset_calib));
-       if (!(cmd.radio_sensor_offset))
-               cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
-
-       IWL_DEBUG_CALIB(priv, "Radio sensor offset: %d\n",
-                       le16_to_cpu(cmd.radio_sensor_offset));
-       return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET],
-                            (u8 *)&cmd, sizeof(cmd));
-}
-
-static int iwlagn_set_temperature_offset_calib_v2(struct iwl_priv *priv)
-{
-       struct iwl_calib_temperature_offset_v2_cmd cmd;
-       __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv,
-                                    EEPROM_KELVIN_TEMPERATURE);
-       __le16 *offset_calib_low =
-               (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
-       struct iwl_eeprom_calib_hdr *hdr;
-
-       memset(&cmd, 0, sizeof(cmd));
-       iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
-       hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
-                                                       EEPROM_CALIB_ALL);
-       memcpy(&cmd.radio_sensor_offset_high, offset_calib_high,
-               sizeof(*offset_calib_high));
-       memcpy(&cmd.radio_sensor_offset_low, offset_calib_low,
-               sizeof(*offset_calib_low));
-       if (!(cmd.radio_sensor_offset_low)) {
-               IWL_DEBUG_CALIB(priv, "no info in EEPROM, use default\n");
-               cmd.radio_sensor_offset_low = DEFAULT_RADIO_SENSOR_OFFSET;
-               cmd.radio_sensor_offset_high = DEFAULT_RADIO_SENSOR_OFFSET;
-       }
-       memcpy(&cmd.burntVoltageRef, &hdr->voltage,
-               sizeof(hdr->voltage));
-
-       IWL_DEBUG_CALIB(priv, "Radio sensor offset high: %d\n",
-                       le16_to_cpu(cmd.radio_sensor_offset_high));
-       IWL_DEBUG_CALIB(priv, "Radio sensor offset low: %d\n",
-                       le16_to_cpu(cmd.radio_sensor_offset_low));
-       IWL_DEBUG_CALIB(priv, "Voltage Ref: %d\n",
-                       le16_to_cpu(cmd.burntVoltageRef));
-
-       return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET],
-                            (u8 *)&cmd, sizeof(cmd));
-}
-
-static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
-{
-       struct iwl_calib_cfg_cmd calib_cfg_cmd;
-       struct iwl_host_cmd cmd = {
-               .id = CALIBRATION_CFG_CMD,
-               .len = { sizeof(struct iwl_calib_cfg_cmd), },
-               .data = { &calib_cfg_cmd, },
-       };
-
-       memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
-       calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
-       calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
-       calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
-       calib_cfg_cmd.ucd_calib_cfg.flags =
-               IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK;
-
-       return iwl_trans_send_cmd(trans(priv), &cmd);
-}
-
-int iwlagn_rx_calib_result(struct iwl_priv *priv,
-                           struct iwl_rx_mem_buffer *rxb,
-                           struct iwl_device_cmd *cmd)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
-       int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
-       int index;
-
-       /* reduce the size of the length field itself */
-       len -= 4;
-
-       /* Define the order in which the results will be sent to the runtime
-        * uCode. iwl_send_calib_results sends them in a row according to
-        * their index. We sort them here
-        */
-       switch (hdr->op_code) {
-       case IWL_PHY_CALIBRATE_DC_CMD:
-               index = IWL_CALIB_DC;
-               break;
-       case IWL_PHY_CALIBRATE_LO_CMD:
-               index = IWL_CALIB_LO;
-               break;
-       case IWL_PHY_CALIBRATE_TX_IQ_CMD:
-               index = IWL_CALIB_TX_IQ;
-               break;
-       case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
-               index = IWL_CALIB_TX_IQ_PERD;
-               break;
-       case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
-               index = IWL_CALIB_BASE_BAND;
-               break;
-       default:
-               IWL_ERR(priv, "Unknown calibration notification %d\n",
-                         hdr->op_code);
-               return -1;
-       }
-       iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
-       return 0;
-}
-
-int iwlagn_init_alive_start(struct iwl_priv *priv)
-{
-       int ret;
-
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->advanced_bt_coexist) {
-               /*
-                * Tell uCode we are ready to perform calibration
-                * need to perform this before any calibration
-                * no need to close the envlope since we are going
-                * to load the runtime uCode later.
-                */
-               ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
-                       BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
-               if (ret)
-                       return ret;
-
-       }
-
-       ret = iwlagn_send_calib_cfg(priv);
-       if (ret)
-               return ret;
-
-       /**
-        * temperature offset calibration is only needed for runtime ucode,
-        * so prepare the value now.
-        */
-       if (priv->cfg->need_temp_offset_calib) {
-               if (priv->cfg->temp_offset_v2)
-                       return iwlagn_set_temperature_offset_calib_v2(priv);
-               else
-                       return iwlagn_set_temperature_offset_calib(priv);
-       }
-
-       return 0;
-}
-
-static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
-{
-       struct iwl_wimax_coex_cmd coex_cmd;
-
-       if (priv->cfg->base_params->support_wimax_coexist) {
-               /* UnMask wake up src at associated sleep */
-               coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
-
-               /* UnMask wake up src at unassociated sleep */
-               coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
-               memcpy(coex_cmd.sta_prio, cu_priorities,
-                       sizeof(struct iwl_wimax_coex_event_entry) *
-                        COEX_NUM_OF_EVENTS);
-
-               /* enabling the coexistence feature */
-               coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;
-
-               /* enabling the priorities tables */
-               coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
-       } else {
-               /* coexistence is disabled */
-               memset(&coex_cmd, 0, sizeof(coex_cmd));
-       }
-       return iwl_trans_send_cmd_pdu(trans(priv),
-                               COEX_PRIORITY_TABLE_CMD, CMD_SYNC,
-                               sizeof(coex_cmd), &coex_cmd);
-}
-
-static const u8 iwlagn_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
-       ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
-               (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
-       ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
-               (1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
-       ((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
-               (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
-       ((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
-               (1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
-       ((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
-               (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
-       ((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
-               (1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
-       ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
-               (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
-       ((BT_COEX_PRIO_TBL_PRIO_COEX_OFF << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
-               (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
-       ((BT_COEX_PRIO_TBL_PRIO_COEX_ON << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
-               (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
-       0, 0, 0, 0, 0, 0, 0
-};
-
-void iwlagn_send_prio_tbl(struct iwl_priv *priv)
-{
-       struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd;
-
-       memcpy(prio_tbl_cmd.prio_tbl, iwlagn_bt_prio_tbl,
-               sizeof(iwlagn_bt_prio_tbl));
-       if (iwl_trans_send_cmd_pdu(trans(priv),
-                               REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC,
-                               sizeof(prio_tbl_cmd), &prio_tbl_cmd))
-               IWL_ERR(priv, "failed to send BT prio tbl command\n");
-}
-
-int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
-{
-       struct iwl_bt_coex_prot_env_cmd env_cmd;
-       int ret;
-
-       env_cmd.action = action;
-       env_cmd.type = type;
-       ret = iwl_trans_send_cmd_pdu(trans(priv),
-                              REPLY_BT_COEX_PROT_ENV, CMD_SYNC,
-                              sizeof(env_cmd), &env_cmd);
-       if (ret)
-               IWL_ERR(priv, "failed to send BT env command\n");
-       return ret;
-}
-
-
-static int iwlagn_alive_notify(struct iwl_priv *priv)
-{
-       struct iwl_rxon_context *ctx;
-       int ret;
-
-       if (!priv->tx_cmd_pool)
-               priv->tx_cmd_pool =
-                       kmem_cache_create("iwlagn_dev_cmd",
-                                         sizeof(struct iwl_device_cmd),
-                                         sizeof(void *), 0, NULL);
-
-       if (!priv->tx_cmd_pool)
-               return -ENOMEM;
-
-       iwl_trans_tx_start(trans(priv));
-       for_each_context(priv, ctx)
-               ctx->last_tx_rejected = false;
-
-       ret = iwlagn_send_wimax_coex(priv);
-       if (ret)
-               return ret;
-
-       ret = iwlagn_set_Xtal_calib(priv);
-       if (ret)
-               return ret;
-
-       return iwl_send_calib_results(priv);
-}
-
-
-/**
- * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host,
- *   using sample data 100 bytes apart.  If these sample points are good,
- *   it's a pretty good bet that everything between them is good, too.
- */
-static int iwl_verify_inst_sparse(struct iwl_bus *bus,
-                                     struct fw_desc *fw_desc)
-{
-       __le32 *image = (__le32 *)fw_desc->v_addr;
-       u32 len = fw_desc->len;
-       u32 val;
-       u32 i;
-
-       IWL_DEBUG_FW(bus, "ucode inst image size is %u\n", len);
-
-       for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
-               /* read data comes through single port, auto-incr addr */
-               /* NOTE: Use the debugless read so we don't flood kernel log
-                * if IWL_DL_IO is set */
-               iwl_write_direct32(bus, HBUS_TARG_MEM_RADDR,
-                       i + IWLAGN_RTC_INST_LOWER_BOUND);
-               val = iwl_read32(bus, HBUS_TARG_MEM_RDAT);
-               if (val != le32_to_cpu(*image))
-                       return -EIO;
-       }
-
-       return 0;
-}
-
-static void iwl_print_mismatch_inst(struct iwl_bus *bus,
-                                   struct fw_desc *fw_desc)
-{
-       __le32 *image = (__le32 *)fw_desc->v_addr;
-       u32 len = fw_desc->len;
-       u32 val;
-       u32 offs;
-       int errors = 0;
-
-       IWL_DEBUG_FW(bus, "ucode inst image size is %u\n", len);
-
-       iwl_write_direct32(bus, HBUS_TARG_MEM_RADDR,
-                          IWLAGN_RTC_INST_LOWER_BOUND);
-
-       for (offs = 0;
-            offs < len && errors < 20;
-            offs += sizeof(u32), image++) {
-               /* read data comes through single port, auto-incr addr */
-               val = iwl_read32(bus, HBUS_TARG_MEM_RDAT);
-               if (val != le32_to_cpu(*image)) {
-                       IWL_ERR(bus, "uCode INST section at "
-                               "offset 0x%x, is 0x%x, s/b 0x%x\n",
-                               offs, val, le32_to_cpu(*image));
-                       errors++;
-               }
-       }
-}
-
-/**
- * iwl_verify_ucode - determine which instruction image is in SRAM,
- *    and verify its contents
- */
-static int iwl_verify_ucode(struct iwl_trans *trans,
-                           enum iwl_ucode_type ucode_type)
-{
-       struct fw_img *img = iwl_get_ucode_image(trans, ucode_type);
-
-       if (!img) {
-               IWL_ERR(trans, "Invalid ucode requested (%d)\n", ucode_type);
-               return -EINVAL;
-       }
-
-       if (!iwl_verify_inst_sparse(bus(trans), &img->code)) {
-               IWL_DEBUG_FW(trans, "uCode is good in inst SRAM\n");
-               return 0;
-       }
-
-       IWL_ERR(trans, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n");
-
-       iwl_print_mismatch_inst(bus(trans), &img->code);
-       return -EIO;
-}
-
-struct iwlagn_alive_data {
-       bool valid;
-       u8 subtype;
-};
-
-static void iwlagn_alive_fn(struct iwl_priv *priv,
-                           struct iwl_rx_packet *pkt,
-                           void *data)
-{
-       struct iwlagn_alive_data *alive_data = data;
-       struct iwl_alive_resp *palive;
-
-       palive = &pkt->u.alive_frame;
-
-       IWL_DEBUG_FW(priv, "Alive ucode status 0x%08X revision "
-                      "0x%01X 0x%01X\n",
-                      palive->is_valid, palive->ver_type,
-                      palive->ver_subtype);
-
-       priv->device_pointers.error_event_table =
-               le32_to_cpu(palive->error_event_table_ptr);
-       priv->device_pointers.log_event_table =
-               le32_to_cpu(palive->log_event_table_ptr);
-
-       alive_data->subtype = palive->ver_subtype;
-       alive_data->valid = palive->is_valid == UCODE_VALID_OK;
-}
-
-#define UCODE_ALIVE_TIMEOUT    HZ
-#define UCODE_CALIB_TIMEOUT    (2*HZ)
-
-int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
-                                enum iwl_ucode_type ucode_type)
-{
-       struct iwl_notification_wait alive_wait;
-       struct iwlagn_alive_data alive_data;
-       int ret;
-       enum iwl_ucode_type old_type;
-
-       ret = iwl_trans_start_device(trans(priv));
-       if (ret)
-               return ret;
-
-       iwlagn_init_notification_wait(priv, &alive_wait, REPLY_ALIVE,
-                                     iwlagn_alive_fn, &alive_data);
-
-       old_type = priv->ucode_type;
-       priv->ucode_type = ucode_type;
-
-       ret = iwlagn_load_given_ucode(trans(priv), ucode_type);
-       if (ret) {
-               priv->ucode_type = old_type;
-               iwlagn_remove_notification(priv, &alive_wait);
-               return ret;
-       }
-
-       iwl_trans_kick_nic(trans(priv));
-
-       /*
-        * Some things may run in the background now, but we
-        * just wait for the ALIVE notification here.
-        */
-       ret = iwlagn_wait_notification(priv, &alive_wait, UCODE_ALIVE_TIMEOUT);
-       if (ret) {
-               priv->ucode_type = old_type;
-               return ret;
-       }
-
-       if (!alive_data.valid) {
-               IWL_ERR(priv, "Loaded ucode is not valid!\n");
-               priv->ucode_type = old_type;
-               return -EIO;
-       }
-
-       /*
-        * This step takes a long time (60-80ms!!) and
-        * WoWLAN image should be loaded quickly, so
-        * skip it for WoWLAN.
-        */
-       if (ucode_type != IWL_UCODE_WOWLAN) {
-               ret = iwl_verify_ucode(trans(priv), ucode_type);
-               if (ret) {
-                       priv->ucode_type = old_type;
-                       return ret;
-               }
-
-               /* delay a bit to give rfkill time to run */
-               msleep(5);
-       }
-
-       ret = iwlagn_alive_notify(priv);
-       if (ret) {
-               IWL_WARN(priv,
-                       "Could not complete ALIVE transition: %d\n", ret);
-               priv->ucode_type = old_type;
-               return ret;
-       }
-
-       return 0;
-}
-
-int iwlagn_run_init_ucode(struct iwl_priv *priv)
-{
-       struct iwl_notification_wait calib_wait;
-       int ret;
-
-       lockdep_assert_held(&priv->shrd->mutex);
-
-       /* No init ucode required? Curious, but maybe ok */
-       if (!trans(priv)->ucode_init.code.len)
-               return 0;
-
-       if (priv->ucode_type != IWL_UCODE_NONE)
-               return 0;
-
-       iwlagn_init_notification_wait(priv, &calib_wait,
-                                     CALIBRATION_COMPLETE_NOTIFICATION,
-                                     NULL, NULL);
-
-       /* Will also start the device */
-       ret = iwlagn_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
-       if (ret)
-               goto error;
-
-       ret = iwlagn_init_alive_start(priv);
-       if (ret)
-               goto error;
-
-       /*
-        * Some things may run in the background now, but we
-        * just wait for the calibration complete notification.
-        */
-       ret = iwlagn_wait_notification(priv, &calib_wait, UCODE_CALIB_TIMEOUT);
-
-       goto out;
-
- error:
-       iwlagn_remove_notification(priv, &calib_wait);
- out:
-       /* Whatever happened, stop the device */
-       iwl_trans_stop_device(trans(priv));
-       return ret;
-}
index e235e84..f5fe42d 100644 (file)
@@ -366,7 +366,7 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
        u32 num_wraps;  /* # times uCode wrapped to top of log */
        u32 next_entry; /* index of next entry to be written by uCode */
 
-       base = priv->device_pointers.error_event_table;
+       base = priv->shrd->device_pointers.error_event_table;
        if (iwlagn_hw_valid_rtc_data_addr(base)) {
                capacity = iwl_read_targ_mem(bus(priv), base);
                num_wraps = iwl_read_targ_mem(bus(priv),
@@ -1036,6 +1036,9 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
                priv->inst_evtlog_size =
                        priv->cfg->base_params->max_event_log_size;
        priv->inst_errlog_ptr = pieces.inst_errlog_ptr;
+#ifndef CONFIG_IWLWIFI_P2P
+       ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
+#endif
 
        priv->new_scan_threshold_behaviour =
                !!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWSCAN);
@@ -1057,7 +1060,6 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
                priv->sta_key_max_num = STA_KEY_MAX_NUM;
                priv->shrd->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
        }
-
        /*
         * figure out the offset of chain noise reset and gain commands
         * base on the size of standard phy calibration commands table size
@@ -1232,14 +1234,14 @@ int iwl_alive_start(struct iwl_priv *priv)
                priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS;
                priv->cur_rssi_ctx = NULL;
 
-               iwlagn_send_prio_tbl(priv);
+               iwl_send_prio_tbl(trans(priv));
 
                /* FIXME: w/a to force change uCode BT state machine */
-               ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
+               ret = iwl_send_bt_env(trans(priv), IWL_BT_COEX_ENV_OPEN,
                                         BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
                if (ret)
                        return ret;
-               ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE,
+               ret = iwl_send_bt_env(trans(priv), IWL_BT_COEX_ENV_CLOSE,
                                         BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
                if (ret)
                        return ret;
@@ -1575,6 +1577,8 @@ static int iwl_init_drv(struct iwl_priv *priv)
 
        mutex_init(&priv->shrd->mutex);
 
+       INIT_LIST_HEAD(&trans(priv)->calib_results);
+
        priv->ieee_channels = NULL;
        priv->ieee_rates = NULL;
        priv->band = IEEE80211_BAND_2GHZ;
@@ -1631,7 +1635,6 @@ err:
 
 static void iwl_uninit_drv(struct iwl_priv *priv)
 {
-       iwl_calib_free_results(priv);
        iwl_free_geos(priv);
        iwl_free_channel_map(priv);
        if (priv->tx_cmd_pool)
@@ -1680,6 +1683,41 @@ static int iwl_set_hw_params(struct iwl_priv *priv)
 
 
 
+static void iwl_debug_config(struct iwl_priv *priv)
+{
+       dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEBUG "
+#ifdef CONFIG_IWLWIFI_DEBUG
+               "enabled\n");
+#else
+               "disabled\n");
+#endif
+       dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEBUGFS "
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+               "enabled\n");
+#else
+               "disabled\n");
+#endif
+       dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TRACING "
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+               "enabled\n");
+#else
+               "disabled\n");
+#endif
+
+       dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TESTMODE "
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
+               "enabled\n");
+#else
+               "disabled\n");
+#endif
+       dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_P2P "
+#ifdef CONFIG_IWLWIFI_P2P
+               "enabled\n");
+#else
+               "disabled\n");
+#endif
+}
+
 int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
                struct iwl_cfg *cfg)
 {
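iwl_debug_config() above leans on C string-literal concatenation: the #ifdef picks which literal completes the dev_printk() format argument, so the enabled/disabled report is decided entirely at compile time with no run-time flag. A userspace sketch of the same idiom, with plain printf() standing in for dev_printk():

#include <stdio.h>

/* define CONFIG_IWLWIFI_DEBUG at build time (e.g. -DCONFIG_IWLWIFI_DEBUG)
 * to flip the string that gets concatenated into the format argument */
static void debug_config(void)
{
	printf("CONFIG_IWLWIFI_DEBUG "
#ifdef CONFIG_IWLWIFI_DEBUG
		"enabled\n");
#else
		"disabled\n");
#endif
}

int main(void)
{
	debug_config();
	return 0;
}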
@@ -1715,6 +1753,9 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
 
        SET_IEEE80211_DEV(hw, bus(priv)->dev);
 
+       /* what debugging capabilities we have */
+       iwl_debug_config(priv);
+
        IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
        priv->cfg = cfg;
 
@@ -1780,11 +1821,11 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
                goto out_free_eeprom;
 
        /* extract MAC Address */
-       iwl_eeprom_get_mac(priv, priv->addresses[0].addr);
+       iwl_eeprom_get_mac(priv->shrd, priv->addresses[0].addr);
        IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
        priv->hw->wiphy->addresses = priv->addresses;
        priv->hw->wiphy->n_addresses = 1;
-       num_mac = iwl_eeprom_query16(priv, EEPROM_NUM_MAC_ADDRESS);
+       num_mac = iwl_eeprom_query16(priv->shrd, EEPROM_NUM_MAC_ADDRESS);
        if (num_mac > 1) {
                memcpy(priv->addresses[1].addr, priv->addresses[0].addr,
                       ETH_ALEN);
@@ -1849,7 +1890,7 @@ out_destroy_workqueue:
        priv->shrd->workqueue = NULL;
        iwl_uninit_drv(priv);
 out_free_eeprom:
-       iwl_eeprom_free(priv);
+       iwl_eeprom_free(priv->shrd);
 out_free_trans:
        iwl_trans_free(trans(priv));
 out_free_traffic_mem:
@@ -1888,7 +1929,7 @@ void __devexit iwl_remove(struct iwl_priv * priv)
 
        iwl_dealloc_ucode(trans(priv));
 
-       iwl_eeprom_free(priv);
+       iwl_eeprom_free(priv->shrd);
 
        /*netif_stop_queue(dev); */
        flush_workqueue(priv->shrd->workqueue);
@@ -1988,9 +2029,10 @@ MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
 module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO);
 MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])");
 
-module_param_named(wd_disable, iwlagn_mod_params.wd_disable, bool, S_IRUGO);
+module_param_named(wd_disable, iwlagn_mod_params.wd_disable, int, S_IRUGO);
 MODULE_PARM_DESC(wd_disable,
-               "Disable stuck queue watchdog timer (default: 0 [enabled])");
+               "Disable stuck queue watchdog timer: 0=system default, "
+               "1=disable, 2=enable (default: 0)");
 
 /*
  * set bt_coex_active to true, uCode will do kill/defer
index 5d8d2f4..eb453ea 100644 (file)
@@ -101,13 +101,15 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
                             struct ieee80211_vif *vif,
                             struct ieee80211_bss_conf *bss_conf,
                             u32 changes);
+void iwlagn_config_ht40(struct ieee80211_conf *conf,
+                       struct iwl_rxon_context *ctx);
 
 /* uCode */
 int iwlagn_rx_calib_result(struct iwl_priv *priv,
                            struct iwl_rx_mem_buffer *rxb,
                            struct iwl_device_cmd *cmd);
-int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
-void iwlagn_send_prio_tbl(struct iwl_priv *priv);
+int iwl_send_bt_env(struct iwl_trans *trans, u8 action, u8 type);
+void iwl_send_prio_tbl(struct iwl_trans *trans);
 int iwlagn_run_init_ucode(struct iwl_priv *priv);
 int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
                                 enum iwl_ucode_type ucode_type);
@@ -115,7 +117,7 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
 /* lib */
 int iwlagn_send_tx_power(struct iwl_priv *priv);
 void iwlagn_temperature(struct iwl_priv *priv);
-u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv);
+u16 iwl_eeprom_calib_version(struct iwl_shared *shrd);
 int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
 void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
 int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
@@ -352,28 +354,12 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
 
 /* eeprom */
 void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv);
-void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
-
-/* notification wait support */
-void __acquires(wait_entry)
-iwlagn_init_notification_wait(struct iwl_priv *priv,
-                             struct iwl_notification_wait *wait_entry,
-                             u8 cmd,
-                             void (*fn)(struct iwl_priv *priv,
-                                        struct iwl_rx_packet *pkt,
-                                        void *data),
-                             void *fn_data);
-int __must_check __releases(wait_entry)
-iwlagn_wait_notification(struct iwl_priv *priv,
-                        struct iwl_notification_wait *wait_entry,
-                        unsigned long timeout);
-void __releases(wait_entry)
-iwlagn_remove_notification(struct iwl_priv *priv,
-                          struct iwl_notification_wait *wait_entry);
+void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac);
+
 extern int iwlagn_init_alive_start(struct iwl_priv *priv);
 extern int iwl_alive_start(struct iwl_priv *priv);
 /* svtool */
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
 extern int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data,
                                   int len);
 extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw,
index f4eccf5..265de39 100644 (file)
@@ -109,10 +109,10 @@ enum {
        /* RX, TX, LEDs */
        REPLY_TX = 0x1c,
        REPLY_LEDS_CMD = 0x48,
-       REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
+       REPLY_TX_LINK_QUALITY_CMD = 0x4e,
 
        /* WiMAX coexistence */
-       COEX_PRIORITY_TABLE_CMD = 0x5a, /* for 5000 series and up */
+       COEX_PRIORITY_TABLE_CMD = 0x5a,
        COEX_MEDIUM_NOTIFICATION = 0x5b,
        COEX_EVENT_CMD = 0x5c,
 
@@ -466,23 +466,27 @@ struct iwl_error_event_table {
        u32 frame_ptr;          /* frame pointer */
        u32 stack_ptr;          /* stack pointer */
        u32 hcmd;               /* last host command header */
-#if 0
-       /* no need to read the remainder, we don't use the values */
-       u32 isr0;               /* isr status register LMPM_NIC_ISR0: rxtx_flag */
-       u32 isr1;               /* isr status register LMPM_NIC_ISR1: host_flag */
-       u32 isr2;               /* isr status register LMPM_NIC_ISR2: enc_flag */
-       u32 isr3;               /* isr status register LMPM_NIC_ISR3: time_flag */
-       u32 isr4;               /* isr status register LMPM_NIC_ISR4: wico interrupt */
+       u32 isr0;               /* isr status register LMPM_NIC_ISR0:
+                                * rxtx_flag */
+       u32 isr1;               /* isr status register LMPM_NIC_ISR1:
+                                * host_flag */
+       u32 isr2;               /* isr status register LMPM_NIC_ISR2:
+                                * enc_flag */
+       u32 isr3;               /* isr status register LMPM_NIC_ISR3:
+                                * time_flag */
+       u32 isr4;               /* isr status register LMPM_NIC_ISR4:
+                                * wico interrupt */
        u32 isr_pref;           /* isr status register LMPM_NIC_PREF_STAT */
        u32 wait_event;         /* wait event() caller address */
        u32 l2p_control;        /* L2pControlField */
        u32 l2p_duration;       /* L2pDurationField */
        u32 l2p_mhvalid;        /* L2pMhValidBits */
        u32 l2p_addr_match;     /* L2pAddrMatchStat */
-       u32 lmpm_pmg_sel;       /* indicate which clocks are turned on (LMPM_PMG_SEL) */
-       u32 u_timestamp;        /* indicate when the date and time of the compilation */
+       u32 lmpm_pmg_sel;       /* indicate which clocks are turned on
+                                * (LMPM_PMG_SEL) */
+       u32 u_timestamp;        /* indicate the date and time of the
+                                * compilation */
        u32 flow_handler;       /* FH read/write pointers, RX credit */
-#endif
 } __packed;
 
 struct iwl_alive_resp {
@@ -810,7 +814,7 @@ struct iwl_qosparam_cmd {
 #define        IWLAGN_STATION_COUNT    16
 
 #define        IWL_INVALID_STATION     255
-#define IWL_MAX_TID_COUNT      9
+#define IWL_MAX_TID_COUNT      8
 
 #define STA_FLG_TX_RATE_MSK            cpu_to_le32(1 << 2)
 #define STA_FLG_PWR_SAVE_MSK           cpu_to_le32(1 << 8)
@@ -931,8 +935,7 @@ struct iwl_addsta_cmd {
         * corresponding to bit (e.g. bit 5 controls TID 5).
         * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
        __le16 tid_disable_tx;
-
-       __le16  rate_n_flags;           /* 3945 only */
+       __le16 legacy_reserved;
 
        /* TID for which to add block-ack support.
         * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
@@ -1162,8 +1165,7 @@ struct iwl_rx_mpdu_res_start {
  *
  * uCode handles retrying Tx when an ACK is expected but not received.
  * This includes trying lower data rates than the one requested in the Tx
- * command, as set up by the REPLY_RATE_SCALE (for 3945) or
- * REPLY_TX_LINK_QUALITY_CMD (agn).
+ * command, as set up by the REPLY_TX_LINK_QUALITY_CMD (agn).
  *
  * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
  * This command must be executed after every RXON command, before Tx can occur.
@@ -1175,25 +1177,9 @@ struct iwl_rx_mpdu_res_start {
  * 1: Use RTS/CTS protocol or CTS-to-self if spec allows it
  * before this frame. if CTS-to-self required check
  * RXON_FLG_SELF_CTS_EN status.
- * unused in 3945/4965, used in 5000 series and after
  */
 #define TX_CMD_FLG_PROT_REQUIRE_MSK cpu_to_le32(1 << 0)
 
-/*
- * 1: Use Request-To-Send protocol before this frame.
- * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK.
- * used in 3945/4965, unused in 5000 series and after
- */
-#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1)
-
-/*
- * 1: Transmit Clear-To-Send to self before this frame.
- * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames.
- * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK.
- * used in 3945/4965, unused in 5000 series and after
- */
-#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2)
-
 /* 1: Expect ACK from receiving station
  * 0: Don't expect ACK (MAC header's duration field s/b 0)
  * Set this for unicast frames, but not broadcast/multicast. */
@@ -1211,18 +1197,8 @@ struct iwl_rx_mpdu_res_start {
  * Set when Txing a block-ack request frame.  Also set TX_CMD_FLG_ACK_MSK. */
 #define TX_CMD_FLG_IMM_BA_RSP_MASK  cpu_to_le32(1 << 6)
 
-/*
- * 1: Frame requires full Tx-Op protection.
- * Set this if either RTS or CTS Tx Flag gets set.
- * used in 3945/4965, unused in 5000 series and after
- */
-#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7)
-
-/* Tx antenna selection field; used only for 3945, reserved (0) for agn devices.
- * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */
+/* Tx antenna selection field; reserved (0) for agn devices. */
 #define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00)
-#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
-#define TX_CMD_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
 
 /* 1: Ignore Bluetooth priority for this frame.
  * 0: Delay Tx until Bluetooth device is done (normal usage). */
@@ -1568,7 +1544,6 @@ struct iwl_compressed_ba_resp {
        __le64 bitmap;
        __le16 scd_flow;
        __le16 scd_ssn;
-       /* following only for 5000 series and up */
        u8 txed;        /* number of frames sent */
        u8 txed_2_done; /* number of frames acked */
 } __packed;
@@ -1670,7 +1645,7 @@ struct iwl_link_qual_agg_params {
 /*
  * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
  *
- * For agn devices only; 3945 uses REPLY_RATE_SCALE.
+ * For agn devices
  *
  * Each station in the agn device's internal station table has its own table
  * of 16
@@ -1919,7 +1894,7 @@ struct iwl_link_quality_cmd {
 /*
  * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
  *
- * 3945 and agn devices support hardware handshake with Bluetooth device on
+ * agn devices support hardware handshake with Bluetooth device on
  * same platform.  Bluetooth device alerts wireless device when it will Tx;
  * wireless device can delay or kill its own Tx to accommodate.
  */
@@ -2203,8 +2178,8 @@ struct iwl_spectrum_notification {
 
 struct iwl_powertable_cmd {
        __le16 flags;
-       u8 keep_alive_seconds;          /* 3945 reserved */
-       u8 debug_flags;                 /* 3945 reserved */
+       u8 keep_alive_seconds;
+       u8 debug_flags;
        __le32 rx_data_timeout;
        __le32 tx_data_timeout;
        __le32 sleep_interval[IWL_POWER_VEC_SIZE];
@@ -2325,9 +2300,9 @@ struct iwl_scan_channel {
 /**
  * struct iwl_ssid_ie - directed scan network information element
  *
- * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in
- * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel;
- * each channel may select different ssids from among the 20 (4) entries.
+ * Up to 20 of these may appear in REPLY_SCAN_CMD,
+ * selected by "type" bit field in struct iwl_scan_channel;
+ * each channel may select different ssids from among the 20 entries.
  * SSID IEs get transmitted in reverse order of entry.
  */
 struct iwl_ssid_ie {
@@ -2336,7 +2311,6 @@ struct iwl_ssid_ie {
        u8 ssid[32];
 } __packed;
 
-#define PROBE_OPTION_MAX_3945          4
 #define PROBE_OPTION_MAX               20
 #define TX_CMD_LIFE_TIME_INFINITE      cpu_to_le32(0xFFFFFFFF)
 #define IWL_GOOD_CRC_TH_DISABLED       0
@@ -2417,8 +2391,6 @@ struct iwl_scan_cmd {
                                 * channel */
        __le32 suspend_time;    /* pause scan this long (in "extended beacon
                                 * format") when returning to service chnl:
-                                * 3945; 31:24 # beacons, 19:0 additional usec,
-                                * 4965; 31:22 # beacons, 21:0 additional usec.
                                 */
        __le32 flags;           /* RXON_FLG_* */
        __le32 filter_flags;    /* RXON_FILTER_* */
@@ -2734,7 +2706,7 @@ struct statistics_div {
 
 struct statistics_general_common {
        __le32 temperature;   /* radio temperature */
-       __le32 temperature_m; /* for 5000 and up, this is radio voltage */
+       __le32 temperature_m; /* radio voltage */
        struct statistics_dbg dbg;
        __le32 sleep_time;
        __le32 slots_out;
index f9e9170..3b6f48b 100644 (file)
@@ -836,19 +836,6 @@ void iwl_print_rx_config_cmd(struct iwl_priv *priv,
 }
 #endif
 
-static void iwlagn_abort_notification_waits(struct iwl_priv *priv)
-{
-       unsigned long flags;
-       struct iwl_notification_wait *wait_entry;
-
-       spin_lock_irqsave(&priv->notif_wait_lock, flags);
-       list_for_each_entry(wait_entry, &priv->notif_waits, list)
-               wait_entry->aborted = true;
-       spin_unlock_irqrestore(&priv->notif_wait_lock, flags);
-
-       wake_up_all(&priv->notif_waitq);
-}
-
 void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
 {
        unsigned int reload_msec;
@@ -860,7 +847,7 @@ void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
        /* Cancel currently queued command. */
        clear_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status);
 
-       iwlagn_abort_notification_waits(priv);
+       iwl_abort_notification_waits(priv->shrd);
 
        /* Keep the restart process from trying to send host
         * commands by clearing the ready bit */
@@ -1505,11 +1492,23 @@ void iwl_setup_watchdog(struct iwl_priv *priv)
 {
        unsigned int timeout = priv->cfg->base_params->wd_timeout;
 
-       if (timeout && !iwlagn_mod_params.wd_disable)
-               mod_timer(&priv->watchdog,
-                         jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
-       else
-               del_timer(&priv->watchdog);
+       if (!iwlagn_mod_params.wd_disable) {
+               /* use system default */
+               if (timeout && !priv->cfg->base_params->wd_disable)
+                       mod_timer(&priv->watchdog,
+                               jiffies +
+                               msecs_to_jiffies(IWL_WD_TICK(timeout)));
+               else
+                       del_timer(&priv->watchdog);
+       } else {
+               /* module parameter overrides the default configuration */
+               if (timeout && iwlagn_mod_params.wd_disable == 2)
+                       mod_timer(&priv->watchdog,
+                               jiffies +
+                               msecs_to_jiffies(IWL_WD_TICK(timeout)));
+               else
+                       del_timer(&priv->watchdog);
+       }
 }
 
 /**
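With wd_disable promoted from bool to int, the setup above becomes a tri-state decision: 0 follows the per-device default (the new base_params->wd_disable flag added further down), 1 forces the stuck-queue watchdog off, and 2 forces it on, always provided the device defines a non-zero wd_timeout. A compact sketch of that decision, with illustrative names in place of the driver structures:

#include <stdbool.h>
#include <stdio.h>

/* cfg_wd_disable mimics base_params->wd_disable,
 * mod_wd_disable mimics iwlagn_mod_params.wd_disable (0/1/2) */
static bool watchdog_should_run(unsigned int timeout_ms,
				bool cfg_wd_disable,
				int mod_wd_disable)
{
	if (!timeout_ms)
		return false;		/* device defines no watchdog period */
	if (mod_wd_disable == 0)
		return !cfg_wd_disable;	/* 0: follow the per-device default */
	return mod_wd_disable == 2;	/* 2: force on, 1: force off */
}

int main(void)
{
	printf("%d\n", watchdog_should_run(2000, true, 0));	/* 0: default says off */
	printf("%d\n", watchdog_should_run(2000, true, 2));	/* 1: forced on */
	printf("%d\n", watchdog_should_run(2000, false, 1));	/* 0: forced off */
	return 0;
}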
index fa47f75..6da53a3 100644 (file)
@@ -113,6 +113,7 @@ struct iwl_lib_ops {
  * @shadow_reg_enable: HW shadhow register bit
  * @no_idle_support: do not support idle mode
  * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
+ * @wd_disable: disable watchdog timer
  */
 struct iwl_base_params {
        int eeprom_size;
@@ -134,6 +135,7 @@ struct iwl_base_params {
        const bool shadow_reg_enable;
        const bool no_idle_support;
        const bool hd_v2;
+       const bool wd_disable;
 };
 /*
  * @advanced_bt_coexist: support advanced bt coexist
@@ -184,8 +186,9 @@ struct iwl_ht_params {
  * @ht_params: point to ht patameters
  * @bt_params: pointer to bt parameters
  * @pa_type: used by 6000 series only to identify the type of Power Amplifier
- * @need_dc_calib: need to perform init dc calibration
  * @need_temp_offset_calib: need to perform temperature offset calibration
+ * @no_xtal_calib: some devices do not need crystal calibration data,
+ *     don't send it to those
  * @scan_antennas: available antenna for scan operation
  * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
  * @adv_pm: advance power management
@@ -222,8 +225,8 @@ struct iwl_cfg {
        struct iwl_ht_params *ht_params;
        struct iwl_bt_params *bt_params;
        enum iwl_pa_type pa_type;         /* if used set to IWL_PA_SYSTEM */
-       const bool need_dc_calib;         /* if used set to true */
        const bool need_temp_offset_calib; /* if used set to true */
+       const bool no_xtal_calib;
        u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
        enum iwl_led_mode led_mode;
        const bool adv_pm;
index 44a7bdd..f8fc239 100644 (file)
@@ -134,48 +134,43 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
  */
 
 /* 0x0000000F - 0x00000001 */
-#define IWL_DL_INFO            (1 << 0)
-#define IWL_DL_MAC80211                (1 << 1)
-#define IWL_DL_HCMD            (1 << 2)
-#define IWL_DL_STATE           (1 << 3)
+#define IWL_DL_INFO            0x00000001
+#define IWL_DL_MAC80211                0x00000002
+#define IWL_DL_HCMD            0x00000004
+#define IWL_DL_STATE           0x00000008
 /* 0x000000F0 - 0x00000010 */
-#define IWL_DL_MACDUMP         (1 << 4)
-#define IWL_DL_HCMD_DUMP       (1 << 5)
-#define IWL_DL_EEPROM          (1 << 6)
-#define IWL_DL_RADIO           (1 << 7)
+#define IWL_DL_EEPROM          0x00000040
+#define IWL_DL_RADIO           0x00000080
 /* 0x00000F00 - 0x00000100 */
-#define IWL_DL_POWER           (1 << 8)
-#define IWL_DL_TEMP            (1 << 9)
-/* reserved (1 << 10) */
-#define IWL_DL_SCAN            (1 << 11)
+#define IWL_DL_POWER           0x00000100
+#define IWL_DL_TEMP            0x00000200
+#define IWL_DL_SCAN            0x00000800
 /* 0x0000F000 - 0x00001000 */
-#define IWL_DL_ASSOC           (1 << 12)
-#define IWL_DL_DROP            (1 << 13)
-/* reserved (1 << 14) */
-#define IWL_DL_COEX            (1 << 15)
+#define IWL_DL_ASSOC           0x00001000
+#define IWL_DL_DROP            0x00002000
+#define IWL_DL_COEX            0x00008000
 /* 0x000F0000 - 0x00010000 */
-#define IWL_DL_FW              (1 << 16)
-#define IWL_DL_RF_KILL         (1 << 17)
-#define IWL_DL_FW_ERRORS       (1 << 18)
-#define IWL_DL_LED             (1 << 19)
+#define IWL_DL_FW              0x00010000
+#define IWL_DL_RF_KILL         0x00020000
+#define IWL_DL_FW_ERRORS       0x00040000
+#define IWL_DL_LED             0x00080000
 /* 0x00F00000 - 0x00100000 */
-#define IWL_DL_RATE            (1 << 20)
-#define IWL_DL_CALIB           (1 << 21)
-#define IWL_DL_WEP             (1 << 22)
-#define IWL_DL_TX              (1 << 23)
+#define IWL_DL_RATE            0x00100000
+#define IWL_DL_CALIB           0x00200000
+#define IWL_DL_WEP             0x00400000
+#define IWL_DL_TX              0x00800000
 /* 0x0F000000 - 0x01000000 */
-#define IWL_DL_RX              (1 << 24)
-#define IWL_DL_ISR             (1 << 25)
-#define IWL_DL_HT              (1 << 26)
+#define IWL_DL_RX              0x01000000
+#define IWL_DL_ISR             0x02000000
+#define IWL_DL_HT              0x04000000
 /* 0xF0000000 - 0x10000000 */
-#define IWL_DL_11H             (1 << 28)
-#define IWL_DL_STATS           (1 << 29)
-#define IWL_DL_TX_REPLY                (1 << 30)
-#define IWL_DL_TX_QUEUES       (1 << 31)
+#define IWL_DL_11H             0x10000000
+#define IWL_DL_STATS           0x20000000
+#define IWL_DL_TX_REPLY                0x40000000
+#define IWL_DL_TX_QUEUES       0x80000000
 
 #define IWL_DEBUG_INFO(p, f, a...)     IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
 #define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
-#define IWL_DEBUG_MACDUMP(p, f, a...)  IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
 #define IWL_DEBUG_TEMP(p, f, a...)     IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
 #define IWL_DEBUG_SCAN(p, f, a...)     IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
 #define IWL_DEBUG_RX(p, f, a...)       IWL_DEBUG(p, IWL_DL_RX, f, ## a)
@@ -184,7 +179,6 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
 #define IWL_DEBUG_LED(p, f, a...)      IWL_DEBUG(p, IWL_DL_LED, f, ## a)
 #define IWL_DEBUG_WEP(p, f, a...)      IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
 #define IWL_DEBUG_HC(p, f, a...)       IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
-#define IWL_DEBUG_HC_DUMP(p, f, a...)  IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
 #define IWL_DEBUG_EEPROM(p, f, a...)   IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
 #define IWL_DEBUG_CALIB(p, f, a...)    IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
 #define IWL_DEBUG_FW(p, f, a...)       IWL_DEBUG(p, IWL_DL_FW, f, ## a)
@@ -206,8 +200,6 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
 #define IWL_DEBUG_STATS_LIMIT(p, f, a...)      \
                IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
 #define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
-#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
-               IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
 #define IWL_DEBUG_TX_QUEUES(p, f, a...)        IWL_DEBUG(p, IWL_DL_TX_QUEUES, f, ## a)
 #define IWL_DEBUG_RADIO(p, f, a...)    IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
 #define IWL_DEBUG_POWER(p, f, a...)    IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
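The block above only spells the debug-level bits out as explicit hex masks and retires the MACDUMP/HCMD_DUMP and reserved slots, leaving bits 4, 5, 10 and 14 unassigned; every surviving flag keeps its old value. A tiny sketch of how such a mask gates output; iwl_debug_level and DBG() below are simplified stand-ins for the driver's debug plumbing:

#include <stdio.h>

#define IWL_DL_INFO	0x00000001
#define IWL_DL_SCAN	0x00000800
#define IWL_DL_RX	0x01000000

static unsigned int iwl_debug_level = IWL_DL_INFO | IWL_DL_RX;

/* print only when the corresponding bit is set in the debug mask */
#define DBG(level, msg) \
	do { \
		if (iwl_debug_level & (level)) \
			puts(msg); \
	} while (0)

int main(void)
{
	DBG(IWL_DL_RX, "rx path: bit set, printed");
	DBG(IWL_DL_SCAN, "scan: bit clear, suppressed");
	return 0;
}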
index 68b04f5..6bf6845 100644 (file)
@@ -234,11 +234,12 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
 
        /* default is to dump the entire data segment */
        if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
+               struct iwl_trans *trans = trans(priv);
                priv->dbgfs_sram_offset = 0x800000;
-               if (priv->ucode_type == IWL_UCODE_INIT)
-                       priv->dbgfs_sram_len = trans(priv)->ucode_init.data.len;
+               if (trans->shrd->ucode_type == IWL_UCODE_INIT)
+                       priv->dbgfs_sram_len = trans->ucode_init.data.len;
                else
-                       priv->dbgfs_sram_len = trans(priv)->ucode_rt.data.len;
+                       priv->dbgfs_sram_len = trans->ucode_rt.data.len;
        }
        len = priv->dbgfs_sram_len;
 
@@ -415,7 +416,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
                return -ENODATA;
        }
 
-       ptr = priv->eeprom;
+       ptr = priv->shrd->eeprom;
        if (!ptr) {
                IWL_ERR(priv, "Invalid EEPROM/OTP memory\n");
                return -ENOMEM;
@@ -427,7 +428,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
                IWL_ERR(priv, "Can not allocate Buffer\n");
                return -ENOMEM;
        }
-       eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
+       eeprom_ver = iwl_eeprom_query16(priv->shrd, EEPROM_VERSION);
        pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s, "
                        "version: 0x%x\n",
                        (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP)
index 556e4a2..69ecf6e 100644 (file)
@@ -60,11 +60,10 @@ struct iwl_tx_queue;
 
 /* Default noise level to report when noise measurement is not available.
  *   This may be because we're:
- *   1)  Not associated (4965, no beacon statistics being sent to driver)
+ *   1)  Not associated (no beacon statistics being sent to driver)
  *   2)  Scanning (noise measurement does not apply to associated channel)
- *   3)  Receiving CCK (3945 delivers noise info only for OFDM frames)
  * Use default noise value of -127 ... this is below the range of measurable
- *   Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
+ *   Rx dBm for all agn devices, so it can indicate "unmeasurable" to user.
  *   Also, -127 works better than 0 when averaging frames with/without
  *   noise info (e.g. averaging might be done in app); measured dBm values are
  *   always negative ... using a negative value as the default keeps all
@@ -441,29 +440,6 @@ enum iwlagn_chain_noise_state {
        IWL_CHAIN_NOISE_DONE,
 };
 
-
-/*
- * enum iwl_calib
- * defines the order in which results of initial calibrations
- * should be sent to the runtime uCode
- */
-enum iwl_calib {
-       IWL_CALIB_XTAL,
-       IWL_CALIB_DC,
-       IWL_CALIB_LO,
-       IWL_CALIB_TX_IQ,
-       IWL_CALIB_TX_IQ_PERD,
-       IWL_CALIB_BASE_BAND,
-       IWL_CALIB_TEMP_OFFSET,
-       IWL_CALIB_MAX
-};
-
-/* Opaque calibration results */
-struct iwl_calib_result {
-       void *buf;
-       size_t buf_len;
-};
-
 /* Sensitivity calib data */
 struct iwl_sensitivity_data {
        u32 auto_corr_ofdm;
@@ -703,35 +679,6 @@ struct iwl_force_reset {
  */
 #define IWLAGN_EXT_BEACON_TIME_POS     22
 
-/**
- * struct iwl_notification_wait - notification wait entry
- * @list: list head for global list
- * @fn: function called with the notification
- * @cmd: command ID
- *
- * This structure is not used directly, to wait for a
- * notification declare it on the stack, and call
- * iwlagn_init_notification_wait() with appropriate
- * parameters. Then do whatever will cause the ucode
- * to notify the driver, and to wait for that then
- * call iwlagn_wait_notification().
- *
- * Each notification is one-shot. If at some point we
- * need to support multi-shot notifications (which
- * can't be allocated on the stack) we need to modify
- * the code for them.
- */
-struct iwl_notification_wait {
-       struct list_head list;
-
-       void (*fn)(struct iwl_priv *priv, struct iwl_rx_packet *pkt,
-                  void *data);
-       void *fn_data;
-
-       u8 cmd;
-       bool triggered, aborted;
-};
-
 struct iwl_rxon_context {
        struct ieee80211_vif *vif;
 
@@ -794,7 +741,7 @@ enum iwl_scan_type {
        IWL_SCAN_ROC,
 };
 
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
 struct iwl_testmode_trace {
        u32 buff_size;
        u32 total_size;
@@ -804,6 +751,12 @@ struct iwl_testmode_trace {
        dma_addr_t dma_addr;
        bool trace_enabled;
 };
+struct iwl_testmode_sram {
+       u32 buff_size;
+       u32 num_chunks;
+       u8 *buff_addr;
+       bool sram_readed;
+};
 #endif
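The new iwl_testmode_sram bookkeeping mirrors what the existing trace support does: a dump buffer is handed back to user space in fixed-size chunks counted by num_chunks. A small self-contained sketch of that arithmetic follows; the 4 KiB chunk size is an assumption for illustration, not a value taken from the driver.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Local mirror of the struct added above (sketch only). */
struct iwl_testmode_sram {
	uint32_t buff_size;
	uint32_t num_chunks;
	uint8_t *buff_addr;
	bool sram_readed;
};

#define SRAM_CHUNK_SIZE 4096u	/* assumed chunk size for illustration */

int main(void)
{
	struct iwl_testmode_sram tm = { .buff_size = 10000u };

	/* Round up, like the kernel's DIV_ROUND_UP(). */
	tm.num_chunks = (tm.buff_size + SRAM_CHUNK_SIZE - 1) / SRAM_CHUNK_SIZE;
	printf("%u bytes -> %u chunks\n",
	       (unsigned)tm.buff_size, (unsigned)tm.num_chunks);
	return 0;
}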
 
 struct iwl_wipan_noa_data {
@@ -868,9 +821,6 @@ struct iwl_priv {
        s32 temperature;        /* Celsius */
        s32 last_temperature;
 
-       /* init calibration results */
-       struct iwl_calib_result calib_results[IWL_CALIB_MAX];
-
        struct iwl_wipan_noa_data __rcu *noa_data;
 
        /* Scan related variables */
@@ -897,18 +847,12 @@ struct iwl_priv {
        u32 ucode_ver;                  /* version of ucode, copy of
                                           iwl_ucode.ver */
 
-       enum iwl_ucode_type ucode_type;
        char firmware_name[25];
 
        struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
 
        __le16 switch_channel;
 
-       struct {
-               u32 error_event_table;
-               u32 log_event_table;
-       } device_pointers;
-
        u16 active_rate;
 
        u8 start_calib;
@@ -942,10 +886,6 @@ struct iwl_priv {
        /* Indication if ieee80211_ops->open has been called */
        u8 is_open;
 
-       /* eeprom -- this is in the card's little endian byte order */
-       u8 *eeprom;
-       struct iwl_eeprom_calib_info *calib_info;
-
        enum nl80211_iftype iw_mode;
 
        /* Last Rx'd beacon timestamp */
@@ -1001,10 +941,6 @@ struct iwl_priv {
        /* counts reply_tx error */
        struct reply_tx_error_statistics reply_tx_stats;
        struct reply_agg_tx_error_statistics reply_agg_tx_stats;
-       /* notification wait support */
-       struct list_head notif_waits;
-       spinlock_t notif_wait_lock;
-       wait_queue_head_t notif_waitq;
 
        /* remain-on-channel offload support */
        struct ieee80211_channel *hw_roc_channel;
@@ -1082,8 +1018,9 @@ struct iwl_priv {
        struct led_classdev led;
        unsigned long blink_on, blink_off;
        bool led_registered;
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
        struct iwl_testmode_trace testmode_trace;
+       struct iwl_testmode_sram testmode_sram;
        u32 tm_fixed_rate;
 #endif
 
index dcada08..6fcc7d5 100644 (file)
@@ -215,11 +215,11 @@ static int iwl_eeprom_verify_signature(struct iwl_trans *trans)
        return ret;
 }
 
-u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
+u16 iwl_eeprom_query16(const struct iwl_shared *shrd, size_t offset)
 {
-       if (!priv->eeprom)
+       if (!shrd->eeprom)
                return 0;
-       return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
+       return (u16)shrd->eeprom[offset] | ((u16)shrd->eeprom[offset + 1] << 8);
 }
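iwl_eeprom_query16() now takes the shared struct, but the byte assembly is unchanged: two consecutive EEPROM bytes are combined little-endian, low byte first. A tiny standalone illustration, with made-up sample bytes:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* EEPROM image is little endian: eeprom[offset] is the low byte. */
	uint8_t eeprom[] = { 0x34, 0x12 };
	uint16_t v = (uint16_t)eeprom[0] | ((uint16_t)eeprom[1] << 8);
	assert(v == 0x1234);
	return 0;
}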
 
 int iwl_eeprom_check_version(struct iwl_priv *priv)
@@ -227,8 +227,8 @@ int iwl_eeprom_check_version(struct iwl_priv *priv)
        u16 eeprom_ver;
        u16 calib_ver;
 
-       eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
-       calib_ver = iwlagn_eeprom_calib_version(priv);
+       eeprom_ver = iwl_eeprom_query16(priv->shrd, EEPROM_VERSION);
+       calib_ver = iwl_eeprom_calib_version(priv->shrd);
 
        if (eeprom_ver < priv->cfg->eeprom_ver ||
            calib_ver < priv->cfg->eeprom_calib_ver)
@@ -249,11 +249,12 @@ err:
 
 int iwl_eeprom_check_sku(struct iwl_priv *priv)
 {
+       struct iwl_shared *shrd = priv->shrd;
        u16 radio_cfg;
 
        if (!priv->cfg->sku) {
                /* not using sku overwrite */
-               priv->cfg->sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP);
+               priv->cfg->sku = iwl_eeprom_query16(shrd, EEPROM_SKU_CAP);
                if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE &&
                    !priv->cfg->ht_params) {
                        IWL_ERR(priv, "Invalid 11n configuration\n");
@@ -269,7 +270,7 @@ int iwl_eeprom_check_sku(struct iwl_priv *priv)
 
        if (!priv->cfg->valid_tx_ant && !priv->cfg->valid_rx_ant) {
                /* not using .cfg overwrite */
-               radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
+               radio_cfg = iwl_eeprom_query16(shrd, EEPROM_RADIO_CONFIG);
                priv->cfg->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
                priv->cfg->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
                if (!priv->cfg->valid_tx_ant || !priv->cfg->valid_rx_ant) {
@@ -289,9 +290,9 @@ int iwl_eeprom_check_sku(struct iwl_priv *priv)
        return 0;
 }
 
-void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
+void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac)
 {
-       const u8 *addr = iwl_eeprom_query_addr(priv,
+       const u8 *addr = iwl_eeprom_query_addr(shrd,
                                        EEPROM_MAC_ADDRESS);
        memcpy(mac, addr, ETH_ALEN);
 }
@@ -582,6 +583,7 @@ iwl_eeprom_enh_txp_read_element(struct iwl_priv *priv,
 
 void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
 {
+       struct iwl_shared *shrd = priv->shrd;
        struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
        int idx, entries;
        __le16 *txp_len;
@@ -590,10 +592,10 @@ void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
        BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
 
        /* the length is in 16-bit words, but we want entries */
-       txp_len = (__le16 *) iwl_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS);
+       txp_len = (__le16 *) iwl_eeprom_query_addr(shrd, EEPROM_TXP_SZ_OFFS);
        entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
 
-       txp_array = (void *) iwl_eeprom_query_addr(priv, EEPROM_TXP_OFFS);
+       txp_array = (void *) iwl_eeprom_query_addr(shrd, EEPROM_TXP_OFFS);
 
        for (idx = 0; idx < entries; idx++) {
                txp = &txp_array[idx];
@@ -646,12 +648,13 @@ void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
 /**
  * iwl_eeprom_init - read EEPROM contents
  *
- * Load the EEPROM contents from adapter into priv->eeprom
+ * Load the EEPROM contents from adapter into shrd->eeprom
  *
  * NOTE:  This routine uses the non-debug IO access functions.
  */
 int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
 {
+       struct iwl_shared *shrd = priv->shrd;
        __le16 *e;
        u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP);
        int sz;
@@ -666,12 +669,12 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
        /* allocate eeprom */
        sz = priv->cfg->base_params->eeprom_size;
        IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
-       priv->eeprom = kzalloc(sz, GFP_KERNEL);
-       if (!priv->eeprom) {
+       shrd->eeprom = kzalloc(sz, GFP_KERNEL);
+       if (!shrd->eeprom) {
                ret = -ENOMEM;
                goto alloc_err;
        }
-       e = (__le16 *)priv->eeprom;
+       e = (__le16 *)shrd->eeprom;
 
        iwl_apm_init(priv);
 
@@ -746,7 +749,7 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
        IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
                       (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP)
                       ? "OTP" : "EEPROM",
-                      iwl_eeprom_query16(priv, EEPROM_VERSION));
+                      iwl_eeprom_query16(shrd, EEPROM_VERSION));
 
        ret = 0;
 done:
@@ -754,17 +757,17 @@ done:
 
 err:
        if (ret)
-               iwl_eeprom_free(priv);
+               iwl_eeprom_free(priv->shrd);
        /* Reset chip to save power until we load uCode during "up". */
        iwl_apm_stop(priv);
 alloc_err:
        return ret;
 }
 
-void iwl_eeprom_free(struct iwl_priv *priv)
+void iwl_eeprom_free(struct iwl_shared *shrd)
 {
-       kfree(priv->eeprom);
-       priv->eeprom = NULL;
+       kfree(shrd->eeprom);
+       shrd->eeprom = NULL;
 }
 
 static void iwl_init_band_reference(const struct iwl_priv *priv,
@@ -772,49 +775,50 @@ static void iwl_init_band_reference(const struct iwl_priv *priv,
                        const struct iwl_eeprom_channel **eeprom_ch_info,
                        const u8 **eeprom_ch_index)
 {
+       struct iwl_shared *shrd = priv->shrd;
        u32 offset = priv->cfg->lib->
                        eeprom_ops.regulatory_bands[eep_band - 1];
        switch (eep_band) {
        case 1:         /* 2.4GHz band */
                *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
                *eeprom_ch_info = (struct iwl_eeprom_channel *)
-                               iwl_eeprom_query_addr(priv, offset);
+                               iwl_eeprom_query_addr(shrd, offset);
                *eeprom_ch_index = iwl_eeprom_band_1;
                break;
        case 2:         /* 4.9GHz band */
                *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
                *eeprom_ch_info = (struct iwl_eeprom_channel *)
-                               iwl_eeprom_query_addr(priv, offset);
+                               iwl_eeprom_query_addr(shrd, offset);
                *eeprom_ch_index = iwl_eeprom_band_2;
                break;
        case 3:         /* 5.2GHz band */
                *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
                *eeprom_ch_info = (struct iwl_eeprom_channel *)
-                               iwl_eeprom_query_addr(priv, offset);
+                               iwl_eeprom_query_addr(shrd, offset);
                *eeprom_ch_index = iwl_eeprom_band_3;
                break;
        case 4:         /* 5.5GHz band */
                *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
                *eeprom_ch_info = (struct iwl_eeprom_channel *)
-                               iwl_eeprom_query_addr(priv, offset);
+                               iwl_eeprom_query_addr(shrd, offset);
                *eeprom_ch_index = iwl_eeprom_band_4;
                break;
        case 5:         /* 5.7GHz band */
                *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
                *eeprom_ch_info = (struct iwl_eeprom_channel *)
-                               iwl_eeprom_query_addr(priv, offset);
+                               iwl_eeprom_query_addr(shrd, offset);
                *eeprom_ch_index = iwl_eeprom_band_5;
                break;
        case 6:         /* 2.4GHz ht40 channels */
                *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
                *eeprom_ch_info = (struct iwl_eeprom_channel *)
-                               iwl_eeprom_query_addr(priv, offset);
+                               iwl_eeprom_query_addr(shrd, offset);
                *eeprom_ch_index = iwl_eeprom_band_6;
                break;
        case 7:         /* 5 GHz ht40 channels */
                *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
                *eeprom_ch_info = (struct iwl_eeprom_channel *)
-                               iwl_eeprom_query_addr(priv, offset);
+                               iwl_eeprom_query_addr(shrd, offset);
                *eeprom_ch_index = iwl_eeprom_band_7;
                break;
        default:
@@ -1064,7 +1068,7 @@ void iwl_rf_config(struct iwl_priv *priv)
 {
        u16 radio_cfg;
 
-       radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
+       radio_cfg = iwl_eeprom_query16(priv->shrd, EEPROM_RADIO_CONFIG);
 
        /* write radio config values to register */
        if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) {
index c94747e..9fa937e 100644 (file)
@@ -66,6 +66,7 @@
 #include <net/mac80211.h>
 
 struct iwl_priv;
+struct iwl_shared;
 
 /*
  * EEPROM access time values:
@@ -305,11 +306,11 @@ struct iwl_eeprom_ops {
 
 
 int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev);
-void iwl_eeprom_free(struct iwl_priv *priv);
+void iwl_eeprom_free(struct iwl_shared *shrd);
 int  iwl_eeprom_check_version(struct iwl_priv *priv);
 int  iwl_eeprom_check_sku(struct iwl_priv *priv);
-const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset);
-u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset);
+const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset);
+u16 iwl_eeprom_query16(const struct iwl_shared *shrd, size_t offset);
 int iwl_init_channel_map(struct iwl_priv *priv);
 void iwl_free_channel_map(struct iwl_priv *priv);
 const struct iwl_channel_info *iwl_get_channel_info(
index 05b1f0d..e3944f4 100644 (file)
@@ -427,7 +427,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
        iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
                          CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
 
-       base = priv->device_pointers.error_event_table;
+       base = priv->shrd->device_pointers.error_event_table;
        if (iwlagn_hw_valid_rtc_data_addr(base)) {
                spin_lock_irqsave(&bus(priv)->reg_lock, flags);
                ret = iwl_grab_nic_access_silent(bus(priv));
@@ -481,15 +481,11 @@ static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct iwl_priv *priv = hw->priv;
 
-       IWL_DEBUG_MACDUMP(priv, "enter\n");
-
        IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
                     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
 
        if (iwlagn_tx_skb(priv, skb))
                dev_kfree_skb_any(skb);
-
-       IWL_DEBUG_MACDUMP(priv, "leave\n");
 }
 
 static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
@@ -521,6 +517,17 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                return -EOPNOTSUPP;
        }
 
+       switch (key->cipher) {
+       case WLAN_CIPHER_SUITE_TKIP:
+               key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+               /* fall through */
+       case WLAN_CIPHER_SUITE_CCMP:
+               key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+               break;
+       default:
+               break;
+       }
+
        /*
         * We could program these keys into the hardware as well, but we
         * don't expect much multicast traffic in IBSS and having keys
@@ -804,21 +811,9 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
 
        /* Configure HT40 channels */
        ctx->ht.enabled = conf_is_ht(conf);
-       if (ctx->ht.enabled) {
-               if (conf_is_ht40_minus(conf)) {
-                       ctx->ht.extension_chan_offset =
-                               IEEE80211_HT_PARAM_CHA_SEC_BELOW;
-                       ctx->ht.is_40mhz = true;
-               } else if (conf_is_ht40_plus(conf)) {
-                       ctx->ht.extension_chan_offset =
-                               IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
-                       ctx->ht.is_40mhz = true;
-               } else {
-                       ctx->ht.extension_chan_offset =
-                               IEEE80211_HT_PARAM_CHA_SEC_NONE;
-                       ctx->ht.is_40mhz = false;
-               }
-       } else
+       if (ctx->ht.enabled)
+               iwlagn_config_ht40(conf, ctx);
+       else
                ctx->ht.is_40mhz = false;
 
        if ((le16_to_cpu(ctx->staging.channel) != ch))
@@ -1053,6 +1048,9 @@ static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw,
        int ret;
        u8 sta_id;
 
+       if (ctx->ctxid != IWL_RXON_CTX_PAN)
+               return 0;
+
        IWL_DEBUG_MAC80211(priv, "enter\n");
        mutex_lock(&priv->shrd->mutex);
 
@@ -1102,6 +1100,9 @@ static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw,
        struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
        struct iwl_rxon_context *ctx = vif_priv->ctx;
 
+       if (ctx->ctxid != IWL_RXON_CTX_PAN)
+               return;
+
        IWL_DEBUG_MAC80211(priv, "enter\n");
        mutex_lock(&priv->shrd->mutex);
 
index 1f7a93c..29a7284 100644 (file)
@@ -97,6 +97,7 @@
 struct iwl_cfg;
 struct iwl_bus;
 struct iwl_priv;
+struct iwl_trans;
 struct iwl_sensitivity_ranges;
 struct iwl_trans_ops;
 
@@ -120,7 +121,7 @@ extern struct iwl_mod_params iwlagn_mod_params;
  * @restart_fw: restart firmware, default = 1
  * @plcp_check: enable plcp health check, default = true
  * @ack_check: disable ack health check, default = false
- * @wd_disable: enable stuck queue check, default = false
+ * @wd_disable: enable stuck queue check, default = 0
  * @bt_coex_active: enable bt coex, default = true
  * @led_mode: system default, default = 0
  * @no_sleep_autoadjust: disable autoadjust, default = true
@@ -141,7 +142,7 @@ struct iwl_mod_params {
        int restart_fw;
        bool plcp_check;
        bool ack_check;
-       bool wd_disable;
+       int  wd_disable;
        bool bt_coex_active;
        int led_mode;
        bool no_sleep_autoadjust;
@@ -174,7 +175,6 @@ struct iwl_mod_params {
  * @ct_kill_exit_threshold: when to reeable the device - in hw dependent unit
  *     relevant for 1000, 6000 and up
  * @wd_timeout: TX queues watchdog timeout
- * @calib_init_cfg: setup initial calibrations for the hw
  * @calib_rt_cfg: setup runtime calibrations for the hw
  * @struct iwl_sensitivity_ranges: range of sensitivity values
  */
@@ -195,7 +195,6 @@ struct iwl_hw_params {
        u32 ct_kill_exit_threshold;
        unsigned int wd_timeout;
 
-       u32 calib_init_cfg;
        u32 calib_rt_cfg;
        const struct iwl_sensitivity_ranges *sens;
 };
@@ -258,6 +257,52 @@ struct iwl_tid_data {
        struct iwl_ht_agg agg;
 };
 
+/**
+ * enum iwl_ucode_type
+ *
+ * The type of ucode currently loaded on the hardware.
+ *
+ * @IWL_UCODE_NONE: No ucode loaded
+ * @IWL_UCODE_REGULAR: Normal runtime ucode
+ * @IWL_UCODE_INIT: Initial ucode
+ * @IWL_UCODE_WOWLAN: Wake on Wireless enabled ucode
+ */
+enum iwl_ucode_type {
+       IWL_UCODE_NONE,
+       IWL_UCODE_REGULAR,
+       IWL_UCODE_INIT,
+       IWL_UCODE_WOWLAN,
+};
+
+/**
+ * struct iwl_notification_wait - notification wait entry
+ * @list: list head for global list
+ * @fn: function called with the notification
+ * @cmd: command ID
+ *
+ * This structure is not used directly; to wait for a
+ * notification, declare it on the stack and call
+ * iwl_init_notification_wait() with the appropriate
+ * parameters. Then do whatever will cause the ucode
+ * to notify the driver, and wait for that by calling
+ * iwl_wait_notification().
+ *
+ * Each notification is one-shot. If at some point we
+ * need to support multi-shot notifications (which
+ * can't be allocated on the stack) we need to modify
+ * the code for them.
+ */
+struct iwl_notification_wait {
+       struct list_head list;
+
+       void (*fn)(struct iwl_trans *trans, struct iwl_rx_packet *pkt,
+                  void *data);
+       void *fn_data;
+
+       u8 cmd;
+       bool triggered, aborted;
+};
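As the comment above describes, a waiter lives on the stack for the duration of one request. Below is a hedged sketch of that calling sequence using the iwl_init_notification_wait()/iwl_wait_notification() prototypes this patch adds to iwl-shared.h; it assumes driver context and is not compilable on its own, and the callback body and the command that triggers the notification are placeholders.

/* Sketch only: assumes shrd and the usual driver headers are in scope. */
static void calib_complete_fn(struct iwl_trans *trans,
			      struct iwl_rx_packet *pkt, void *data)
{
	/* inspect pkt here; one-shot, runs when the notification arrives */
}

static int wait_for_calib_done(struct iwl_shared *shrd)
{
	struct iwl_notification_wait wait_entry;	/* lives on the stack */
	int ret;

	iwl_init_notification_wait(shrd, &wait_entry,
				   CALIBRATION_COMPLETE_NOTIFICATION,
				   calib_complete_fn, NULL);

	/* ... send whatever host command makes the uCode notify us ... */

	ret = iwl_wait_notification(shrd, &wait_entry, 2 * HZ);
	/* on an error path before waiting, iwl_remove_notification()
	 * drops the entry instead */
	return ret;
}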
+
 /**
  * struct iwl_shared - shared fields for all the layers of the driver
  *
@@ -275,6 +320,11 @@ struct iwl_tid_data {
  * @sta_lock: protects the station table.
  *     If lock and sta_lock are needed, lock must be acquired first.
  * @mutex:
+ * @ucode_type: indicator of loaded ucode image
+ * @notif_waits: things waiting for notification
+ * @notif_wait_lock: lock protecting the notification wait list
+ * @notif_waitq: head of notification wait queue
+ * @device_pointers: pointers to ucode event tables
  */
 struct iwl_shared {
 #ifdef CONFIG_IWLWIFI_DEBUG
@@ -302,6 +352,23 @@ struct iwl_shared {
        struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
 
        wait_queue_head_t wait_command_queue;
+
+       /* eeprom -- this is in the card's little endian byte order */
+       u8 *eeprom;
+
+       /* ucode related variables */
+       enum iwl_ucode_type ucode_type;
+
+       /* notification wait support */
+       struct list_head notif_waits;
+       spinlock_t notif_wait_lock;
+       wait_queue_head_t notif_waitq;
+
+       struct {
+               u32 error_event_table;
+               u32 log_event_table;
+       } device_pointers;
+
 };
 
 /*Whatever _m is (iwl_trans, iwl_priv, iwl_bus, these macros will work */
@@ -445,6 +512,24 @@ bool iwl_check_for_ct_kill(struct iwl_priv *priv);
 void iwl_stop_sw_queue(struct iwl_priv *priv, u8 ac);
 void iwl_wake_sw_queue(struct iwl_priv *priv, u8 ac);
 
+/* notification wait support */
+void iwl_abort_notification_waits(struct iwl_shared *shrd);
+void __acquires(wait_entry)
+iwl_init_notification_wait(struct iwl_shared *shrd,
+                             struct iwl_notification_wait *wait_entry,
+                             u8 cmd,
+                             void (*fn)(struct iwl_trans *trans,
+                                        struct iwl_rx_packet *pkt,
+                                        void *data),
+                             void *fn_data);
+int __must_check __releases(wait_entry)
+iwl_wait_notification(struct iwl_shared *shrd,
+                        struct iwl_notification_wait *wait_entry,
+                        unsigned long timeout);
+void __releases(wait_entry)
+iwl_remove_notification(struct iwl_shared *shrd,
+                          struct iwl_notification_wait *wait_entry);
+
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 void iwl_reset_traffic_log(struct iwl_priv *priv);
 #endif /* CONFIG_IWLWIFI_DEBUGFS */
diff --git a/drivers/net/wireless/iwlwifi/iwl-sv-open.c b/drivers/net/wireless/iwlwifi/iwl-sv-open.c
deleted file mode 100644 (file)
index e3882d0..0000000
+++ /dev/null
@@ -1,751 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2010 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2010 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/dma-mapping.h>
-#include <net/net_namespace.h>
-#include <linux/netdevice.h>
-#include <net/cfg80211.h>
-#include <net/mac80211.h>
-#include <net/netlink.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-debug.h"
-#include "iwl-io.h"
-#include "iwl-agn.h"
-#include "iwl-testmode.h"
-#include "iwl-trans.h"
-
-/* The TLVs used in the gnl message policy between the kernel module and
- * user space application. iwl_testmode_gnl_msg_policy is to be carried
- * through the NL80211_CMD_TESTMODE channel regulated by nl80211.
- * See iwl-testmode.h
- */
-static
-struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
-       [IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, },
-
-       [IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, },
-       [IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, },
-
-       [IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, },
-       [IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, },
-       [IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, },
-
-       [IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, },
-       [IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, },
-
-       [IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, },
-
-       [IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, },
-       [IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, },
-       [IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, },
-
-       [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
-
-       [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
-};
-
-/*
- * See the struct iwl_rx_packet in iwl-commands.h for the format of the
- * received events from the device
- */
-static inline int get_event_length(struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       if (pkt)
-               return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
-       else
-               return 0;
-}
-
-
-/*
- * This function multicasts the spontaneous messages from the device to the
- * user space. It is invoked whenever there is a received messages
- * from the device. This function is called within the ISR of the rx handlers
- * in iwlagn driver.
- *
- * The parsing of the message content is left to the user space application,
- * The message content is treated as unattacked raw data and is encapsulated
- * with IWL_TM_ATTR_UCODE_RX_PKT multicasting to the user space.
- *
- * @priv: the instance of iwlwifi device
- * @rxb: pointer to rx data content received by the ISR
- *
- * See the message policies and TLVs in iwl_testmode_gnl_msg_policy[].
- * For the messages multicasting to the user application, the mandatory
- * TLV fields are :
- *     IWL_TM_ATTR_COMMAND must be IWL_TM_CMD_DEV2APP_UCODE_RX_PKT
- *     IWL_TM_ATTR_UCODE_RX_PKT for carrying the message content
- */
-
-static void iwl_testmode_ucode_rx_pkt(struct iwl_priv *priv,
-                               struct iwl_rx_mem_buffer *rxb)
-{
-       struct ieee80211_hw *hw = priv->hw;
-       struct sk_buff *skb;
-       void *data;
-       int length;
-
-       data = (void *)rxb_addr(rxb);
-       length = get_event_length(rxb);
-
-       if (!data || length == 0)
-               return;
-
-       skb = cfg80211_testmode_alloc_event_skb(hw->wiphy, 20 + length,
-                                                               GFP_ATOMIC);
-       if (skb == NULL) {
-               IWL_DEBUG_INFO(priv,
-                        "Run out of memory for messages to user space ?\n");
-               return;
-       }
-       NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT);
-       NLA_PUT(skb, IWL_TM_ATTR_UCODE_RX_PKT, length, data);
-       cfg80211_testmode_event(skb, GFP_ATOMIC);
-       return;
-
-nla_put_failure:
-       kfree_skb(skb);
-       IWL_DEBUG_INFO(priv, "Ouch, overran buffer, check allocation!\n");
-}
-
-void iwl_testmode_init(struct iwl_priv *priv)
-{
-       priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
-       priv->testmode_trace.trace_enabled = false;
-}
-
-static void iwl_trace_cleanup(struct iwl_priv *priv)
-{
-       if (priv->testmode_trace.trace_enabled) {
-               if (priv->testmode_trace.cpu_addr &&
-                   priv->testmode_trace.dma_addr)
-                       dma_free_coherent(bus(priv)->dev,
-                                       priv->testmode_trace.total_size,
-                                       priv->testmode_trace.cpu_addr,
-                                       priv->testmode_trace.dma_addr);
-               priv->testmode_trace.trace_enabled = false;
-               priv->testmode_trace.cpu_addr = NULL;
-               priv->testmode_trace.trace_addr = NULL;
-               priv->testmode_trace.dma_addr = 0;
-               priv->testmode_trace.buff_size = 0;
-               priv->testmode_trace.total_size = 0;
-       }
-}
-
-
-void iwl_testmode_cleanup(struct iwl_priv *priv)
-{
-       iwl_trace_cleanup(priv);
-}
-
-/*
- * This function handles the user application commands to the ucode.
- *
- * It retrieves the mandatory fields IWL_TM_ATTR_UCODE_CMD_ID and
- * IWL_TM_ATTR_UCODE_CMD_DATA and calls to the handler to send the
- * host command to the ucode.
- *
- * If any mandatory field is missing, -ENOMSG is replied to the user space
- * application; otherwise, the actual execution result of the host command to
- * ucode is replied.
- *
- * @hw: ieee80211_hw object that represents the device
- * @tb: gnl message fields from the user space
- */
-static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct iwl_host_cmd cmd;
-
-       memset(&cmd, 0, sizeof(struct iwl_host_cmd));
-
-       if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] ||
-           !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) {
-               IWL_DEBUG_INFO(priv,
-                       "Error finding ucode command mandatory fields\n");
-               return -ENOMSG;
-       }
-
-       cmd.flags = CMD_ON_DEMAND;
-       cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
-       cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
-       cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
-       cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
-       IWL_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x,"
-                               " len %d\n", cmd.id, cmd.flags, cmd.len[0]);
-       /* ok, let's submit the command to ucode */
-       return iwl_trans_send_cmd(trans(priv), &cmd);
-}
-
-
-/*
- * This function handles the user application commands for register access.
- *
- * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the
- * handlers respectively.
- *
- * If it's an unknown commdn ID, -ENOSYS is returned; or -ENOMSG if the
- * mandatory fields(IWL_TM_ATTR_REG_OFFSET,IWL_TM_ATTR_REG_VALUE32,
- * IWL_TM_ATTR_REG_VALUE8) are missing; Otherwise 0 is replied indicating
- * the success of the command execution.
- *
- * If IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_REG_READ32, the register read
- * value is returned with IWL_TM_ATTR_REG_VALUE32.
- *
- * @hw: ieee80211_hw object that represents the device
- * @tb: gnl message fields from the user space
- */
-static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
-{
-       struct iwl_priv *priv = hw->priv;
-       u32 ofs, val32;
-       u8 val8;
-       struct sk_buff *skb;
-       int status = 0;
-
-       if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
-               IWL_DEBUG_INFO(priv, "Error finding register offset\n");
-               return -ENOMSG;
-       }
-       ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
-       IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs);
-
-       switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
-       case IWL_TM_CMD_APP2DEV_REG_READ32:
-               val32 = iwl_read32(bus(priv), ofs);
-               IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
-
-               skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
-               if (!skb) {
-                       IWL_DEBUG_INFO(priv, "Error allocating memory\n");
-                       return -ENOMEM;
-               }
-               NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32);
-               status = cfg80211_testmode_reply(skb);
-               if (status < 0)
-                       IWL_DEBUG_INFO(priv,
-                                      "Error sending msg : %d\n", status);
-               break;
-       case IWL_TM_CMD_APP2DEV_REG_WRITE32:
-               if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
-                       IWL_DEBUG_INFO(priv,
-                                      "Error finding value to write\n");
-                       return -ENOMSG;
-               } else {
-                       val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
-                       IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
-                       iwl_write32(bus(priv), ofs, val32);
-               }
-               break;
-       case IWL_TM_CMD_APP2DEV_REG_WRITE8:
-               if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
-                       IWL_DEBUG_INFO(priv, "Error finding value to write\n");
-                       return -ENOMSG;
-               } else {
-                       val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
-                       IWL_INFO(priv, "8bit value to write 0x%x\n", val8);
-                       iwl_write8(bus(priv), ofs, val8);
-               }
-               break;
-       default:
-               IWL_DEBUG_INFO(priv, "Unknown testmode register command ID\n");
-               return -ENOSYS;
-       }
-
-       return status;
-
-nla_put_failure:
-       kfree_skb(skb);
-       return -EMSGSIZE;
-}
-
-
-static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
-{
-       struct iwl_notification_wait calib_wait;
-       int ret;
-
-       iwlagn_init_notification_wait(priv, &calib_wait,
-                                     CALIBRATION_COMPLETE_NOTIFICATION,
-                                     NULL, NULL);
-       ret = iwlagn_init_alive_start(priv);
-       if (ret) {
-               IWL_DEBUG_INFO(priv,
-                       "Error configuring init calibration: %d\n", ret);
-               goto cfg_init_calib_error;
-       }
-
-       ret = iwlagn_wait_notification(priv, &calib_wait, 2 * HZ);
-       if (ret)
-               IWL_DEBUG_INFO(priv, "Error detecting"
-                       " CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret);
-       return ret;
-
-cfg_init_calib_error:
-       iwlagn_remove_notification(priv, &calib_wait);
-       return ret;
-}
-
-/*
- * This function handles the user application commands for driver.
- *
- * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the
- * handlers respectively.
- *
- * If it's an unknown commdn ID, -ENOSYS is replied; otherwise, the returned
- * value of the actual command execution is replied to the user application.
- *
- * If there's any message responding to the user space, IWL_TM_ATTR_SYNC_RSP
- * is used for carry the message while IWL_TM_ATTR_COMMAND must set to
- * IWL_TM_CMD_DEV2APP_SYNC_RSP.
- *
- * @hw: ieee80211_hw object that represents the device
- * @tb: gnl message fields from the user space
- */
-static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct sk_buff *skb;
-       unsigned char *rsp_data_ptr = NULL;
-       int status = 0, rsp_data_len = 0;
-
-       switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
-       case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
-               rsp_data_ptr = (unsigned char *)priv->cfg->name;
-               rsp_data_len = strlen(priv->cfg->name);
-               skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
-                                                       rsp_data_len + 20);
-               if (!skb) {
-                       IWL_DEBUG_INFO(priv,
-                                      "Error allocating memory\n");
-                       return -ENOMEM;
-               }
-               NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND,
-                           IWL_TM_CMD_DEV2APP_SYNC_RSP);
-               NLA_PUT(skb, IWL_TM_ATTR_SYNC_RSP,
-                       rsp_data_len, rsp_data_ptr);
-               status = cfg80211_testmode_reply(skb);
-               if (status < 0)
-                       IWL_DEBUG_INFO(priv, "Error sending msg : %d\n",
-                                      status);
-               break;
-
-       case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
-               status = iwlagn_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
-               if (status)
-                       IWL_DEBUG_INFO(priv,
-                               "Error loading init ucode: %d\n", status);
-               break;
-
-       case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
-               iwl_testmode_cfg_init_calib(priv);
-               iwl_trans_stop_device(trans(priv));
-               break;
-
-       case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
-               status = iwlagn_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
-               if (status) {
-                       IWL_DEBUG_INFO(priv,
-                               "Error loading runtime ucode: %d\n", status);
-                       break;
-               }
-               status = iwl_alive_start(priv);
-               if (status)
-                       IWL_DEBUG_INFO(priv,
-                               "Error starting the device: %d\n", status);
-               break;
-
-       case IWL_TM_CMD_APP2DEV_GET_EEPROM:
-               if (priv->eeprom) {
-                       skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
-                               priv->cfg->base_params->eeprom_size + 20);
-                       if (!skb) {
-                               IWL_DEBUG_INFO(priv,
-                                      "Error allocating memory\n");
-                               return -ENOMEM;
-                       }
-                       NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND,
-                               IWL_TM_CMD_DEV2APP_EEPROM_RSP);
-                       NLA_PUT(skb, IWL_TM_ATTR_EEPROM,
-                               priv->cfg->base_params->eeprom_size,
-                               priv->eeprom);
-                       status = cfg80211_testmode_reply(skb);
-                       if (status < 0)
-                               IWL_DEBUG_INFO(priv,
-                                              "Error sending msg : %d\n",
-                                              status);
-               } else
-                       return -EFAULT;
-               break;
-
-       case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
-               if (!tb[IWL_TM_ATTR_FIXRATE]) {
-                       IWL_DEBUG_INFO(priv,
-                                      "Error finding fixrate setting\n");
-                       return -ENOMSG;
-               }
-               priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
-               break;
-
-       default:
-               IWL_DEBUG_INFO(priv, "Unknown testmode driver command ID\n");
-               return -ENOSYS;
-       }
-       return status;
-
-nla_put_failure:
-       kfree_skb(skb);
-       return -EMSGSIZE;
-}
-
-
-/*
- * This function handles the user application commands for uCode trace
- *
- * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the
- * handlers respectively.
- *
- * If it's an unknown commdn ID, -ENOSYS is replied; otherwise, the returned
- * value of the actual command execution is replied to the user application.
- *
- * @hw: ieee80211_hw object that represents the device
- * @tb: gnl message fields from the user space
- */
-static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct sk_buff *skb;
-       int status = 0;
-       struct device *dev = bus(priv)->dev;
-
-       switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
-       case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
-               if (priv->testmode_trace.trace_enabled)
-                       return -EBUSY;
-
-               if (!tb[IWL_TM_ATTR_TRACE_SIZE])
-                       priv->testmode_trace.buff_size = TRACE_BUFF_SIZE_DEF;
-               else
-                       priv->testmode_trace.buff_size =
-                               nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]);
-               if (!priv->testmode_trace.buff_size)
-                       return -EINVAL;
-               if (priv->testmode_trace.buff_size < TRACE_BUFF_SIZE_MIN ||
-                   priv->testmode_trace.buff_size > TRACE_BUFF_SIZE_MAX)
-                       return -EINVAL;
-
-               priv->testmode_trace.total_size =
-                       priv->testmode_trace.buff_size + TRACE_BUFF_PADD;
-               priv->testmode_trace.cpu_addr =
-                       dma_alloc_coherent(dev,
-                                          priv->testmode_trace.total_size,
-                                          &priv->testmode_trace.dma_addr,
-                                          GFP_KERNEL);
-               if (!priv->testmode_trace.cpu_addr)
-                       return -ENOMEM;
-               priv->testmode_trace.trace_enabled = true;
-               priv->testmode_trace.trace_addr = (u8 *)PTR_ALIGN(
-                       priv->testmode_trace.cpu_addr, 0x100);
-               memset(priv->testmode_trace.trace_addr, 0x03B,
-                       priv->testmode_trace.buff_size);
-               skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
-                       sizeof(priv->testmode_trace.dma_addr) + 20);
-               if (!skb) {
-                       IWL_DEBUG_INFO(priv,
-                               "Error allocating memory\n");
-                       iwl_trace_cleanup(priv);
-                       return -ENOMEM;
-               }
-               NLA_PUT(skb, IWL_TM_ATTR_TRACE_ADDR,
-                       sizeof(priv->testmode_trace.dma_addr),
-                       (u64 *)&priv->testmode_trace.dma_addr);
-               status = cfg80211_testmode_reply(skb);
-               if (status < 0) {
-                       IWL_DEBUG_INFO(priv,
-                                      "Error sending msg : %d\n",
-                                      status);
-               }
-               priv->testmode_trace.num_chunks =
-                       DIV_ROUND_UP(priv->testmode_trace.buff_size,
-                                    TRACE_CHUNK_SIZE);
-               break;
-
-       case IWL_TM_CMD_APP2DEV_END_TRACE:
-               iwl_trace_cleanup(priv);
-               break;
-       default:
-               IWL_DEBUG_INFO(priv, "Unknown testmode mem command ID\n");
-               return -ENOSYS;
-       }
-       return status;
-
-nla_put_failure:
-       kfree_skb(skb);
-       if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) ==
-           IWL_TM_CMD_APP2DEV_BEGIN_TRACE)
-               iwl_trace_cleanup(priv);
-       return -EMSGSIZE;
-}
-
-static int iwl_testmode_trace_dump(struct ieee80211_hw *hw, struct nlattr **tb,
-                                  struct sk_buff *skb,
-                                  struct netlink_callback *cb)
-{
-       struct iwl_priv *priv = hw->priv;
-       int idx, length;
-
-       if (priv->testmode_trace.trace_enabled &&
-           priv->testmode_trace.trace_addr) {
-               idx = cb->args[4];
-               if (idx >= priv->testmode_trace.num_chunks)
-                       return -ENOENT;
-               length = TRACE_CHUNK_SIZE;
-               if (((idx + 1) == priv->testmode_trace.num_chunks) &&
-                   (priv->testmode_trace.buff_size % TRACE_CHUNK_SIZE))
-                       length = priv->testmode_trace.buff_size %
-                               TRACE_CHUNK_SIZE;
-
-               NLA_PUT(skb, IWL_TM_ATTR_TRACE_DUMP, length,
-                       priv->testmode_trace.trace_addr +
-                       (TRACE_CHUNK_SIZE * idx));
-               idx++;
-               cb->args[4] = idx;
-               return 0;
-       } else
-               return -EFAULT;
-
- nla_put_failure:
-       return -ENOBUFS;
-}
-
-/*
- * This function handles the user application switch ucode ownership.
- *
- * It retrieves the mandatory fields IWL_TM_ATTR_UCODE_OWNER and
- * decide who the current owner of the uCode
- *
- * If the current owner is OWNERSHIP_TM, then the only host command
- * can deliver to uCode is from testmode, all the other host commands
- * will dropped.
- *
- * default driver is the owner of uCode in normal operational mode
- *
- * @hw: ieee80211_hw object that represents the device
- * @tb: gnl message fields from the user space
- */
-static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
-{
-       struct iwl_priv *priv = hw->priv;
-       u8 owner;
-
-       if (!tb[IWL_TM_ATTR_UCODE_OWNER]) {
-               IWL_DEBUG_INFO(priv, "Error finding ucode owner\n");
-               return -ENOMSG;
-       }
-
-       owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]);
-       if ((owner == IWL_OWNERSHIP_DRIVER) || (owner == IWL_OWNERSHIP_TM))
-               priv->shrd->ucode_owner = owner;
-       else {
-               IWL_DEBUG_INFO(priv, "Invalid owner\n");
-               return -EINVAL;
-       }
-       return 0;
-}
-
-
-/* The testmode gnl message handler that takes the gnl message from the
- * user space and parses it per the policy iwl_testmode_gnl_msg_policy, then
- * invoke the corresponding handlers.
- *
- * This function is invoked when there is user space application sending
- * gnl message through the testmode tunnel NL80211_CMD_TESTMODE regulated
- * by nl80211.
- *
- * It retrieves the mandatory field, IWL_TM_ATTR_COMMAND, before
- * dispatching it to the corresponding handler.
- *
- * If IWL_TM_ATTR_COMMAND is missing, -ENOMSG is replied to user application;
- * -ENOSYS is replied to the user application if the command is unknown;
- * Otherwise, the command is dispatched to the respective handler.
- *
- * @hw: ieee80211_hw object that represents the device
- * @data: pointer to user space message
- * @len: length in byte of @data
- */
-int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
-{
-       struct nlattr *tb[IWL_TM_ATTR_MAX];
-       struct iwl_priv *priv = hw->priv;
-       int result;
-
-       result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
-                       iwl_testmode_gnl_msg_policy);
-       if (result != 0) {
-               IWL_DEBUG_INFO(priv,
-                              "Error parsing the gnl message : %d\n", result);
-               return result;
-       }
-
-       /* IWL_TM_ATTR_COMMAND is absolutely mandatory */
-       if (!tb[IWL_TM_ATTR_COMMAND]) {
-               IWL_DEBUG_INFO(priv, "Error finding testmode command type\n");
-               return -ENOMSG;
-       }
-       /* in case multiple accesses to the device happens */
-       mutex_lock(&priv->shrd->mutex);
-
-       switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
-       case IWL_TM_CMD_APP2DEV_UCODE:
-               IWL_DEBUG_INFO(priv, "testmode cmd to uCode\n");
-               result = iwl_testmode_ucode(hw, tb);
-               break;
-       case IWL_TM_CMD_APP2DEV_REG_READ32:
-       case IWL_TM_CMD_APP2DEV_REG_WRITE32:
-       case IWL_TM_CMD_APP2DEV_REG_WRITE8:
-               IWL_DEBUG_INFO(priv, "testmode cmd to register\n");
-               result = iwl_testmode_reg(hw, tb);
-               break;
-       case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
-       case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
-       case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
-       case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
-       case IWL_TM_CMD_APP2DEV_GET_EEPROM:
-       case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
-               IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
-               result = iwl_testmode_driver(hw, tb);
-               break;
-
-       case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
-       case IWL_TM_CMD_APP2DEV_END_TRACE:
-       case IWL_TM_CMD_APP2DEV_READ_TRACE:
-               IWL_DEBUG_INFO(priv, "testmode uCode trace cmd to driver\n");
-               result = iwl_testmode_trace(hw, tb);
-               break;
-
-       case IWL_TM_CMD_APP2DEV_OWNERSHIP:
-               IWL_DEBUG_INFO(priv, "testmode change uCode ownership\n");
-               result = iwl_testmode_ownership(hw, tb);
-               break;
-
-       default:
-               IWL_DEBUG_INFO(priv, "Unknown testmode command\n");
-               result = -ENOSYS;
-               break;
-       }
-
-       mutex_unlock(&priv->shrd->mutex);
-       return result;
-}
-
-int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
-                     struct netlink_callback *cb,
-                     void *data, int len)
-{
-       struct nlattr *tb[IWL_TM_ATTR_MAX];
-       struct iwl_priv *priv = hw->priv;
-       int result;
-       u32 cmd;
-
-       if (cb->args[3]) {
-               /* offset by 1 since commands start at 0 */
-               cmd = cb->args[3] - 1;
-       } else {
-               result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
-                               iwl_testmode_gnl_msg_policy);
-               if (result) {
-                       IWL_DEBUG_INFO(priv,
-                              "Error parsing the gnl message : %d\n", result);
-                       return result;
-               }
-
-               /* IWL_TM_ATTR_COMMAND is absolutely mandatory */
-               if (!tb[IWL_TM_ATTR_COMMAND]) {
-                       IWL_DEBUG_INFO(priv,
-                               "Error finding testmode command type\n");
-                       return -ENOMSG;
-               }
-               cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
-               cb->args[3] = cmd + 1;
-       }
-
-       /* in case multiple accesses to the device happens */
-       mutex_lock(&priv->shrd->mutex);
-       switch (cmd) {
-       case IWL_TM_CMD_APP2DEV_READ_TRACE:
-               IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n");
-               result = iwl_testmode_trace_dump(hw, tb, skb, cb);
-               break;
-       default:
-               result = -EINVAL;
-               break;
-       }
-
-       mutex_unlock(&priv->shrd->mutex);
-       return result;
-}
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.c b/drivers/net/wireless/iwlwifi/iwl-testmode.c
new file mode 100644 (file)
index 0000000..a874eb7
--- /dev/null
@@ -0,0 +1,970 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <net/net_namespace.h>
+#include <linux/netdevice.h>
+#include <net/cfg80211.h>
+#include <net/mac80211.h>
+#include <net/netlink.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-debug.h"
+#include "iwl-io.h"
+#include "iwl-agn.h"
+#include "iwl-testmode.h"
+#include "iwl-trans.h"
+#include "iwl-bus.h"
+
+/* The TLVs used in the gnl message policy between the kernel module and
+ * the user space application. Messages conforming to
+ * iwl_testmode_gnl_msg_policy are carried through the NL80211_CMD_TESTMODE
+ * channel regulated by nl80211.
+ * See iwl-testmode.h
+ */
+static
+struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
+       [IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, },
+
+       [IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, },
+       [IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, },
+
+       [IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, },
+       [IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, },
+       [IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, },
+
+       [IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, },
+       [IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, },
+
+       [IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, },
+
+       [IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, },
+       [IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, },
+       [IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, },
+
+       [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
+
+       [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
+
+       [IWL_TM_ATTR_SRAM_ADDR] = { .type = NLA_U32, },
+       [IWL_TM_ATTR_SRAM_SIZE] = { .type = NLA_U32, },
+       [IWL_TM_ATTR_SRAM_DUMP] = { .type = NLA_UNSPEC, },
+
+       [IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
+       [IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
+};
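
(For reference, user space reaches this policy through the nl80211 testmode
tunnel. The sketch below is illustrative only and is not part of this patch;
it assumes libnl-3, the IWL_TM_* definitions from iwl-testmode.h, and a
wireless interface named "wlan0". Error handling and message cleanup are
omitted for brevity.)

        #include <net/if.h>
        #include <netlink/genl/genl.h>
        #include <netlink/genl/ctrl.h>
        #include <linux/nl80211.h>
        #include "iwl-testmode.h"

        /* Ask the driver for its uCode version via NL80211_CMD_TESTMODE */
        static int send_get_fw_version(void)
        {
                struct nl_sock *sk = nl_socket_alloc();
                struct nl_msg *msg = nlmsg_alloc();
                struct nlattr *td;
                int family;

                genl_connect(sk);
                family = genl_ctrl_resolve(sk, "nl80211");

                genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                            NL80211_CMD_TESTMODE, 0);
                nla_put_u32(msg, NL80211_ATTR_IFINDEX,
                            if_nametoindex("wlan0"));

                /* driver-specific TLVs are nested in NL80211_ATTR_TESTDATA */
                td = nla_nest_start(msg, NL80211_ATTR_TESTDATA);
                nla_put_u32(msg, IWL_TM_ATTR_COMMAND,
                            IWL_TM_CMD_APP2DEV_GET_FW_VERSION);
                nla_nest_end(msg, td);

                return nl_send_auto(sk, msg);
        }

The reply, built by iwl_testmode_driver() further down in this file, carries
the version in IWL_TM_ATTR_FW_VERSION.
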
+
+/*
+ * See the struct iwl_rx_packet in iwl-commands.h for the format of the
+ * received events from the device
+ */
+static inline int get_event_length(struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       if (pkt)
+               return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+       else
+               return 0;
+}
+
+
+/*
+ * This function multicasts the spontaneous messages from the device to
+ * user space. It is invoked whenever a message is received from the
+ * device, from within the ISR of the rx handlers in the iwlagn driver.
+ *
+ * The parsing of the message content is left to the user space application;
+ * the message content is treated as untouched raw data and is encapsulated
+ * in IWL_TM_ATTR_UCODE_RX_PKT when multicast to the user space.
+ *
+ * @priv: the instance of iwlwifi device
+ * @rxb: pointer to rx data content received by the ISR
+ *
+ * See the message policies and TLVs in iwl_testmode_gnl_msg_policy[].
+ * For the messages multicasting to the user application, the mandatory
+ * TLV fields are :
+ *     IWL_TM_ATTR_COMMAND must be IWL_TM_CMD_DEV2APP_UCODE_RX_PKT
+ *     IWL_TM_ATTR_UCODE_RX_PKT for carrying the message content
+ */
+
+static void iwl_testmode_ucode_rx_pkt(struct iwl_priv *priv,
+                               struct iwl_rx_mem_buffer *rxb)
+{
+       struct ieee80211_hw *hw = priv->hw;
+       struct sk_buff *skb;
+       void *data;
+       int length;
+
+       data = (void *)rxb_addr(rxb);
+       length = get_event_length(rxb);
+
+       if (!data || length == 0)
+               return;
+
+       skb = cfg80211_testmode_alloc_event_skb(hw->wiphy, 20 + length,
+                                                               GFP_ATOMIC);
+       if (skb == NULL) {
+               IWL_DEBUG_INFO(priv,
+                        "Run out of memory for messages to user space ?\n");
+               return;
+       }
+       NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT);
+       NLA_PUT(skb, IWL_TM_ATTR_UCODE_RX_PKT, length, data);
+       cfg80211_testmode_event(skb, GFP_ATOMIC);
+       return;
+
+nla_put_failure:
+       kfree_skb(skb);
+       IWL_DEBUG_INFO(priv, "Ouch, overran buffer, check allocation!\n");
+}
+
+void iwl_testmode_init(struct iwl_priv *priv)
+{
+       priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
+       priv->testmode_trace.trace_enabled = false;
+       priv->testmode_sram.sram_readed = false;
+}
+
+static void iwl_sram_cleanup(struct iwl_priv *priv)
+{
+       if (priv->testmode_sram.sram_readed) {
+               kfree(priv->testmode_sram.buff_addr);
+               priv->testmode_sram.buff_addr = NULL;
+               priv->testmode_sram.buff_size = 0;
+               priv->testmode_sram.num_chunks = 0;
+               priv->testmode_sram.sram_readed = false;
+       }
+}
+
+static void iwl_trace_cleanup(struct iwl_priv *priv)
+{
+       if (priv->testmode_trace.trace_enabled) {
+               if (priv->testmode_trace.cpu_addr &&
+                   priv->testmode_trace.dma_addr)
+                       dma_free_coherent(bus(priv)->dev,
+                                       priv->testmode_trace.total_size,
+                                       priv->testmode_trace.cpu_addr,
+                                       priv->testmode_trace.dma_addr);
+               priv->testmode_trace.trace_enabled = false;
+               priv->testmode_trace.cpu_addr = NULL;
+               priv->testmode_trace.trace_addr = NULL;
+               priv->testmode_trace.dma_addr = 0;
+               priv->testmode_trace.buff_size = 0;
+               priv->testmode_trace.total_size = 0;
+       }
+}
+
+
+void iwl_testmode_cleanup(struct iwl_priv *priv)
+{
+       iwl_trace_cleanup(priv);
+       iwl_sram_cleanup(priv);
+}
+
+/*
+ * This function handles the user application commands to the ucode.
+ *
+ * It retrieves the mandatory fields IWL_TM_ATTR_UCODE_CMD_ID and
+ * IWL_TM_ATTR_UCODE_CMD_DATA and calls to the handler to send the
+ * host command to the ucode.
+ *
+ * If any mandatory field is missing, -ENOMSG is replied to the user space
+ * application; otherwise, the actual execution result of the host command to
+ * ucode is replied.
+ *
+ * @hw: ieee80211_hw object that represents the device
+ * @tb: gnl message fields from the user space
+ */
+static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_host_cmd cmd;
+
+       memset(&cmd, 0, sizeof(struct iwl_host_cmd));
+
+       if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] ||
+           !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) {
+               IWL_DEBUG_INFO(priv,
+                       "Error finding ucode command mandatory fields\n");
+               return -ENOMSG;
+       }
+
+       cmd.flags = CMD_ON_DEMAND;
+       cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
+       cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
+       cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
+       cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
+       IWL_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x,"
+                               " len %d\n", cmd.id, cmd.flags, cmd.len[0]);
+       /* ok, let's submit the command to ucode */
+       return iwl_trans_send_cmd(trans(priv), &cmd);
+}
+
+
+/*
+ * This function handles the user application commands for register access.
+ *
+ * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the
+ * handlers respectively.
+ *
+ * If it's an unknown command ID, -ENOSYS is returned; -ENOMSG is returned
+ * if the mandatory fields (IWL_TM_ATTR_REG_OFFSET, IWL_TM_ATTR_REG_VALUE32,
+ * IWL_TM_ATTR_REG_VALUE8) are missing; otherwise 0 is replied, indicating
+ * the success of the command execution.
+ *
+ * If IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_REG_READ32, the register read
+ * value is returned with IWL_TM_ATTR_REG_VALUE32.
+ *
+ * @hw: ieee80211_hw object that represents the device
+ * @tb: gnl message fields from the user space
+ */
+static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
+{
+       struct iwl_priv *priv = hw->priv;
+       u32 ofs, val32;
+       u8 val8;
+       struct sk_buff *skb;
+       int status = 0;
+
+       if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
+               IWL_DEBUG_INFO(priv, "Error finding register offset\n");
+               return -ENOMSG;
+       }
+       ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
+       IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs);
+
+       switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
+       case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
+               val32 = iwl_read32(bus(priv), ofs);
+               IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
+
+               skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
+               if (!skb) {
+                       IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+                       return -ENOMEM;
+               }
+               NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32);
+               status = cfg80211_testmode_reply(skb);
+               if (status < 0)
+                       IWL_DEBUG_INFO(priv,
+                                      "Error sending msg : %d\n", status);
+               break;
+       case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
+               if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
+                       IWL_DEBUG_INFO(priv,
+                                      "Error finding value to write\n");
+                       return -ENOMSG;
+               } else {
+                       val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
+                       IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
+                       iwl_write32(bus(priv), ofs, val32);
+               }
+               break;
+       case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
+               if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
+                       IWL_DEBUG_INFO(priv, "Error finding value to write\n");
+                       return -ENOMSG;
+               } else {
+                       val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
+                       IWL_INFO(priv, "8bit value to write 0x%x\n", val8);
+                       iwl_write8(bus(priv), ofs, val8);
+               }
+               break;
+       case IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
+               val32 = iwl_read_prph(bus(priv), ofs);
+               IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
+
+               skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
+               if (!skb) {
+                       IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+                       return -ENOMEM;
+               }
+               NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32);
+               status = cfg80211_testmode_reply(skb);
+               if (status < 0)
+                       IWL_DEBUG_INFO(priv,
+                                       "Error sending msg : %d\n", status);
+               break;
+       case IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
+               if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
+                       IWL_DEBUG_INFO(priv,
+                                       "Error finding value to write\n");
+                       return -ENOMSG;
+               } else {
+                       val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
+                       IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
+                       iwl_write_prph(bus(priv), ofs, val32);
+               }
+               break;
+       default:
+               IWL_DEBUG_INFO(priv, "Unknown testmode register command ID\n");
+               return -ENOSYS;
+       }
+
+       return status;
+
+nla_put_failure:
+       kfree_skb(skb);
+       return -EMSGSIZE;
+}
+
+
+static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
+{
+       struct iwl_notification_wait calib_wait;
+       int ret;
+
+       iwl_init_notification_wait(priv->shrd, &calib_wait,
+                                     CALIBRATION_COMPLETE_NOTIFICATION,
+                                     NULL, NULL);
+       ret = iwlagn_init_alive_start(priv);
+       if (ret) {
+               IWL_DEBUG_INFO(priv,
+                       "Error configuring init calibration: %d\n", ret);
+               goto cfg_init_calib_error;
+       }
+
+       ret = iwl_wait_notification(priv->shrd, &calib_wait, 2 * HZ);
+       if (ret)
+               IWL_DEBUG_INFO(priv, "Error detecting"
+                       " CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret);
+       return ret;
+
+cfg_init_calib_error:
+       iwl_remove_notification(priv->shrd, &calib_wait);
+       return ret;
+}
+
+/*
+ * This function handles the user application commands for the driver.
+ *
+ * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the
+ * handlers respectively.
+ *
+ * If it's an unknown command ID, -ENOSYS is replied; otherwise, the returned
+ * value of the actual command execution is replied to the user application.
+ *
+ * If there's any message responding to the user space, IWL_TM_ATTR_SYNC_RSP
+ * is used to carry the message, while IWL_TM_ATTR_COMMAND must be set to
+ * IWL_TM_CMD_DEV2APP_SYNC_RSP.
+ *
+ * @hw: ieee80211_hw object that represents the device
+ * @tb: gnl message fields from the user space
+ */
+static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct sk_buff *skb;
+       unsigned char *rsp_data_ptr = NULL;
+       int status = 0, rsp_data_len = 0;
+       char buf[32], *ptr = NULL;
+       unsigned int num, devid;
+
+       switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
+       case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
+               rsp_data_ptr = (unsigned char *)priv->cfg->name;
+               rsp_data_len = strlen(priv->cfg->name);
+               skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
+                                                       rsp_data_len + 20);
+               if (!skb) {
+                       IWL_DEBUG_INFO(priv,
+                                      "Error allocating memory\n");
+                       return -ENOMEM;
+               }
+               NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND,
+                           IWL_TM_CMD_DEV2APP_SYNC_RSP);
+               NLA_PUT(skb, IWL_TM_ATTR_SYNC_RSP,
+                       rsp_data_len, rsp_data_ptr);
+               status = cfg80211_testmode_reply(skb);
+               if (status < 0)
+                       IWL_DEBUG_INFO(priv, "Error sending msg : %d\n",
+                                      status);
+               break;
+
+       case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
+               status = iwlagn_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
+               if (status)
+                       IWL_DEBUG_INFO(priv,
+                               "Error loading init ucode: %d\n", status);
+               break;
+
+       case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
+               iwl_testmode_cfg_init_calib(priv);
+               iwl_trans_stop_device(trans(priv));
+               break;
+
+       case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
+               status = iwlagn_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
+               if (status) {
+                       IWL_DEBUG_INFO(priv,
+                               "Error loading runtime ucode: %d\n", status);
+                       break;
+               }
+               status = iwl_alive_start(priv);
+               if (status)
+                       IWL_DEBUG_INFO(priv,
+                               "Error starting the device: %d\n", status);
+               break;
+
+       case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
+               iwl_scan_cancel_timeout(priv, 200);
+               iwl_trans_stop_device(trans(priv));
+               status = iwlagn_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
+               if (status) {
+                       IWL_DEBUG_INFO(priv,
+                               "Error loading WOWLAN ucode: %d\n", status);
+                       break;
+               }
+               status = iwl_alive_start(priv);
+               if (status)
+                       IWL_DEBUG_INFO(priv,
+                               "Error starting the device: %d\n", status);
+               break;
+
+       case IWL_TM_CMD_APP2DEV_GET_EEPROM:
+               if (priv->shrd->eeprom) {
+                       skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
+                               priv->cfg->base_params->eeprom_size + 20);
+                       if (!skb) {
+                               IWL_DEBUG_INFO(priv,
+                                      "Error allocating memory\n");
+                               return -ENOMEM;
+                       }
+                       NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND,
+                               IWL_TM_CMD_DEV2APP_EEPROM_RSP);
+                       NLA_PUT(skb, IWL_TM_ATTR_EEPROM,
+                               priv->cfg->base_params->eeprom_size,
+                               priv->shrd->eeprom);
+                       status = cfg80211_testmode_reply(skb);
+                       if (status < 0)
+                               IWL_DEBUG_INFO(priv,
+                                              "Error sending msg : %d\n",
+                                              status);
+               } else
+                       return -EFAULT;
+               break;
+
+       case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
+               if (!tb[IWL_TM_ATTR_FIXRATE]) {
+                       IWL_DEBUG_INFO(priv,
+                                      "Error finding fixrate setting\n");
+                       return -ENOMSG;
+               }
+               priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
+               break;
+
+       case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
+               IWL_INFO(priv, "uCode version raw: 0x%x\n", priv->ucode_ver);
+
+               skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
+               if (!skb) {
+                       IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+                       return -ENOMEM;
+               }
+               NLA_PUT_U32(skb, IWL_TM_ATTR_FW_VERSION, priv->ucode_ver);
+               status = cfg80211_testmode_reply(skb);
+               if (status < 0)
+                       IWL_DEBUG_INFO(priv,
+                                       "Error sending msg : %d\n", status);
+               break;
+
+       case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
+               bus_get_hw_id(bus(priv), buf, sizeof(buf));
+               ptr = buf;
+               strsep(&ptr, ":");
+               sscanf(strsep(&ptr, ":"), "%x", &num);
+               sscanf(strsep(&ptr, ":"), "%x", &devid);
+               IWL_INFO(priv, "Device ID = 0x%04x, SubDevice ID= 0x%04x\n",
+                               num, devid);
+               devid |= (num << 16);
+
+               skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
+               if (!skb) {
+                       IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+                       return -ENOMEM;
+               }
+               NLA_PUT_U32(skb, IWL_TM_ATTR_DEVICE_ID, devid);
+               status = cfg80211_testmode_reply(skb);
+               if (status < 0)
+                       IWL_DEBUG_INFO(priv,
+                                       "Error sending msg : %d\n", status);
+               break;
+
+       default:
+               IWL_DEBUG_INFO(priv, "Unknown testmode driver command ID\n");
+               return -ENOSYS;
+       }
+       return status;
+
+nla_put_failure:
+       kfree_skb(skb);
+       return -EMSGSIZE;
+}
+
+
+/*
+ * This function handles the user application commands for uCode trace
+ *
+ * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the
+ * handlers respectively.
+ *
+ * If it's an unknown command ID, -ENOSYS is replied; otherwise, the returned
+ * value of the actual command execution is replied to the user application.
+ *
+ * @hw: ieee80211_hw object that represents the device
+ * @tb: gnl message fields from the user space
+ */
+static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct sk_buff *skb;
+       int status = 0;
+       struct device *dev = bus(priv)->dev;
+
+       switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
+       case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
+               if (priv->testmode_trace.trace_enabled)
+                       return -EBUSY;
+
+               if (!tb[IWL_TM_ATTR_TRACE_SIZE])
+                       priv->testmode_trace.buff_size = TRACE_BUFF_SIZE_DEF;
+               else
+                       priv->testmode_trace.buff_size =
+                               nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]);
+               if (!priv->testmode_trace.buff_size)
+                       return -EINVAL;
+               if (priv->testmode_trace.buff_size < TRACE_BUFF_SIZE_MIN ||
+                   priv->testmode_trace.buff_size > TRACE_BUFF_SIZE_MAX)
+                       return -EINVAL;
+
+               priv->testmode_trace.total_size =
+                       priv->testmode_trace.buff_size + TRACE_BUFF_PADD;
+               priv->testmode_trace.cpu_addr =
+                       dma_alloc_coherent(dev,
+                                          priv->testmode_trace.total_size,
+                                          &priv->testmode_trace.dma_addr,
+                                          GFP_KERNEL);
+               if (!priv->testmode_trace.cpu_addr)
+                       return -ENOMEM;
+               priv->testmode_trace.trace_enabled = true;
+               priv->testmode_trace.trace_addr = (u8 *)PTR_ALIGN(
+                       priv->testmode_trace.cpu_addr, 0x100);
+               memset(priv->testmode_trace.trace_addr, 0x03B,
+                       priv->testmode_trace.buff_size);
+               skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
+                       sizeof(priv->testmode_trace.dma_addr) + 20);
+               if (!skb) {
+                       IWL_DEBUG_INFO(priv,
+                               "Error allocating memory\n");
+                       iwl_trace_cleanup(priv);
+                       return -ENOMEM;
+               }
+               NLA_PUT(skb, IWL_TM_ATTR_TRACE_ADDR,
+                       sizeof(priv->testmode_trace.dma_addr),
+                       (u64 *)&priv->testmode_trace.dma_addr);
+               status = cfg80211_testmode_reply(skb);
+               if (status < 0) {
+                       IWL_DEBUG_INFO(priv,
+                                      "Error sending msg : %d\n",
+                                      status);
+               }
+               priv->testmode_trace.num_chunks =
+                       DIV_ROUND_UP(priv->testmode_trace.buff_size,
+                                    DUMP_CHUNK_SIZE);
+               break;
+
+       case IWL_TM_CMD_APP2DEV_END_TRACE:
+               iwl_trace_cleanup(priv);
+               break;
+       default:
+               IWL_DEBUG_INFO(priv, "Unknown testmode mem command ID\n");
+               return -ENOSYS;
+       }
+       return status;
+
+nla_put_failure:
+       kfree_skb(skb);
+       if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) ==
+           IWL_TM_CMD_APP2DEV_BEGIN_TRACE)
+               iwl_trace_cleanup(priv);
+       return -EMSGSIZE;
+}
+
+static int iwl_testmode_trace_dump(struct ieee80211_hw *hw, struct nlattr **tb,
+                                  struct sk_buff *skb,
+                                  struct netlink_callback *cb)
+{
+       struct iwl_priv *priv = hw->priv;
+       int idx, length;
+
+       if (priv->testmode_trace.trace_enabled &&
+           priv->testmode_trace.trace_addr) {
+               idx = cb->args[4];
+               if (idx >= priv->testmode_trace.num_chunks)
+                       return -ENOENT;
+               length = DUMP_CHUNK_SIZE;
+               if (((idx + 1) == priv->testmode_trace.num_chunks) &&
+                   (priv->testmode_trace.buff_size % DUMP_CHUNK_SIZE))
+                       length = priv->testmode_trace.buff_size %
+                               DUMP_CHUNK_SIZE;
+
+               NLA_PUT(skb, IWL_TM_ATTR_TRACE_DUMP, length,
+                       priv->testmode_trace.trace_addr +
+                       (DUMP_CHUNK_SIZE * idx));
+               idx++;
+               cb->args[4] = idx;
+               return 0;
+       } else
+               return -EFAULT;
+
+ nla_put_failure:
+       return -ENOBUFS;
+}
+
+/*
+ * This function handles the user application command to switch uCode
+ * ownership.
+ *
+ * It retrieves the mandatory field IWL_TM_ATTR_UCODE_OWNER and
+ * decides who the current owner of the uCode is.
+ *
+ * If the current owner is OWNERSHIP_TM, then the only host commands
+ * delivered to the uCode are those coming from testmode; all other host
+ * commands will be dropped.
+ *
+ * By default the driver is the owner of the uCode in normal operational
+ * mode.
+ *
+ * @hw: ieee80211_hw object that represents the device
+ * @tb: gnl message fields from the user space
+ */
+static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
+{
+       struct iwl_priv *priv = hw->priv;
+       u8 owner;
+
+       if (!tb[IWL_TM_ATTR_UCODE_OWNER]) {
+               IWL_DEBUG_INFO(priv, "Error finding ucode owner\n");
+               return -ENOMSG;
+       }
+
+       owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]);
+       if ((owner == IWL_OWNERSHIP_DRIVER) || (owner == IWL_OWNERSHIP_TM))
+               priv->shrd->ucode_owner = owner;
+       else {
+               IWL_DEBUG_INFO(priv, "Invalid owner\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+
+/*
+ * This function handles the user application commands for SRAM data dump
+ *
+ * It retrieves the mandatory fields IWL_TM_ATTR_SRAM_ADDR and
+ * IWL_TM_ATTR_SRAM_SIZE to decide the memory area for SRAM data reading
+ *
+ * Several errors may be returned: -EBUSY if the SRAM data retrieved by a
+ * previous command has not yet been delivered to userspace, -ENOMSG if
+ * the mandatory fields (IWL_TM_ATTR_SRAM_ADDR, IWL_TM_ATTR_SRAM_SIZE)
+ * are missing, or -ENOMEM if the buffer allocation fails.
+ *
+ * Otherwise 0 is replied indicating the success of the SRAM reading.
+ *
+ * @hw: ieee80211_hw object that represents the device
+ * @tb: gnl message fields from the user space
+ */
+static int iwl_testmode_sram(struct ieee80211_hw *hw, struct nlattr **tb)
+{
+       struct iwl_priv *priv = hw->priv;
+       u32 base, ofs, size, maxsize;
+
+       if (priv->testmode_sram.sram_readed)
+               return -EBUSY;
+
+       if (!tb[IWL_TM_ATTR_SRAM_ADDR]) {
+               IWL_DEBUG_INFO(priv, "Error finding SRAM offset address\n");
+               return -ENOMSG;
+       }
+       ofs = nla_get_u32(tb[IWL_TM_ATTR_SRAM_ADDR]);
+       if (!tb[IWL_TM_ATTR_SRAM_SIZE]) {
+               IWL_DEBUG_INFO(priv, "Error finding size for SRAM reading\n");
+               return -ENOMSG;
+       }
+       size = nla_get_u32(tb[IWL_TM_ATTR_SRAM_SIZE]);
+       switch (priv->shrd->ucode_type) {
+       case IWL_UCODE_REGULAR:
+               maxsize = trans(priv)->ucode_rt.data.len;
+               break;
+       case IWL_UCODE_INIT:
+               maxsize = trans(priv)->ucode_init.data.len;
+               break;
+       case IWL_UCODE_WOWLAN:
+               maxsize = trans(priv)->ucode_wowlan.data.len;
+               break;
+       case IWL_UCODE_NONE:
+               IWL_DEBUG_INFO(priv, "Error, uCode does not been loaded\n");
+               return -ENOSYS;
+       default:
+               IWL_DEBUG_INFO(priv, "Error, unsupported uCode type\n");
+               return -ENOSYS;
+       }
+       if ((ofs + size) > maxsize) {
+               IWL_DEBUG_INFO(priv, "Invalid offset/size: out of range\n");
+               return -EINVAL;
+       }
+       priv->testmode_sram.buff_size = (size / 4) * 4;
+       priv->testmode_sram.buff_addr =
+               kmalloc(priv->testmode_sram.buff_size, GFP_KERNEL);
+       if (priv->testmode_sram.buff_addr == NULL) {
+               IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+               return -ENOMEM;
+       }
+       base = 0x800000;
+       _iwl_read_targ_mem_words(bus(priv), base + ofs,
+                                       priv->testmode_sram.buff_addr,
+                                       priv->testmode_sram.buff_size / 4);
+       priv->testmode_sram.num_chunks =
+               DIV_ROUND_UP(priv->testmode_sram.buff_size, DUMP_CHUNK_SIZE);
+       priv->testmode_sram.sram_readed = true;
+       return 0;
+}
+
+static int iwl_testmode_sram_dump(struct ieee80211_hw *hw, struct nlattr **tb,
+                                  struct sk_buff *skb,
+                                  struct netlink_callback *cb)
+{
+       struct iwl_priv *priv = hw->priv;
+       int idx, length;
+
+       if (priv->testmode_sram.sram_readed) {
+               idx = cb->args[4];
+               if (idx >= priv->testmode_sram.num_chunks) {
+                       iwl_sram_cleanup(priv);
+                       return -ENOENT;
+               }
+               length = DUMP_CHUNK_SIZE;
+               if (((idx + 1) == priv->testmode_sram.num_chunks) &&
+                   (priv->testmode_sram.buff_size % DUMP_CHUNK_SIZE))
+                       length = priv->testmode_sram.buff_size %
+                               DUMP_CHUNK_SIZE;
+
+               NLA_PUT(skb, IWL_TM_ATTR_SRAM_DUMP, length,
+                       priv->testmode_sram.buff_addr +
+                       (DUMP_CHUNK_SIZE * idx));
+               idx++;
+               cb->args[4] = idx;
+               return 0;
+       } else
+               return -EFAULT;
+
+ nla_put_failure:
+       return -ENOBUFS;
+}
+
+
+/* The testmode gnl message handler that takes the gnl message from the
+ * user space, parses it per the policy iwl_testmode_gnl_msg_policy and
+ * invokes the corresponding handler.
+ *
+ * This function is invoked when a user space application sends a gnl
+ * message through the testmode tunnel NL80211_CMD_TESTMODE regulated
+ * by nl80211.
+ *
+ * It retrieves the mandatory field, IWL_TM_ATTR_COMMAND, before
+ * dispatching it to the corresponding handler.
+ *
+ * If IWL_TM_ATTR_COMMAND is missing, -ENOMSG is replied to the user
+ * application; -ENOSYS is replied if the command is unknown; otherwise,
+ * the command is dispatched to the respective handler.
+ *
+ * @hw: ieee80211_hw object that represents the device
+ * @data: pointer to user space message
+ * @len: length in byte of @data
+ */
+int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
+{
+       struct nlattr *tb[IWL_TM_ATTR_MAX];
+       struct iwl_priv *priv = hw->priv;
+       int result;
+
+       result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
+                       iwl_testmode_gnl_msg_policy);
+       if (result != 0) {
+               IWL_DEBUG_INFO(priv,
+                              "Error parsing the gnl message : %d\n", result);
+               return result;
+       }
+
+       /* IWL_TM_ATTR_COMMAND is absolutely mandatory */
+       if (!tb[IWL_TM_ATTR_COMMAND]) {
+               IWL_DEBUG_INFO(priv, "Error finding testmode command type\n");
+               return -ENOMSG;
+       }
+       /* in case multiple accesses to the device happens */
+       mutex_lock(&priv->shrd->mutex);
+
+       switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
+       case IWL_TM_CMD_APP2DEV_UCODE:
+               IWL_DEBUG_INFO(priv, "testmode cmd to uCode\n");
+               result = iwl_testmode_ucode(hw, tb);
+               break;
+       case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
+       case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
+       case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
+       case IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
+       case IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
+               IWL_DEBUG_INFO(priv, "testmode cmd to register\n");
+               result = iwl_testmode_reg(hw, tb);
+               break;
+       case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
+       case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
+       case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
+       case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
+       case IWL_TM_CMD_APP2DEV_GET_EEPROM:
+       case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
+       case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
+       case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
+       case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
+               IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
+               result = iwl_testmode_driver(hw, tb);
+               break;
+
+       case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
+       case IWL_TM_CMD_APP2DEV_END_TRACE:
+       case IWL_TM_CMD_APP2DEV_READ_TRACE:
+               IWL_DEBUG_INFO(priv, "testmode uCode trace cmd to driver\n");
+               result = iwl_testmode_trace(hw, tb);
+               break;
+
+       case IWL_TM_CMD_APP2DEV_OWNERSHIP:
+               IWL_DEBUG_INFO(priv, "testmode change uCode ownership\n");
+               result = iwl_testmode_ownership(hw, tb);
+               break;
+
+       case IWL_TM_CMD_APP2DEV_READ_SRAM:
+               IWL_DEBUG_INFO(priv, "testmode sram read cmd to driver\n");
+               result = iwl_testmode_sram(hw, tb);
+               break;
+
+       default:
+               IWL_DEBUG_INFO(priv, "Unknown testmode command\n");
+               result = -ENOSYS;
+               break;
+       }
+
+       mutex_unlock(&priv->shrd->mutex);
+       return result;
+}
+
+int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
+                     struct netlink_callback *cb,
+                     void *data, int len)
+{
+       struct nlattr *tb[IWL_TM_ATTR_MAX];
+       struct iwl_priv *priv = hw->priv;
+       int result;
+       u32 cmd;
+
+       if (cb->args[3]) {
+               /* offset by 1 since commands start at 0 */
+               cmd = cb->args[3] - 1;
+       } else {
+               result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
+                               iwl_testmode_gnl_msg_policy);
+               if (result) {
+                       IWL_DEBUG_INFO(priv,
+                              "Error parsing the gnl message : %d\n", result);
+                       return result;
+               }
+
+               /* IWL_TM_ATTR_COMMAND is absolutely mandatory */
+               if (!tb[IWL_TM_ATTR_COMMAND]) {
+                       IWL_DEBUG_INFO(priv,
+                               "Error finding testmode command type\n");
+                       return -ENOMSG;
+               }
+               cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
+               cb->args[3] = cmd + 1;
+       }
+
+       /* in case multiple accesses to the device happens */
+       mutex_lock(&priv->shrd->mutex);
+       switch (cmd) {
+       case IWL_TM_CMD_APP2DEV_READ_TRACE:
+               IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n");
+               result = iwl_testmode_trace_dump(hw, tb, skb, cb);
+               break;
+       case IWL_TM_CMD_APP2DEV_DUMP_SRAM:
+               IWL_DEBUG_INFO(priv, "testmode sram dump cmd to driver\n");
+               result = iwl_testmode_sram_dump(hw, tb, skb, cb);
+               break;
+       default:
+               result = -EINVAL;
+               break;
+       }
+
+       mutex_unlock(&priv->shrd->mutex);
+       return result;
+}
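
(The dump path above streams data in DUMP_CHUNK_SIZE pieces, one netlink
message per chunk, using cb->args[3] to remember the command and cb->args[4]
to remember the chunk index between callbacks; the dump ends once the handler
runs out of chunks and returns -ENOENT to cfg80211. A user-space request
differs from the command path only in the NLM_F_DUMP flag. Illustrative
sketch only, reusing the libnl-3 setup from the earlier example, assuming
"wlan0" and that a prior IWL_TM_CMD_APP2DEV_READ_SRAM already filled the
buffer:)

        /* Request the SRAM dump; each reply carries one IWL_TM_ATTR_SRAM_DUMP
         * chunk of at most DUMP_CHUNK_SIZE bytes. */
        genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, NLM_F_DUMP,
                    NL80211_CMD_TESTMODE, 0);
        nla_put_u32(msg, NL80211_ATTR_IFINDEX, if_nametoindex("wlan0"));
        td = nla_nest_start(msg, NL80211_ATTR_TESTDATA);
        nla_put_u32(msg, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_APP2DEV_DUMP_SRAM);
        nla_nest_end(msg, td);
        nl_send_auto(sk, msg);
        nl_recvmsgs_default(sk);  /* valid-message callback collects chunks */
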
index b980bda..26138f1 100644 (file)
@@ -76,9 +76,9 @@
  *     the actual uCode host command ID is carried with
  *     IWL_TM_ATTR_UCODE_CMD_ID
  *
- * @IWL_TM_CMD_APP2DEV_REG_READ32:
- * @IWL_TM_CMD_APP2DEV_REG_WRITE32:
- * @IWL_TM_CMD_APP2DEV_REG_WRITE8:
+ * @IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
+ * @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
+ * @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
 *     commands from user application to access registers
  *
  * @IWL_TM_CMD_APP2DEV_GET_DEVICENAME: retrieve device name
  * @IWL_TM_CMD_DEV2APP_EEPROM_RSP:
  *     commands from kernel space to carry the eeprom response
  *     to user application
+ *
  * @IWL_TM_CMD_APP2DEV_OWNERSHIP:
 *     commands from user application to change the ownership of the uCode;
 *     if the application has the ownership, only host commands from
 *     testmode will be delivered to the uCode. Default owner is the driver
+ *
+ * @IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
+ * @IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
+ *     commands from user application to indirectly access peripheral registers
+ *
+ * @IWL_TM_CMD_APP2DEV_READ_SRAM:
+ * @IWL_TM_CMD_APP2DEV_DUMP_SRAM:
+ *     commands from user application to read data in sram
+ *
+ * @IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW: load Wake on Wireless LAN uCode image
+ * @IWL_TM_CMD_APP2DEV_GET_FW_VERSION: retrieve uCode version
+ * @IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: retrieve ID information in device
+ *
  */
 enum iwl_tm_cmd_t {
        IWL_TM_CMD_APP2DEV_UCODE                = 1,
-       IWL_TM_CMD_APP2DEV_REG_READ32           = 2,
-       IWL_TM_CMD_APP2DEV_REG_WRITE32          = 3,
-       IWL_TM_CMD_APP2DEV_REG_WRITE8           = 4,
+       IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32    = 2,
+       IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32   = 3,
+       IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8    = 4,
        IWL_TM_CMD_APP2DEV_GET_DEVICENAME       = 5,
        IWL_TM_CMD_APP2DEV_LOAD_INIT_FW         = 6,
        IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB       = 7,
@@ -126,7 +140,14 @@ enum iwl_tm_cmd_t {
        IWL_TM_CMD_DEV2APP_UCODE_RX_PKT         = 15,
        IWL_TM_CMD_DEV2APP_EEPROM_RSP           = 16,
        IWL_TM_CMD_APP2DEV_OWNERSHIP            = 17,
-       IWL_TM_CMD_MAX                          = 18,
+       IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32  = 18,
+       IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32 = 19,
+       IWL_TM_CMD_APP2DEV_READ_SRAM            = 20,
+       IWL_TM_CMD_APP2DEV_DUMP_SRAM            = 21,
+       IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW       = 22,
+       IWL_TM_CMD_APP2DEV_GET_FW_VERSION       = 23,
+       IWL_TM_CMD_APP2DEV_GET_DEVICE_ID        = 24,
+       IWL_TM_CMD_MAX                          = 25,
 };
 
 /*
@@ -196,6 +217,26 @@ enum iwl_tm_cmd_t {
  *     When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_OWNERSHIP,
  *     The mandatory fields are:
  *     IWL_TM_ATTR_UCODE_OWNER for the new owner
+ *
+ * @IWL_TM_ATTR_SRAM_ADDR:
+ * @IWL_TM_ATTR_SRAM_SIZE:
+ *     When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_READ_SRAM,
+ *     The mandatory fields are:
+ *     IWL_TM_ATTR_SRAM_ADDR for the address in sram
+ *     IWL_TM_ATTR_SRAM_SIZE for the buffer size of data reading
+ *
+ * @IWL_TM_ATTR_SRAM_DUMP:
+ *     When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_DUMP_SRAM,
+ *     IWL_TM_ATTR_SRAM_DUMP for the data in sram
+ *
+ * @IWL_TM_ATTR_FW_VERSION:
+ *     When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_FW_VERSION,
+ *     IWL_TM_ATTR_FW_VERSION for the uCode version
+ *
+ * @IWL_TM_ATTR_DEVICE_ID:
+ *     When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_DEVICE_ID,
+ *     IWL_TM_ATTR_DEVICE_ID for the device ID information
+ *
  */
 enum iwl_tm_attr_t {
        IWL_TM_ATTR_NOT_APPLICABLE              = 0,
@@ -213,7 +254,12 @@ enum iwl_tm_attr_t {
        IWL_TM_ATTR_TRACE_DUMP                  = 12,
        IWL_TM_ATTR_FIXRATE                     = 13,
        IWL_TM_ATTR_UCODE_OWNER                 = 14,
-       IWL_TM_ATTR_MAX                         = 15,
+       IWL_TM_ATTR_SRAM_ADDR                   = 15,
+       IWL_TM_ATTR_SRAM_SIZE                   = 16,
+       IWL_TM_ATTR_SRAM_DUMP                   = 17,
+       IWL_TM_ATTR_FW_VERSION                  = 18,
+       IWL_TM_ATTR_DEVICE_ID                   = 19,
+       IWL_TM_ATTR_MAX                         = 20,
 };
 
 /* uCode trace buffer */
@@ -221,6 +267,8 @@ enum iwl_tm_attr_t {
 #define TRACE_BUFF_SIZE_MIN    0x20000
 #define TRACE_BUFF_SIZE_DEF    TRACE_BUFF_SIZE_MIN
 #define TRACE_BUFF_PADD                0x2000
-#define TRACE_CHUNK_SIZE       (PAGE_SIZE - 1024)
+
+/* Maximum data size of each dump packet */
+#define DUMP_CHUNK_SIZE                (PAGE_SIZE - 1024)
 
 #endif
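
(As a concrete illustration of the chunking above: with the default trace
buffer, TRACE_BUFF_SIZE_DEF = 0x20000 = 131072 bytes, and assuming 4 KiB
pages so that DUMP_CHUNK_SIZE = 4096 - 1024 = 3072 bytes, iwl_testmode_trace()
sets num_chunks = DIV_ROUND_UP(131072, 3072) = 43; the first 42 dump messages
carry 3072 bytes each, and the last one carries the 131072 % 3072 = 2048-byte
remainder, matching the length adjustment in iwl_testmode_trace_dump().)
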
index ee126f8..2ee00e0 100644 (file)
@@ -594,8 +594,8 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans)
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);
 
-       base = priv->device_pointers.error_event_table;
-       if (priv->ucode_type == IWL_UCODE_INIT) {
+       base = trans->shrd->device_pointers.error_event_table;
+       if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
                if (!base)
                        base = priv->init_errlog_ptr;
        } else {
@@ -607,7 +607,7 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans)
                IWL_ERR(trans,
                        "Not valid error log pointer 0x%08X for %s uCode\n",
                        base,
-                       (priv->ucode_type == IWL_UCODE_INIT)
+                       (trans->shrd->ucode_type == IWL_UCODE_INIT)
                                        ? "Init" : "RT");
                return;
        }
@@ -648,6 +648,21 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans)
        IWL_ERR(trans, "0x%08X | hw version\n", table.hw_ver);
        IWL_ERR(trans, "0x%08X | board version\n", table.brd_ver);
        IWL_ERR(trans, "0x%08X | hcmd\n", table.hcmd);
+
+       IWL_ERR(trans, "0x%08X | isr0\n", table.isr0);
+       IWL_ERR(trans, "0x%08X | isr1\n", table.isr1);
+       IWL_ERR(trans, "0x%08X | isr2\n", table.isr2);
+       IWL_ERR(trans, "0x%08X | isr3\n", table.isr3);
+       IWL_ERR(trans, "0x%08X | isr4\n", table.isr4);
+       IWL_ERR(trans, "0x%08X | isr_pref\n", table.isr_pref);
+       IWL_ERR(trans, "0x%08X | wait_event\n", table.wait_event);
+       IWL_ERR(trans, "0x%08X | l2p_control\n", table.l2p_control);
+       IWL_ERR(trans, "0x%08X | l2p_duration\n", table.l2p_duration);
+       IWL_ERR(trans, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
+       IWL_ERR(trans, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
+       IWL_ERR(trans, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
+       IWL_ERR(trans, "0x%08X | timestamp\n", table.u_timestamp);
+       IWL_ERR(trans, "0x%08X | flow_handler\n", table.flow_handler);
 }
 
 /**
@@ -709,8 +724,8 @@ static int iwl_print_event_log(struct iwl_trans *trans, u32 start_idx,
        if (num_events == 0)
                return pos;
 
-       base = priv->device_pointers.log_event_table;
-       if (priv->ucode_type == IWL_UCODE_INIT) {
+       base = trans->shrd->device_pointers.log_event_table;
+       if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
                if (!base)
                        base = priv->init_evtlog_ptr;
        } else {
@@ -823,8 +838,8 @@ int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
        size_t bufsz = 0;
        struct iwl_priv *priv = priv(trans);
 
-       base = priv->device_pointers.log_event_table;
-       if (priv->ucode_type == IWL_UCODE_INIT) {
+       base = trans->shrd->device_pointers.log_event_table;
+       if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
                logsize = priv->init_evtlog_size;
                if (!base)
                        base = priv->init_evtlog_ptr;
@@ -838,7 +853,7 @@ int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
                IWL_ERR(trans,
                        "Invalid event log pointer 0x%08X for %s uCode\n",
                        base,
-                       (priv->ucode_type == IWL_UCODE_INIT)
+                       (trans->shrd->ucode_type == IWL_UCODE_INIT)
                                        ? "Init" : "RT");
                return -EINVAL;
        }
index 5954fdf..3cf62c3 100644 (file)
@@ -1100,13 +1100,21 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                hdr->seq_ctrl = hdr->seq_ctrl &
                                cpu_to_le16(IEEE80211_SCTL_FRAG);
                hdr->seq_ctrl |= cpu_to_le16(seq_number);
-               seq_number += 0x10;
                /* aggregation is on for this <sta,tid> */
                if (info->flags & IEEE80211_TX_CTL_AMPDU) {
-                       WARN_ON_ONCE(tid_data->agg.state != IWL_AGG_ON);
+                       if (WARN_ON_ONCE(tid_data->agg.state != IWL_AGG_ON)) {
+                               IWL_ERR(trans, "TX_CTL_AMPDU while not in AGG:"
+                                       " Tx flags = 0x%08x, agg.state = %d",
+                                       info->flags, tid_data->agg.state);
+                               IWL_ERR(trans, "sta_id = %d, tid = %d "
+                                       "txq_id = %d, seq_num = %d", sta_id,
+                                       tid, tid_data->agg.txq_id,
+                                       seq_number >> 4);
+                       }
                        txq_id = tid_data->agg.txq_id;
                        is_agg = true;
                }
+               seq_number += 0x10;
        }
 
        /* Copy MAC header from skb into command buffer */
@@ -1197,9 +1205,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
        iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
 
        /* Set up entry for this TFD in Tx byte-count array */
-       if (is_agg)
-               iwl_trans_txq_update_byte_cnt_tbl(trans, txq,
-                                              le16_to_cpu(tx_cmd->len));
+       iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
 
        dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen,
                        DMA_BIDIRECTIONAL);
@@ -1365,6 +1371,7 @@ static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
 
 static void iwl_trans_pcie_free(struct iwl_trans *trans)
 {
+       iwl_calib_free_results(trans);
        iwl_trans_pcie_tx_free(trans);
        iwl_trans_pcie_rx_free(trans);
        free_irq(bus(trans)->irq, trans);
index 50227eb..f94a6ee 100644 (file)
@@ -220,11 +220,12 @@ struct fw_img {
        struct fw_desc data;    /* firmware data image */
 };
 
-enum iwl_ucode_type {
-       IWL_UCODE_NONE,
-       IWL_UCODE_REGULAR,
-       IWL_UCODE_INIT,
-       IWL_UCODE_WOWLAN,
+/* Opaque calibration results */
+struct iwl_calib_result {
+       struct list_head list;
+       size_t cmd_len;
+       struct iwl_calib_hdr hdr;
+       /* data follows */
 };
 
 /**
@@ -236,6 +237,8 @@ enum iwl_ucode_type {
  * @ucode_rt: run time ucode image
  * @ucode_init: init ucode image
  * @ucode_wowlan: wake on wireless ucode image (optional)
+ * @nvm_device_type: indicates OTP or eeprom
+ * @calib_results: list head for init calibration results
  */
 struct iwl_trans {
        const struct iwl_trans_ops *ops;
@@ -250,6 +253,9 @@ struct iwl_trans {
        /* eeprom related variables */
        int    nvm_device_type;
 
+       /* init calibration results */
+       struct list_head calib_results;
+
        /* pointer to trans specific struct */
        /*Ensure that this pointer will always be aligned to sizeof pointer */
        char trans_specific[0] __attribute__((__aligned__(sizeof(void *))));
@@ -386,4 +392,9 @@ int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc,
                      const void *data, size_t len);
 void iwl_dealloc_ucode(struct iwl_trans *trans);
 
+int iwl_send_calib_results(struct iwl_trans *trans);
+int iwl_calib_set(struct iwl_trans *trans,
+                 const struct iwl_calib_hdr *cmd, int len);
+void iwl_calib_free_results(struct iwl_trans *trans);
+
 #endif /* __iwl_trans_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-ucode.c b/drivers/net/wireless/iwlwifi/iwl-ucode.c
new file mode 100644 (file)
index 0000000..0577212
--- /dev/null
@@ -0,0 +1,757 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/dma-mapping.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-agn-hw.h"
+#include "iwl-agn.h"
+#include "iwl-agn-calib.h"
+#include "iwl-trans.h"
+#include "iwl-fh.h"
+
+static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
+       {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
+        0, COEX_UNASSOC_IDLE_FLAGS},
+       {COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
+        0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
+       {COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
+        0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
+       {COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
+        0, COEX_CALIBRATION_FLAGS},
+       {COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
+        0, COEX_PERIODIC_CALIBRATION_FLAGS},
+       {COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
+        0, COEX_CONNECTION_ESTAB_FLAGS},
+       {COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
+        0, COEX_ASSOCIATED_IDLE_FLAGS},
+       {COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
+        0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
+       {COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
+        0, COEX_ASSOC_AUTO_SCAN_FLAGS},
+       {COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
+        0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
+       {COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
+       {COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
+       {COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
+        0, COEX_STAND_ALONE_DEBUG_FLAGS},
+       {COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
+        0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
+       {COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
+       {COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
+};
+
+/******************************************************************************
+ *
+ * uCode download functions
+ *
+ ******************************************************************************/
+
+static void iwl_free_fw_desc(struct iwl_bus *bus, struct fw_desc *desc)
+{
+       if (desc->v_addr)
+               dma_free_coherent(bus->dev, desc->len,
+                                 desc->v_addr, desc->p_addr);
+       desc->v_addr = NULL;
+       desc->len = 0;
+}
+
+static void iwl_free_fw_img(struct iwl_bus *bus, struct fw_img *img)
+{
+       iwl_free_fw_desc(bus, &img->code);
+       iwl_free_fw_desc(bus, &img->data);
+}
+
+void iwl_dealloc_ucode(struct iwl_trans *trans)
+{
+       iwl_free_fw_img(bus(trans), &trans->ucode_rt);
+       iwl_free_fw_img(bus(trans), &trans->ucode_init);
+       iwl_free_fw_img(bus(trans), &trans->ucode_wowlan);
+}
+
+int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc,
+                     const void *data, size_t len)
+{
+       if (!len) {
+               desc->v_addr = NULL;
+               return -EINVAL;
+       }
+
+       desc->v_addr = dma_alloc_coherent(bus->dev, len,
+                                         &desc->p_addr, GFP_KERNEL);
+       if (!desc->v_addr)
+               return -ENOMEM;
+
+       desc->len = len;
+       memcpy(desc->v_addr, data, len);
+       return 0;
+}
+
+/*
+ * ucode
+ */
+static int iwl_load_section(struct iwl_trans *trans, const char *name,
+                               struct fw_desc *image, u32 dst_addr)
+{
+       struct iwl_bus *bus = bus(trans);
+       dma_addr_t phy_addr = image->p_addr;
+       u32 byte_cnt = image->len;
+       int ret;
+
+       trans->ucode_write_complete = 0;
+
+       iwl_write_direct32(bus,
+               FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+               FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
+
+       iwl_write_direct32(bus,
+               FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
+
+       iwl_write_direct32(bus,
+               FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
+               phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
+
+       iwl_write_direct32(bus,
+               FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
+               (iwl_get_dma_hi_addr(phy_addr)
+                       << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
+
+       iwl_write_direct32(bus,
+               FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
+               1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
+               1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
+               FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
+
+       iwl_write_direct32(bus,
+               FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+               FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE       |
+               FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE    |
+               FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
+
+       IWL_DEBUG_FW(bus, "%s uCode section being loaded...\n", name);
+       ret = wait_event_timeout(trans->shrd->wait_command_queue,
+                                trans->ucode_write_complete, 5 * HZ);
+       if (!ret) {
+               IWL_ERR(trans, "Could not load the %s uCode section\n",
+                       name);
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+static inline struct fw_img *iwl_get_ucode_image(struct iwl_trans *trans,
+                                       enum iwl_ucode_type ucode_type)
+{
+       switch (ucode_type) {
+       case IWL_UCODE_INIT:
+               return &trans->ucode_init;
+       case IWL_UCODE_WOWLAN:
+               return &trans->ucode_wowlan;
+       case IWL_UCODE_REGULAR:
+               return &trans->ucode_rt;
+       case IWL_UCODE_NONE:
+               break;
+       }
+       return NULL;
+}
+
+static int iwl_load_given_ucode(struct iwl_trans *trans,
+                                  enum iwl_ucode_type ucode_type)
+{
+       int ret = 0;
+       struct fw_img *image = iwl_get_ucode_image(trans, ucode_type);
+
+
+       if (!image) {
+               IWL_ERR(trans, "Invalid ucode requested (%d)\n",
+                       ucode_type);
+               return -EINVAL;
+       }
+
+       ret = iwl_load_section(trans, "INST", &image->code,
+                                  IWLAGN_RTC_INST_LOWER_BOUND);
+       if (ret)
+               return ret;
+
+       return iwl_load_section(trans, "DATA", &image->data,
+                                   IWLAGN_RTC_DATA_LOWER_BOUND);
+}
+
+/*
+ *  Calibration
+ */
+static int iwl_set_Xtal_calib(struct iwl_priv *priv)
+{
+       struct iwl_calib_xtal_freq_cmd cmd;
+       __le16 *xtal_calib =
+               (__le16 *)iwl_eeprom_query_addr(priv->shrd, EEPROM_XTAL);
+
+       iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD);
+       cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
+       cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
+       return iwl_calib_set(trans(priv), (void *)&cmd, sizeof(cmd));
+}
+
+static int iwl_set_temperature_offset_calib(struct iwl_priv *priv)
+{
+       struct iwl_calib_temperature_offset_cmd cmd;
+       __le16 *offset_calib =
+               (__le16 *)iwl_eeprom_query_addr(priv->shrd,
+                                               EEPROM_RAW_TEMPERATURE);
+
+       memset(&cmd, 0, sizeof(cmd));
+       iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
+       memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(*offset_calib));
+       if (!(cmd.radio_sensor_offset))
+               cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
+
+       IWL_DEBUG_CALIB(priv, "Radio sensor offset: %d\n",
+                       le16_to_cpu(cmd.radio_sensor_offset));
+       return iwl_calib_set(trans(priv), (void *)&cmd, sizeof(cmd));
+}
+
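+/*
+ * Devices with the temp_offset_v2 config flag use this second version of the
+ * temperature offset calibration: the EEPROM carries separate high/low sensor
+ * offsets plus a burnt voltage reference, all copied into the command below.
+ */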
+static int iwl_set_temperature_offset_calib_v2(struct iwl_priv *priv)
+{
+       struct iwl_calib_temperature_offset_v2_cmd cmd;
+       __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv->shrd,
+                                    EEPROM_KELVIN_TEMPERATURE);
+       __le16 *offset_calib_low =
+               (__le16 *)iwl_eeprom_query_addr(priv->shrd,
+                                               EEPROM_RAW_TEMPERATURE);
+       struct iwl_eeprom_calib_hdr *hdr;
+
+       memset(&cmd, 0, sizeof(cmd));
+       iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
+       hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv->shrd,
+                                                       EEPROM_CALIB_ALL);
+       memcpy(&cmd.radio_sensor_offset_high, offset_calib_high,
+               sizeof(*offset_calib_high));
+       memcpy(&cmd.radio_sensor_offset_low, offset_calib_low,
+               sizeof(*offset_calib_low));
+       if (!(cmd.radio_sensor_offset_low)) {
+               IWL_DEBUG_CALIB(priv, "no info in EEPROM, use default\n");
+               cmd.radio_sensor_offset_low = DEFAULT_RADIO_SENSOR_OFFSET;
+               cmd.radio_sensor_offset_high = DEFAULT_RADIO_SENSOR_OFFSET;
+       }
+       memcpy(&cmd.burntVoltageRef, &hdr->voltage,
+               sizeof(hdr->voltage));
+
+       IWL_DEBUG_CALIB(priv, "Radio sensor offset high: %d\n",
+                       le16_to_cpu(cmd.radio_sensor_offset_high));
+       IWL_DEBUG_CALIB(priv, "Radio sensor offset low: %d\n",
+                       le16_to_cpu(cmd.radio_sensor_offset_low));
+       IWL_DEBUG_CALIB(priv, "Voltage Ref: %d\n",
+                       le16_to_cpu(cmd.burntVoltageRef));
+
+       return iwl_calib_set(trans(priv), (void *)&cmd, sizeof(cmd));
+}
+
+static int iwl_send_calib_cfg(struct iwl_trans *trans)
+{
+       struct iwl_calib_cfg_cmd calib_cfg_cmd;
+       struct iwl_host_cmd cmd = {
+               .id = CALIBRATION_CFG_CMD,
+               .len = { sizeof(struct iwl_calib_cfg_cmd), },
+               .data = { &calib_cfg_cmd, },
+       };
+
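+       /* Ask the init uCode to run every init-time calibration once and to
+        * report the results, so they can be stored and later sent to the
+        * runtime uCode. */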
+       memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
+       calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
+       calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
+       calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
+       calib_cfg_cmd.ucd_calib_cfg.flags =
+               IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK;
+
+       return iwl_trans_send_cmd(trans, &cmd);
+}
+
+int iwlagn_rx_calib_result(struct iwl_priv *priv,
+                           struct iwl_rx_mem_buffer *rxb,
+                           struct iwl_device_cmd *cmd)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
+       int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+
+       /* subtract the size of the length field itself */
+       len -= 4;
+
+       if (iwl_calib_set(trans(priv), hdr, len))
+               IWL_ERR(priv, "Failed to record calibration data %d\n",
+                       hdr->op_code);
+
+       return 0;
+}
+
+int iwlagn_init_alive_start(struct iwl_priv *priv)
+{
+       int ret;
+
+       if (priv->cfg->bt_params &&
+           priv->cfg->bt_params->advanced_bt_coexist) {
+               /*
+                * Tell uCode we are ready to perform calibration; this must
+                * be done before any calibration runs.  There is no need to
+                * close the envelope since we are going to load the runtime
+                * uCode later.
+                */
+               ret = iwl_send_bt_env(trans(priv), IWL_BT_COEX_ENV_OPEN,
+                       BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
+               if (ret)
+                       return ret;
+
+       }
+
+       ret = iwl_send_calib_cfg(trans(priv));
+       if (ret)
+               return ret;
+
+       /*
+        * temperature offset calibration is only needed for runtime ucode,
+        * so prepare the value now.
+        */
+       if (priv->cfg->need_temp_offset_calib) {
+               if (priv->cfg->temp_offset_v2)
+                       return iwl_set_temperature_offset_calib_v2(priv);
+               else
+                       return iwl_set_temperature_offset_calib(priv);
+       }
+
+       return 0;
+}
+
+static int iwl_send_wimax_coex(struct iwl_priv *priv)
+{
+       struct iwl_wimax_coex_cmd coex_cmd;
+
+       if (priv->cfg->base_params->support_wimax_coexist) {
+               /* UnMask wake up src at associated sleep */
+               coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
+
+               /* UnMask wake up src at unassociated sleep */
+               coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
+               memcpy(coex_cmd.sta_prio, cu_priorities,
+                       sizeof(struct iwl_wimax_coex_event_entry) *
+                        COEX_NUM_OF_EVENTS);
+
+               /* enabling the coexistence feature */
+               coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;
+
+               /* enabling the priorities tables */
+               coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
+       } else {
+               /* coexistence is disabled */
+               memset(&coex_cmd, 0, sizeof(coex_cmd));
+       }
+       return iwl_trans_send_cmd_pdu(trans(priv),
+                               COEX_PRIORITY_TABLE_CMD, CMD_SYNC,
+                               sizeof(coex_cmd), &coex_cmd);
+}
+
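+/*
+ * BT coexistence priority table: each entry packs an event priority together
+ * with a shared-antenna flag, indexed by priority-table event type.
+ */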
+static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
+       ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+               (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+       ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+               (1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+       ((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+               (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+       ((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+               (1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+       ((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+               (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+       ((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+               (1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+       ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+               (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+       ((BT_COEX_PRIO_TBL_PRIO_COEX_OFF << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+               (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+       ((BT_COEX_PRIO_TBL_PRIO_COEX_ON << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+               (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+       0, 0, 0, 0, 0, 0, 0
+};
+
+void iwl_send_prio_tbl(struct iwl_trans *trans)
+{
+       struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd;
+
+       memcpy(prio_tbl_cmd.prio_tbl, iwl_bt_prio_tbl,
+               sizeof(iwl_bt_prio_tbl));
+       if (iwl_trans_send_cmd_pdu(trans,
+                               REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC,
+                               sizeof(prio_tbl_cmd), &prio_tbl_cmd))
+               IWL_ERR(trans, "failed to send BT prio tbl command\n");
+}
+
+int iwl_send_bt_env(struct iwl_trans *trans, u8 action, u8 type)
+{
+       struct iwl_bt_coex_prot_env_cmd env_cmd;
+       int ret;
+
+       env_cmd.action = action;
+       env_cmd.type = type;
+       ret = iwl_trans_send_cmd_pdu(trans,
+                              REPLY_BT_COEX_PROT_ENV, CMD_SYNC,
+                              sizeof(env_cmd), &env_cmd);
+       if (ret)
+               IWL_ERR(trans, "failed to send BT env command\n");
+       return ret;
+}
+
+static int iwl_alive_notify(struct iwl_priv *priv)
+{
+       struct iwl_rxon_context *ctx;
+       int ret;
+
+       if (!priv->tx_cmd_pool)
+               priv->tx_cmd_pool =
+                       kmem_cache_create("iwl_dev_cmd",
+                                         sizeof(struct iwl_device_cmd),
+                                         sizeof(void *), 0, NULL);
+
+       if (!priv->tx_cmd_pool)
+               return -ENOMEM;
+
+       iwl_trans_tx_start(trans(priv));
+       for_each_context(priv, ctx)
+               ctx->last_tx_rejected = false;
+
+       ret = iwl_send_wimax_coex(priv);
+       if (ret)
+               return ret;
+
+       if (!priv->cfg->no_xtal_calib) {
+               ret = iwl_set_Xtal_calib(priv);
+               if (ret)
+                       return ret;
+       }
+
+       return iwl_send_calib_results(trans(priv));
+}
+
+/**
+ * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host,
+ *   using sample data 100 bytes apart.  If these sample points are good,
+ *   it's a pretty good bet that everything between them is good, too.
+ */
+static int iwl_verify_inst_sparse(struct iwl_bus *bus,
+                                     struct fw_desc *fw_desc)
+{
+       __le32 *image = (__le32 *)fw_desc->v_addr;
+       u32 len = fw_desc->len;
+       u32 val;
+       u32 i;
+
+       IWL_DEBUG_FW(bus, "ucode inst image size is %u\n", len);
+
+       for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
+               /* read data comes through single port, auto-incr addr */
+               /* NOTE: Use the debugless read so we don't flood kernel log
+                * if IWL_DL_IO is set */
+               iwl_write_direct32(bus, HBUS_TARG_MEM_RADDR,
+                       i + IWLAGN_RTC_INST_LOWER_BOUND);
+               val = iwl_read32(bus, HBUS_TARG_MEM_RDAT);
+               if (val != le32_to_cpu(*image))
+                       return -EIO;
+       }
+
+       return 0;
+}
+
+static void iwl_print_mismatch_inst(struct iwl_bus *bus,
+                                   struct fw_desc *fw_desc)
+{
+       __le32 *image = (__le32 *)fw_desc->v_addr;
+       u32 len = fw_desc->len;
+       u32 val;
+       u32 offs;
+       int errors = 0;
+
+       IWL_DEBUG_FW(bus, "ucode inst image size is %u\n", len);
+
+       iwl_write_direct32(bus, HBUS_TARG_MEM_RADDR,
+                          IWLAGN_RTC_INST_LOWER_BOUND);
+
+       for (offs = 0;
+            offs < len && errors < 20;
+            offs += sizeof(u32), image++) {
+               /* read data comes through single port, auto-incr addr */
+               val = iwl_read32(bus, HBUS_TARG_MEM_RDAT);
+               if (val != le32_to_cpu(*image)) {
+                       IWL_ERR(bus, "uCode INST section at "
+                               "offset 0x%x, is 0x%x, s/b 0x%x\n",
+                               offs, val, le32_to_cpu(*image));
+                       errors++;
+               }
+       }
+}
+
+/**
+ * iwl_verify_ucode - determine which instruction image is in SRAM,
+ *    and verify its contents
+ */
+static int iwl_verify_ucode(struct iwl_trans *trans,
+                           enum iwl_ucode_type ucode_type)
+{
+       struct fw_img *img = iwl_get_ucode_image(trans, ucode_type);
+
+       if (!img) {
+               IWL_ERR(trans, "Invalid ucode requested (%d)\n", ucode_type);
+               return -EINVAL;
+       }
+
+       if (!iwl_verify_inst_sparse(bus(trans), &img->code)) {
+               IWL_DEBUG_FW(trans, "uCode is good in inst SRAM\n");
+               return 0;
+       }
+
+       IWL_ERR(trans, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n");
+
+       iwl_print_mismatch_inst(bus(trans), &img->code);
+       return -EIO;
+}
+
+struct iwlagn_alive_data {
+       bool valid;
+       u8 subtype;
+};
+
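+/*
+ * Notification-wait handler for REPLY_ALIVE: record the error/log event
+ * table pointers reported by the uCode and whether the image came up valid.
+ */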
+static void iwl_alive_fn(struct iwl_trans *trans,
+                           struct iwl_rx_packet *pkt,
+                           void *data)
+{
+       struct iwlagn_alive_data *alive_data = data;
+       struct iwl_alive_resp *palive;
+
+       palive = &pkt->u.alive_frame;
+
+       IWL_DEBUG_FW(trans, "Alive ucode status 0x%08X revision "
+                      "0x%01X 0x%01X\n",
+                      palive->is_valid, palive->ver_type,
+                      palive->ver_subtype);
+
+       trans->shrd->device_pointers.error_event_table =
+               le32_to_cpu(palive->error_event_table_ptr);
+       trans->shrd->device_pointers.log_event_table =
+               le32_to_cpu(palive->log_event_table_ptr);
+
+       alive_data->subtype = palive->ver_subtype;
+       alive_data->valid = palive->is_valid == UCODE_VALID_OK;
+}
+
+/* notification wait support */
+void iwl_init_notification_wait(struct iwl_shared *shrd,
+                                  struct iwl_notification_wait *wait_entry,
+                                  u8 cmd,
+                                  void (*fn)(struct iwl_trans *trans,
+                                             struct iwl_rx_packet *pkt,
+                                             void *data),
+                                  void *fn_data)
+{
+       wait_entry->fn = fn;
+       wait_entry->fn_data = fn_data;
+       wait_entry->cmd = cmd;
+       wait_entry->triggered = false;
+       wait_entry->aborted = false;
+
+       spin_lock_bh(&shrd->notif_wait_lock);
+       list_add(&wait_entry->list, &shrd->notif_waits);
+       spin_unlock_bh(&shrd->notif_wait_lock);
+}
+
+int iwl_wait_notification(struct iwl_shared *shrd,
+                            struct iwl_notification_wait *wait_entry,
+                            unsigned long timeout)
+{
+       int ret;
+
+       ret = wait_event_timeout(shrd->notif_waitq,
+                                wait_entry->triggered || wait_entry->aborted,
+                                timeout);
+
+       spin_lock_bh(&shrd->notif_wait_lock);
+       list_del(&wait_entry->list);
+       spin_unlock_bh(&shrd->notif_wait_lock);
+
+       if (wait_entry->aborted)
+               return -EIO;
+
+       /* wait_event_timeout() never returns a negative value; 0 means timeout */
+       if (ret <= 0)
+               return -ETIMEDOUT;
+       return 0;
+}
+
+void iwl_remove_notification(struct iwl_shared *shrd,
+                               struct iwl_notification_wait *wait_entry)
+{
+       spin_lock_bh(&shrd->notif_wait_lock);
+       list_del(&wait_entry->list);
+       spin_unlock_bh(&shrd->notif_wait_lock);
+}
+
+void iwl_abort_notification_waits(struct iwl_shared *shrd)
+{
+       unsigned long flags;
+       struct iwl_notification_wait *wait_entry;
+
+       spin_lock_irqsave(&shrd->notif_wait_lock, flags);
+       list_for_each_entry(wait_entry, &shrd->notif_waits, list)
+               wait_entry->aborted = true;
+       spin_unlock_irqrestore(&shrd->notif_wait_lock, flags);
+
+       wake_up_all(&shrd->notif_waitq);
+}
+
+#define UCODE_ALIVE_TIMEOUT    HZ
+#define UCODE_CALIB_TIMEOUT    (2*HZ)
+
+int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
+                                enum iwl_ucode_type ucode_type)
+{
+       struct iwl_notification_wait alive_wait;
+       struct iwlagn_alive_data alive_data;
+       struct iwl_trans *trans = trans(priv);
+       int ret;
+       enum iwl_ucode_type old_type;
+
+       ret = iwl_trans_start_device(trans);
+       if (ret)
+               return ret;
+
+       iwl_init_notification_wait(trans->shrd, &alive_wait, REPLY_ALIVE,
+                                     iwl_alive_fn, &alive_data);
+
+       old_type = trans->shrd->ucode_type;
+       trans->shrd->ucode_type = ucode_type;
+
+       ret = iwl_load_given_ucode(trans, ucode_type);
+       if (ret) {
+               trans->shrd->ucode_type = old_type;
+               iwl_remove_notification(trans->shrd, &alive_wait);
+               return ret;
+       }
+
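+       /* Release the NIC from reset so the freshly loaded uCode starts
+        * running; the ALIVE notification is waited for just below. */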
+       iwl_trans_kick_nic(trans);
+
+       /*
+        * Some things may run in the background now, but we
+        * just wait for the ALIVE notification here.
+        */
+       ret = iwl_wait_notification(trans->shrd, &alive_wait,
+                                       UCODE_ALIVE_TIMEOUT);
+       if (ret) {
+               trans->shrd->ucode_type = old_type;
+               return ret;
+       }
+
+       if (!alive_data.valid) {
+               IWL_ERR(priv, "Loaded ucode is not valid!\n");
+               trans->shrd->ucode_type = old_type;
+               return -EIO;
+       }
+
+       /*
+        * This step takes a long time (60-80ms!!) and
+        * WoWLAN image should be loaded quickly, so
+        * skip it for WoWLAN.
+        */
+       if (ucode_type != IWL_UCODE_WOWLAN) {
+               ret = iwl_verify_ucode(trans, ucode_type);
+               if (ret) {
+                       trans->shrd->ucode_type = old_type;
+                       return ret;
+               }
+
+               /* delay a bit to give rfkill time to run */
+               msleep(5);
+       }
+
+       ret = iwl_alive_notify(priv);
+       if (ret) {
+               IWL_WARN(priv,
+                       "Could not complete ALIVE transition: %d\n", ret);
+               trans->shrd->ucode_type = old_type;
+               return ret;
+       }
+
+       return 0;
+}
+
+int iwlagn_run_init_ucode(struct iwl_priv *priv)
+{
+       struct iwl_notification_wait calib_wait;
+       int ret;
+
+       lockdep_assert_held(&priv->shrd->mutex);
+
+       /* No init ucode required? Curious, but maybe ok */
+       if (!trans(priv)->ucode_init.code.len)
+               return 0;
+
+       if (priv->shrd->ucode_type != IWL_UCODE_NONE)
+               return 0;
+
+       iwl_init_notification_wait(priv->shrd, &calib_wait,
+                                     CALIBRATION_COMPLETE_NOTIFICATION,
+                                     NULL, NULL);
+
+       /* Will also start the device */
+       ret = iwlagn_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
+       if (ret)
+               goto error;
+
+       ret = iwlagn_init_alive_start(priv);
+       if (ret)
+               goto error;
+
+       /*
+        * Some things may run in the background now, but we
+        * just wait for the calibration complete notification.
+        */
+       ret = iwl_wait_notification(priv->shrd, &calib_wait,
+                                       UCODE_CALIB_TIMEOUT);
+
+       goto out;
+
+ error:
+       iwl_remove_notification(priv->shrd, &calib_wait);
+ out:
+       /* Whatever happened, stop the device */
+       iwl_trans_stop_device(trans(priv));
+       return ret;
+}
index 98a179f..1f868b1 100644 (file)
@@ -91,11 +91,11 @@ static struct iwm_conf def_iwm_conf = {
        .mac_addr               = {0x00, 0x02, 0xb3, 0x01, 0x02, 0x03},
 };
 
-static int modparam_reset;
+static bool modparam_reset;
 module_param_named(reset, modparam_reset, bool, 0644);
 MODULE_PARM_DESC(reset, "reset on firmware errors (default 0 [not reset])");
 
-static int modparam_wimax_enable = 1;
+static bool modparam_wimax_enable = true;
 module_param_named(wimax_enable, modparam_wimax_enable, bool, 0644);
 MODULE_PARM_DESC(wimax_enable, "Enable wimax core (default 1 [wimax enabled])");
 
@@ -130,7 +130,7 @@ static void iwm_disconnect_work(struct work_struct *work)
                iwm_invalidate_mlme_profile(iwm);
 
        clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
-       iwm->umac_profile_active = 0;
+       iwm->umac_profile_active = false;
        memset(iwm->bssid, 0, ETH_ALEN);
        iwm->channel = 0;
 
index a414768..7d708f4 100644 (file)
@@ -660,7 +660,7 @@ static int iwm_mlme_profile_invalidate(struct iwm_priv *iwm, u8 *buf,
        clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status);
        clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
 
-       iwm->umac_profile_active = 0;
+       iwm->umac_profile_active = false;
        memset(iwm->bssid, 0, ETH_ALEN);
        iwm->channel = 0;
 
@@ -735,7 +735,7 @@ static int iwm_mlme_update_sta_table(struct iwm_priv *iwm, u8 *buf,
                             umac_sta->mac_addr,
                             umac_sta->flags & UMAC_STA_FLAG_QOS);
 
-               sta->valid = 1;
+               sta->valid = true;
                sta->qos = umac_sta->flags & UMAC_STA_FLAG_QOS;
                sta->color = GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR);
                memcpy(sta->addr, umac_sta->mac_addr, ETH_ALEN);
@@ -750,12 +750,12 @@ static int iwm_mlme_update_sta_table(struct iwm_priv *iwm, u8 *buf,
                sta = &iwm->sta_table[GET_VAL8(umac_sta->sta_id, LMAC_STA_ID)];
 
                if (!memcmp(sta->addr, umac_sta->mac_addr, ETH_ALEN))
-                       sta->valid = 0;
+                       sta->valid = false;
 
                break;
        case UMAC_OPCODE_CLEAR_ALL:
                for (i = 0; i < IWM_STA_TABLE_NUM; i++)
-                       iwm->sta_table[i].valid = 0;
+                       iwm->sta_table[i].valid = false;
 
                break;
        default:
@@ -1203,7 +1203,7 @@ static int iwm_ntf_wifi_if_wrapper(struct iwm_priv *iwm, u8 *buf,
 
        switch (hdr->oid) {
        case UMAC_WIFI_IF_CMD_SET_PROFILE:
-               iwm->umac_profile_active = 1;
+               iwm->umac_profile_active = true;
                break;
        default:
                break;
@@ -1363,7 +1363,7 @@ static int iwm_rx_handle_nonwifi(struct iwm_priv *iwm, u8 *buf,
         */
        list_for_each_entry(cmd, &iwm->nonwifi_pending_cmd, pending)
                if (cmd->seq_num == seq_num) {
-                       cmd->resp_received = 1;
+                       cmd->resp_received = true;
                        cmd->buf.len = buf_size;
                        memcpy(cmd->buf.hdr, buf, buf_size);
                        wake_up_interruptible(&iwm->nonwifi_queue);
index e269351..3f7bf4d 100644 (file)
@@ -859,7 +859,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
         * Most of the libertas cards can do unaligned register access, but some
         * weird ones cannot. That's especially true for the CF8305 card.
         */
-       card->align_regs = 0;
+       card->align_regs = false;
 
        card->model = get_model(p_dev->manf_id, p_dev->card_id);
        if (card->model == MODEL_UNKNOWN) {
@@ -871,7 +871,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
        /* Check if we have a current silicon */
        prod_id = if_cs_read8(card, IF_CS_PRODUCT_ID);
        if (card->model == MODEL_8305) {
-               card->align_regs = 1;
+               card->align_regs = true;
                if (prod_id < IF_CS_CF8305_B1_REV) {
                        pr_err("8305 rev B0 and older are not supported\n");
                        ret = -ENODEV;
index 728baa4..50b1ee7 100644 (file)
@@ -1291,7 +1291,6 @@ static struct spi_driver libertas_spi_driver = {
        .remove = __devexit_p(libertas_spi_remove),
        .driver = {
                .name   = "libertas_spi",
-               .bus    = &spi_bus_type,
                .owner  = THIS_MODULE,
                .pm     = &if_spi_pm_ops,
        },
index ceb51b6..a034572 100644 (file)
@@ -719,11 +719,11 @@ void lbtf_bcn_sent(struct lbtf_private *priv)
                return;
 
        if (skb_queue_empty(&priv->bc_ps_buf)) {
-               bool tx_buff_bc = 0;
+               bool tx_buff_bc = false;
 
                while ((skb = ieee80211_get_buffered_bc(priv->hw, priv->vif))) {
                        skb_queue_tail(&priv->bc_ps_buf, skb);
-                       tx_buff_bc = 1;
+                       tx_buff_bc = true;
                }
                if (tx_buff_bc) {
                        ieee80211_stop_queues(priv->hw);
index 6cf6d6d..4b9e730 100644 (file)
@@ -37,7 +37,8 @@ MODULE_AUTHOR("Jouni Malinen");
 MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211");
 MODULE_LICENSE("GPL");
 
-int wmediumd_pid;
+static u32 wmediumd_pid;
+
 static int radios = 2;
 module_param(radios, int, 0444);
 MODULE_PARM_DESC(radios, "Number of simulated radios");
@@ -665,7 +666,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        bool ack;
        struct ieee80211_tx_info *txi;
-       int _pid;
+       u32 _pid;
 
        mac80211_hwsim_monitor_rx(hw, skb);
 
@@ -676,7 +677,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        }
 
        /* wmediumd mode check */
-       _pid = wmediumd_pid;
+       _pid = ACCESS_ONCE(wmediumd_pid);
 
        if (_pid)
                return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
@@ -707,7 +708,7 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw)
 {
        struct mac80211_hwsim_data *data = hw->priv;
        wiphy_debug(hw->wiphy, "%s\n", __func__);
-       data->started = 1;
+       data->started = true;
        return 0;
 }
 
@@ -715,7 +716,7 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw)
 static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
 {
        struct mac80211_hwsim_data *data = hw->priv;
-       data->started = 0;
+       data->started = false;
        del_timer(&data->beacon_timer);
        wiphy_debug(hw->wiphy, "%s\n", __func__);
 }
@@ -764,7 +765,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
        struct ieee80211_hw *hw = arg;
        struct sk_buff *skb;
        struct ieee80211_tx_info *info;
-       int _pid;
+       u32 _pid;
 
        hwsim_check_magic(vif);
 
@@ -781,7 +782,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
        mac80211_hwsim_monitor_rx(hw, skb);
 
        /* wmediumd mode check */
-       _pid = wmediumd_pid;
+       _pid = ACCESS_ONCE(wmediumd_pid);
 
        if (_pid)
                return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
@@ -1254,7 +1255,7 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
        struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
        struct sk_buff *skb;
        struct ieee80211_pspoll *pspoll;
-       int _pid;
+       u32 _pid;
 
        if (!vp->assoc)
                return;
@@ -1275,7 +1276,7 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
        memcpy(pspoll->ta, mac, ETH_ALEN);
 
        /* wmediumd mode check */
-       _pid = wmediumd_pid;
+       _pid = ACCESS_ONCE(wmediumd_pid);
 
        if (_pid)
                return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid);
@@ -1292,7 +1293,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
        struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
        struct sk_buff *skb;
        struct ieee80211_hdr *hdr;
-       int _pid;
+       u32 _pid;
 
        if (!vp->assoc)
                return;
@@ -1314,7 +1315,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
        memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
 
        /* wmediumd mode check */
-       _pid = wmediumd_pid;
+       _pid = ACCESS_ONCE(wmediumd_pid);
 
        if (_pid)
                return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid);
@@ -1634,8 +1635,6 @@ static int hwsim_init_netlink(void)
        int rc;
        printk(KERN_INFO "mac80211_hwsim: initializing netlink\n");
 
-       wmediumd_pid = 0;
-
        rc = genl_register_family_with_ops(&hwsim_genl_family,
                hwsim_ops, ARRAY_SIZE(hwsim_ops));
        if (rc)
index e9ab9a3..787dbe2 100644 (file)
@@ -120,10 +120,11 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
 static int
 mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
                              enum nl80211_tx_power_setting type,
-                             int dbm)
+                             int mbm)
 {
        struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
        struct mwifiex_power_cfg power_cfg;
+       int dbm = MBM_TO_DBM(mbm);
 
        if (type == NL80211_TX_POWER_FIXED) {
                power_cfg.is_power_auto = 0;
@@ -750,17 +751,13 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
 {
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
 
-       if (priv->disconnect)
-               return -EBUSY;
-
-       priv->disconnect = 1;
        if (mwifiex_deauthenticate(priv, NULL))
                return -EFAULT;
 
        wiphy_dbg(wiphy, "info: successfully disconnected from %pM:"
                " reason code %d\n", priv->cfg_bssid, reason_code);
 
-       queue_work(priv->workqueue, &priv->cfg_workqueue);
+       memset(priv->cfg_bssid, 0, ETH_ALEN);
 
        return 0;
 }
@@ -980,27 +977,32 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
        int ret = 0;
 
-       if (priv->assoc_request)
-               return -EBUSY;
-
        if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
                wiphy_err(wiphy, "received infra assoc request "
                                "when station is in ibss mode\n");
                goto done;
        }
 
-       priv->assoc_request = -EINPROGRESS;
-
        wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n",
               (char *) sme->ssid, sme->bssid);
 
        ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid,
                                     priv->bss_mode, sme->channel, sme, 0);
-
-       priv->assoc_request = 1;
 done:
-       priv->assoc_result = ret;
-       queue_work(priv->workqueue, &priv->cfg_workqueue);
+       if (!ret) {
+               cfg80211_connect_result(priv->netdev, priv->cfg_bssid, NULL, 0,
+                                       NULL, 0, WLAN_STATUS_SUCCESS,
+                                       GFP_KERNEL);
+               dev_dbg(priv->adapter->dev,
+                       "info: associated to bssid %pM successfully\n",
+                       priv->cfg_bssid);
+       } else {
+               dev_dbg(priv->adapter->dev,
+                       "info: association to bssid %pM failed\n",
+                       priv->cfg_bssid);
+               memset(priv->cfg_bssid, 0, ETH_ALEN);
+       }
+
        return ret;
 }
 
@@ -1017,28 +1019,29 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
        struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
        int ret = 0;
 
-       if (priv->ibss_join_request)
-               return -EBUSY;
-
        if (priv->bss_mode != NL80211_IFTYPE_ADHOC) {
                wiphy_err(wiphy, "request to join ibss received "
                                "when station is not in ibss mode\n");
                goto done;
        }
 
-       priv->ibss_join_request = -EINPROGRESS;
-
        wiphy_dbg(wiphy, "info: trying to join to %s and bssid %pM\n",
               (char *) params->ssid, params->bssid);
 
        ret = mwifiex_cfg80211_assoc(priv, params->ssid_len, params->ssid,
                                params->bssid, priv->bss_mode,
                                params->channel, NULL, params->privacy);
-
-       priv->ibss_join_request = 1;
 done:
-       priv->ibss_join_result = ret;
-       queue_work(priv->workqueue, &priv->cfg_workqueue);
+       if (!ret) {
+               cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid, GFP_KERNEL);
+               dev_dbg(priv->adapter->dev,
+                       "info: joined/created adhoc network with bssid"
+                       " %pM successfully\n", priv->cfg_bssid);
+       } else {
+               dev_dbg(priv->adapter->dev,
+                       "info: failed creating/joining adhoc network\n");
+       }
+
        return ret;
 }
 
@@ -1053,17 +1056,12 @@ mwifiex_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
 {
        struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
 
-       if (priv->disconnect)
-               return -EBUSY;
-
-       priv->disconnect = 1;
-
        wiphy_dbg(wiphy, "info: disconnecting from essid %pM\n",
                        priv->cfg_bssid);
        if (mwifiex_deauthenticate(priv, NULL))
                return -EFAULT;
 
-       queue_work(priv->workqueue, &priv->cfg_workqueue);
+       memset(priv->cfg_bssid, 0, ETH_ALEN);
 
        return 0;
 }
@@ -1080,15 +1078,42 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, struct net_device *dev,
                      struct cfg80211_scan_request *request)
 {
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+       int i;
+       struct ieee80211_channel *chan;
 
        wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name);
 
-       if (priv->scan_request && priv->scan_request != request)
-               return -EBUSY;
-
        priv->scan_request = request;
 
-       queue_work(priv->workqueue, &priv->cfg_workqueue);
+       priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
+                                       GFP_KERNEL);
+       if (!priv->user_scan_cfg) {
+               dev_err(priv->adapter->dev, "failed to alloc scan_req\n");
+               return -ENOMEM;
+       }
+       for (i = 0; i < request->n_ssids; i++) {
+               memcpy(priv->user_scan_cfg->ssid_list[i].ssid,
+                       request->ssids[i].ssid, request->ssids[i].ssid_len);
+               priv->user_scan_cfg->ssid_list[i].max_len =
+                       request->ssids[i].ssid_len;
+       }
+       for (i = 0; i < request->n_channels; i++) {
+               chan = request->channels[i];
+               priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value;
+               priv->user_scan_cfg->chan_list[i].radio_type = chan->band;
+
+               if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+                       priv->user_scan_cfg->chan_list[i].scan_type =
+                               MWIFIEX_SCAN_TYPE_PASSIVE;
+               else
+                       priv->user_scan_cfg->chan_list[i].scan_type =
+                               MWIFIEX_SCAN_TYPE_ACTIVE;
+
+               priv->user_scan_cfg->chan_list[i].scan_time = 0;
+       }
+       if (mwifiex_set_user_scan_ioctl(priv, priv->user_scan_cfg))
+               return -EFAULT;
+
        return 0;
 }
 
@@ -1294,10 +1319,6 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev)
 
        priv->media_connected = false;
 
-       cancel_work_sync(&priv->cfg_workqueue);
-       flush_workqueue(priv->workqueue);
-       destroy_workqueue(priv->workqueue);
-
        priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
 
        return 0;
@@ -1375,9 +1396,6 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv)
        memcpy(wdev->wiphy->perm_addr, priv->curr_addr, ETH_ALEN);
        wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
 
-       /* We are using custom domains */
-       wdev->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
-
        /* Reserve space for bss band information */
        wdev->wiphy->bss_priv_size = sizeof(u8);
 
@@ -1406,100 +1424,3 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv)
 
        return ret;
 }
-
-/*
- * This function handles the result of different pending network operations.
- *
- * The following operations are handled and CFG802.11 subsystem is
- * notified accordingly -
- *      - Scan request completion
- *      - Association request completion
- *      - IBSS join request completion
- *      - Disconnect request completion
- */
-void
-mwifiex_cfg80211_results(struct work_struct *work)
-{
-       struct mwifiex_private *priv =
-               container_of(work, struct mwifiex_private, cfg_workqueue);
-       struct mwifiex_user_scan_cfg *scan_req;
-       int ret = 0, i;
-       struct ieee80211_channel *chan;
-
-       if (priv->scan_request) {
-               scan_req = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
-                                  GFP_KERNEL);
-               if (!scan_req) {
-                       dev_err(priv->adapter->dev, "failed to alloc "
-                                                   "scan_req\n");
-                       return;
-               }
-               for (i = 0; i < priv->scan_request->n_ssids; i++) {
-                       memcpy(scan_req->ssid_list[i].ssid,
-                                       priv->scan_request->ssids[i].ssid,
-                                       priv->scan_request->ssids[i].ssid_len);
-                       scan_req->ssid_list[i].max_len =
-                                       priv->scan_request->ssids[i].ssid_len;
-               }
-               for (i = 0; i < priv->scan_request->n_channels; i++) {
-                       chan = priv->scan_request->channels[i];
-                       scan_req->chan_list[i].chan_number = chan->hw_value;
-                       scan_req->chan_list[i].radio_type = chan->band;
-                       if (chan->flags & IEEE80211_CHAN_DISABLED)
-                               scan_req->chan_list[i].scan_type =
-                                       MWIFIEX_SCAN_TYPE_PASSIVE;
-                       else
-                               scan_req->chan_list[i].scan_type =
-                                       MWIFIEX_SCAN_TYPE_ACTIVE;
-                       scan_req->chan_list[i].scan_time = 0;
-               }
-               if (mwifiex_set_user_scan_ioctl(priv, scan_req))
-                       ret = -EFAULT;
-               priv->scan_result_status = ret;
-               dev_dbg(priv->adapter->dev, "info: %s: sending scan results\n",
-                                                       __func__);
-               cfg80211_scan_done(priv->scan_request,
-                               (priv->scan_result_status < 0));
-               priv->scan_request = NULL;
-               kfree(scan_req);
-       }
-
-       if (priv->assoc_request == 1) {
-               if (!priv->assoc_result) {
-                       cfg80211_connect_result(priv->netdev, priv->cfg_bssid,
-                                               NULL, 0, NULL, 0,
-                                               WLAN_STATUS_SUCCESS,
-                                               GFP_KERNEL);
-                       dev_dbg(priv->adapter->dev,
-                               "info: associated to bssid %pM successfully\n",
-                              priv->cfg_bssid);
-               } else {
-                       dev_dbg(priv->adapter->dev,
-                               "info: association to bssid %pM failed\n",
-                              priv->cfg_bssid);
-                       memset(priv->cfg_bssid, 0, ETH_ALEN);
-               }
-               priv->assoc_request = 0;
-               priv->assoc_result = 0;
-       }
-
-       if (priv->ibss_join_request == 1) {
-               if (!priv->ibss_join_result) {
-                       cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid,
-                                            GFP_KERNEL);
-                       dev_dbg(priv->adapter->dev,
-                               "info: joined/created adhoc network with bssid"
-                                       " %pM successfully\n", priv->cfg_bssid);
-               } else {
-                       dev_dbg(priv->adapter->dev,
-                               "info: failed creating/joining adhoc network\n");
-               }
-               priv->ibss_join_request = 0;
-               priv->ibss_join_result = 0;
-       }
-
-       if (priv->disconnect) {
-               memset(priv->cfg_bssid, 0, ETH_ALEN);
-               priv->disconnect = 0;
-       }
-}
index 8d010f2..76c76c6 100644 (file)
@@ -26,5 +26,4 @@
 
 int mwifiex_register_cfg80211(struct mwifiex_private *);
 
-void mwifiex_cfg80211_results(struct work_struct *work);
 #endif
index ac27815..6e0a3ea 100644 (file)
@@ -939,7 +939,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
 {
        struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
        unsigned long cmd_flags;
-       unsigned long cmd_pending_q_flags;
        unsigned long scan_pending_q_flags;
        uint16_t cancel_scan_cmd = false;
 
@@ -949,12 +948,9 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
                cmd_node = adapter->curr_cmd;
                cmd_node->wait_q_enabled = false;
                cmd_node->cmd_flag |= CMD_F_CANCELED;
-               spin_lock_irqsave(&adapter->cmd_pending_q_lock,
-                                 cmd_pending_q_flags);
-               list_del(&cmd_node->list);
-               spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
-                                      cmd_pending_q_flags);
                mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+               mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+               adapter->curr_cmd = NULL;
                spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
        }
 
@@ -981,7 +977,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
                spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
        }
        adapter->cmd_wait_q.status = -1;
-       mwifiex_complete_cmd(adapter, adapter->curr_cmd);
 }
 
 /*
index 2694045..244c728 100644 (file)
@@ -282,6 +282,45 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
        adapter->arp_filter_size = 0;
 }
 
+/*
+ * This function sets trans_start per tx_queue
+ */
+void mwifiex_set_trans_start(struct net_device *dev)
+{
+       int i;
+
+       for (i = 0; i < dev->num_tx_queues; i++)
+               netdev_get_tx_queue(dev, i)->trans_start = jiffies;
+
+       dev->trans_start = jiffies;
+}
+
+/*
+ * This function wakes up all queues in net_device
+ */
+void mwifiex_wake_up_net_dev_queue(struct net_device *netdev,
+                                       struct mwifiex_adapter *adapter)
+{
+       unsigned long dev_queue_flags;
+
+       spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
+       netif_tx_wake_all_queues(netdev);
+       spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
+}
+
+/*
+ * This function stops all queues in net_device
+ */
+void mwifiex_stop_net_dev_queue(struct net_device *netdev,
+                                       struct mwifiex_adapter *adapter)
+{
+       unsigned long dev_queue_flags;
+
+       spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
+       netif_tx_stop_all_queues(netdev);
+       spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
+}
+
 /*
  *  This function releases the lock variables and frees the locks and
  *  associated locks.
@@ -359,6 +398,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
        spin_lock_init(&adapter->int_lock);
        spin_lock_init(&adapter->main_proc_lock);
        spin_lock_init(&adapter->mwifiex_cmd_lock);
+       spin_lock_init(&adapter->queue_lock);
        for (i = 0; i < adapter->priv_num; i++) {
                if (adapter->priv[i]) {
                        priv = adapter->priv[i];
index 67e6db7..84be196 100644 (file)
@@ -401,7 +401,7 @@ mwifiex_fill_buffer(struct sk_buff *skb)
 static int
 mwifiex_open(struct net_device *dev)
 {
-       netif_start_queue(dev);
+       netif_tx_start_all_queues(dev);
        return 0;
 }
 
@@ -465,8 +465,8 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        atomic_inc(&priv->adapter->tx_pending);
 
        if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) {
-               netif_stop_queue(priv->netdev);
-               dev->trans_start = jiffies;
+               mwifiex_set_trans_start(dev);
+               mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
        }
 
        queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
@@ -533,7 +533,7 @@ mwifiex_tx_timeout(struct net_device *dev)
 
        dev_err(priv->adapter->dev, "%lu : Tx timeout, bss_index=%d\n",
                                jiffies, priv->bss_index);
-       dev->trans_start = jiffies;
+       mwifiex_set_trans_start(dev);
        priv->num_tx_timeout++;
 }
 
@@ -586,8 +586,6 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv,
        priv->media_connected = false;
        memset(&priv->nick_name, 0, sizeof(priv->nick_name));
        priv->num_tx_timeout = 0;
-       priv->workqueue = create_singlethread_workqueue("cfg80211_wq");
-       INIT_WORK(&priv->cfg_workqueue, mwifiex_cfg80211_results);
        memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN);
 }
 
@@ -793,7 +791,8 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
                priv = adapter->priv[i];
                if (priv && priv->netdev) {
                        if (!netif_queue_stopped(priv->netdev))
-                               netif_stop_queue(priv->netdev);
+                               mwifiex_stop_net_dev_queue(priv->netdev,
+                                                               adapter);
                        if (netif_carrier_ok(priv->netdev))
                                netif_carrier_off(priv->netdev);
                }
index 3861a61..9207fc6 100644 (file)
@@ -453,15 +453,8 @@ struct mwifiex_private {
        u8 scan_pending_on_block;
        u8 report_scan_result;
        struct cfg80211_scan_request *scan_request;
-       int scan_result_status;
-       int assoc_request;
-       u16 assoc_result;
-       int ibss_join_request;
-       u16 ibss_join_result;
-       bool disconnect;
+       struct mwifiex_user_scan_cfg *user_scan_cfg;
        u8 cfg_bssid[6];
-       struct workqueue_struct *workqueue;
-       struct work_struct cfg_workqueue;
        u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
        struct wps wps;
        u8 scan_block;
@@ -655,10 +648,19 @@ struct mwifiex_adapter {
        struct mwifiex_wait_queue cmd_wait_q;
        u8 scan_wait_q_woken;
        struct cmd_ctrl_node *cmd_queued;
+       spinlock_t queue_lock;          /* lock for tx queues */
 };
 
 int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
 
+void mwifiex_set_trans_start(struct net_device *dev);
+
+void mwifiex_stop_net_dev_queue(struct net_device *netdev,
+               struct mwifiex_adapter *adapter);
+
+void mwifiex_wake_up_net_dev_queue(struct net_device *netdev,
+               struct mwifiex_adapter *adapter);
+
 int mwifiex_init_fw(struct mwifiex_adapter *adapter);
 
 int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter);
index a2f3200..4053509 100644 (file)
@@ -386,7 +386,7 @@ static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter)
        card->txbd_ring_vbase = kzalloc(card->txbd_ring_size, GFP_KERNEL);
        if (!card->txbd_ring_vbase) {
                dev_err(adapter->dev, "Unable to allocate buffer for txbd ring.\n");
-               return -1;
+               return -ENOMEM;
        }
        card->txbd_ring_pbase = virt_to_phys(card->txbd_ring_vbase);
 
@@ -476,7 +476,7 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
        if (!card->rxbd_ring_vbase) {
                dev_err(adapter->dev, "Unable to allocate buffer for "
                                "rxbd_ring.\n");
-               return -1;
+               return -ENOMEM;
        }
        card->rxbd_ring_pbase = virt_to_phys(card->rxbd_ring_vbase);
 
@@ -569,7 +569,7 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
        if (!card->evtbd_ring_vbase) {
                dev_err(adapter->dev, "Unable to allocate buffer. "
                                "Terminating download\n");
-               return -1;
+               return -ENOMEM;
        }
        card->evtbd_ring_pbase = virt_to_phys(card->evtbd_ring_vbase);
 
@@ -1231,15 +1231,13 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
        if (rdptr >= MWIFIEX_MAX_EVT_BD) {
                dev_err(adapter->dev, "event_complete: Invalid rdptr 0x%x\n",
                                        rdptr);
-               ret = -EINVAL;
-               goto done;
+               return -EINVAL;
        }
 
        /* Read the event ring write pointer set by firmware */
        if (mwifiex_read_reg(adapter, REG_EVTBD_WRPTR, &wrptr)) {
                dev_err(adapter->dev, "event_complete: failed to read REG_EVTBD_WRPTR\n");
-               ret = -1;
-               goto done;
+               return -1;
        }
 
        if (!card->evt_buf_list[rdptr]) {
@@ -1268,15 +1266,9 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
        /* Write the event ring read pointer in to REG_EVTBD_RDPTR */
        if (mwifiex_write_reg(adapter, REG_EVTBD_RDPTR, card->evtbd_rdptr)) {
                dev_err(adapter->dev, "event_complete: failed to read REG_EVTBD_RDPTR\n");
-               ret = -1;
-               goto done;
+               return -1;
        }
 
-done:
-       /* Free the buffer for failure case */
-       if (ret && skb)
-               dev_kfree_skb_any(skb);
-
        dev_dbg(adapter->dev, "info: Check Events Again\n");
        ret = mwifiex_pcie_process_event_ready(adapter);
 
index b8b9d37..e2e7156 100644 (file)
@@ -1391,11 +1391,8 @@ int mwifiex_set_user_scan_ioctl(struct mwifiex_private *priv,
 {
        int status;
 
-       priv->adapter->scan_wait_q_woken = false;
-
        status = mwifiex_scan_networks(priv, scan_req);
-       if (!status)
-               status = mwifiex_wait_queue_complete(priv->adapter);
+       queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
 
        return status;
 }
@@ -1796,6 +1793,14 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                        up(&priv->async_sem);
                }
 
+               if (priv->user_scan_cfg) {
+                       dev_dbg(priv->adapter->dev, "info: %s: sending scan "
+                                                       "results\n", __func__);
+                       cfg80211_scan_done(priv->scan_request, 0);
+                       priv->scan_request = NULL;
+                       kfree(priv->user_scan_cfg);
+                       priv->user_scan_cfg = NULL;
+               }
        } else {
                /* Get scan command from scan_pending_q and put to
                   cmd_pending_q */
index 702452b..d39d845 100644 (file)
@@ -1087,7 +1087,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
                                           (adapter->ioport | 0x1000 |
                                            (card->mpa_rx.ports << 4)) +
                                           card->mpa_rx.start_port, 1))
-                       return -1;
+                       goto error;
 
                curr_ptr = card->mpa_rx.buf;
 
@@ -1130,12 +1130,29 @@ rx_curr_single:
                if (mwifiex_sdio_card_to_host(adapter, &pkt_type,
                                              skb->data, skb->len,
                                              adapter->ioport + port))
-                       return -1;
+                       goto error;
 
                mwifiex_decode_rx_packet(adapter, skb, pkt_type);
        }
 
        return 0;
+
+error:
+       if (MP_RX_AGGR_IN_PROGRESS(card)) {
+               /* Multiport-aggregation transfer failed - cleanup */
+               for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) {
+                       /* free the skbs that were buffered for aggregation */
+                       skb_deaggr = card->mpa_rx.skb_arr[pind];
+                       dev_kfree_skb_any(skb_deaggr);
+               }
+               MP_RX_AGGR_BUF_RESET(card);
+       }
+
+       if (f_do_rx_cur)
+               /* Single transfer pending. Free curr buff also */
+               dev_kfree_skb_any(skb);
+
+       return -1;
 }
 
 /*
@@ -1271,7 +1288,6 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
 
                                dev_dbg(adapter->dev,
                                                "info: CFG reg val =%x\n", cr);
-                               dev_kfree_skb_any(skb);
                                return -1;
                        }
                }
index f204810..d7aa21d 100644 (file)
@@ -115,18 +115,17 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv)
        if (adapter->num_cmd_timeout && adapter->curr_cmd)
                return;
        priv->media_connected = false;
-       if (!priv->disconnect) {
-               priv->disconnect = 1;
-               dev_dbg(adapter->dev, "info: successfully disconnected from"
-                               " %pM: reason code %d\n", priv->cfg_bssid,
-                               WLAN_REASON_DEAUTH_LEAVING);
-               cfg80211_disconnected(priv->netdev,
-                               WLAN_REASON_DEAUTH_LEAVING, NULL, 0,
-                               GFP_KERNEL);
-               queue_work(priv->workqueue, &priv->cfg_workqueue);
+       dev_dbg(adapter->dev, "info: successfully disconnected from"
+                       " %pM: reason code %d\n", priv->cfg_bssid,
+                       WLAN_REASON_DEAUTH_LEAVING);
+       if (priv->bss_mode == NL80211_IFTYPE_STATION) {
+               cfg80211_disconnected(priv->netdev, WLAN_REASON_DEAUTH_LEAVING,
+                                     NULL, 0, GFP_KERNEL);
        }
+       memset(priv->cfg_bssid, 0, ETH_ALEN);
+
        if (!netif_queue_stopped(priv->netdev))
-               netif_stop_queue(priv->netdev);
+               mwifiex_stop_net_dev_queue(priv->netdev, adapter);
        if (netif_carrier_ok(priv->netdev))
                netif_carrier_off(priv->netdev);
        /* Reset wireless stats signal info */
@@ -201,7 +200,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                if (!netif_carrier_ok(priv->netdev))
                        netif_carrier_on(priv->netdev);
                if (netif_queue_stopped(priv->netdev))
-                       netif_wake_queue(priv->netdev);
+                       mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
                break;
 
        case EVENT_DEAUTHENTICATED:
@@ -292,7 +291,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                priv->adhoc_is_link_sensed = false;
                mwifiex_clean_txrx(priv);
                if (!netif_queue_stopped(priv->netdev))
-                       netif_stop_queue(priv->netdev);
+                       mwifiex_stop_net_dev_queue(priv->netdev, adapter);
                if (netif_carrier_ok(priv->netdev))
                        netif_carrier_off(priv->netdev);
                break;
index 4b6f553..6d990c7 100644 (file)
@@ -234,7 +234,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
                                      "associating...\n");
 
                if (!netif_queue_stopped(priv->netdev))
-                       netif_stop_queue(priv->netdev);
+                       mwifiex_stop_net_dev_queue(priv->netdev, adapter);
 
                /* Clear any past association response stored for
                 * application retrieval */
@@ -265,7 +265,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
                ret = mwifiex_check_network_compatibility(priv, bss_desc);
 
                if (!netif_queue_stopped(priv->netdev))
-                       netif_stop_queue(priv->netdev);
+                       mwifiex_stop_net_dev_queue(priv->netdev, adapter);
 
                if (!ret) {
                        dev_dbg(adapter->dev, "info: network found in scan"
index a206f41..d9274a1 100644 (file)
@@ -134,7 +134,7 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
        if (!priv)
                goto done;
 
-       priv->netdev->trans_start = jiffies;
+       mwifiex_set_trans_start(priv->netdev);
        if (!status) {
                priv->stats.tx_packets++;
                priv->stats.tx_bytes += skb->len;
@@ -152,7 +152,8 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
                if ((GET_BSS_ROLE(tpriv) == MWIFIEX_BSS_ROLE_STA)
                                && (tpriv->media_connected)) {
                        if (netif_queue_stopped(tpriv->netdev))
-                               netif_wake_queue(tpriv->netdev);
+                               mwifiex_wake_up_net_dev_queue(tpriv->netdev,
+                                                               adapter);
                }
        }
 done:
index 995695c..e75d5c8 100644 (file)
@@ -31,7 +31,7 @@
 #define MWL8K_VERSION  "0.12"
 
 /* Module parameters */
-static unsigned ap_mode_default;
+static bool ap_mode_default;
 module_param(ap_mode_default, bool, 0);
 MODULE_PARM_DESC(ap_mode_default,
                 "Set to 1 to make ap mode the default instead of sta mode");
@@ -738,10 +738,10 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
 
                ready_code = ioread32(priv->regs + MWL8K_HIU_INT_CODE);
                if (ready_code == MWL8K_FWAP_READY) {
-                       priv->ap_fw = 1;
+                       priv->ap_fw = true;
                        break;
                } else if (ready_code == MWL8K_FWSTA_READY) {
-                       priv->ap_fw = 0;
+                       priv->ap_fw = false;
                        break;
                }
 
@@ -5517,8 +5517,8 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
        INIT_LIST_HEAD(&priv->vif_list);
 
        /* Set default radio state and preamble */
-       priv->radio_on = 0;
-       priv->radio_short_preamble = 0;
+       priv->radio_on = false;
+       priv->radio_short_preamble = false;
 
        /* Finalize join worker */
        INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker);
index b52acc4..9fb77d0 100644 (file)
@@ -121,7 +121,7 @@ module_param(orinoco_debug, int, 0644);
 MODULE_PARM_DESC(orinoco_debug, "Debug level");
 #endif
 
-static int suppress_linkstatus; /* = 0 */
+static bool suppress_linkstatus; /* = 0 */
 module_param(suppress_linkstatus, bool, 0644);
 MODULE_PARM_DESC(suppress_linkstatus, "Don't log link status changes");
 
index db4d9a0..af2ca1a 100644 (file)
@@ -27,7 +27,7 @@
 #include "p54.h"
 #include "lmac.h"
 
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
index 2d5cf5b..7faed62 100644 (file)
@@ -700,7 +700,6 @@ static int __devexit p54spi_remove(struct spi_device *spi)
 static struct spi_driver p54spi_driver = {
        .driver = {
                .name           = "p54spi",
-               .bus            = &spi_bus_type,
                .owner          = THIS_MODULE,
        },
 
index bc2ba80..4e44b1a 100644 (file)
@@ -2493,323 +2493,7 @@ prism54_set_mac_address(struct net_device *ndev, void *addr)
        return ret;
 }
 
-/* Note: currently, use hostapd ioctl from the Host AP driver for WPA
- * support. This is to be replaced with Linux wireless extensions once they
- * get WPA support. */
-
-/* Note II: please leave all this together as it will be easier to remove later,
- * once wireless extensions add WPA support -mcgrof */
-
-/* PRISM54_HOSTAPD ioctl() cmd: */
-enum {
-       PRISM2_SET_ENCRYPTION = 6,
-       PRISM2_HOSTAPD_SET_GENERIC_ELEMENT = 12,
-       PRISM2_HOSTAPD_MLME = 13,
-       PRISM2_HOSTAPD_SCAN_REQ = 14,
-};
-
 #define PRISM54_SET_WPA                        SIOCIWFIRSTPRIV+12
-#define PRISM54_HOSTAPD                        SIOCIWFIRSTPRIV+25
-#define PRISM54_DROP_UNENCRYPTED       SIOCIWFIRSTPRIV+26
-
-#define PRISM2_HOSTAPD_MAX_BUF_SIZE 1024
-#define PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \
-       offsetof(struct prism2_hostapd_param, u.generic_elem.data)
-
-/* Maximum length for algorithm names (-1 for nul termination)
- * used in ioctl() */
-#define HOSTAP_CRYPT_ALG_NAME_LEN 16
-
-struct prism2_hostapd_param {
-       u32 cmd;
-       u8 sta_addr[ETH_ALEN];
-       union {
-              struct {
-                      u8 alg[HOSTAP_CRYPT_ALG_NAME_LEN];
-                      u32 flags;
-                      u32 err;
-                      u8 idx;
-                      u8 seq[8]; /* sequence counter (set: RX, get: TX) */
-                      u16 key_len;
-                      u8 key[0];
-                      } crypt;
-               struct {
-                       u8 len;
-                       u8 data[0];
-               } generic_elem;
-               struct {
-#define MLME_STA_DEAUTH 0
-#define MLME_STA_DISASSOC 1
-                       u16 cmd;
-                       u16 reason_code;
-               } mlme;
-               struct {
-                       u8 ssid_len;
-                       u8 ssid[32];
-               } scan_req;
-       } u;
-};
-
-
-static int
-prism2_ioctl_set_encryption(struct net_device *dev,
-       struct prism2_hostapd_param *param,
-       int param_len)
-{
-       islpci_private *priv = netdev_priv(dev);
-       int rvalue = 0, force = 0;
-       int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0;
-       union oid_res_t r;
-
-       /* with the new API, it's impossible to get a NULL pointer.
-        * New version of iwconfig set the IW_ENCODE_NOKEY flag
-        * when no key is given, but older versions don't. */
-
-       if (param->u.crypt.key_len > 0) {
-               /* we have a key to set */
-               int index = param->u.crypt.idx;
-               int current_index;
-               struct obj_key key = { DOT11_PRIV_TKIP, 0, "" };
-
-               /* get the current key index */
-               rvalue = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r);
-               current_index = r.u;
-               /* Verify that the key is not marked as invalid */
-               if (!(param->u.crypt.flags & IW_ENCODE_NOKEY)) {
-                       key.length = param->u.crypt.key_len > sizeof (param->u.crypt.key) ?
-                           sizeof (param->u.crypt.key) : param->u.crypt.key_len;
-                       memcpy(key.key, param->u.crypt.key, key.length);
-                       if (key.length == 32)
-                               /* we want WPA-PSK */
-                               key.type = DOT11_PRIV_TKIP;
-                       if ((index < 0) || (index > 3))
-                               /* no index provided use the current one */
-                               index = current_index;
-
-                       /* now send the key to the card  */
-                       rvalue |=
-                           mgt_set_request(priv, DOT11_OID_DEFKEYX, index,
-                                           &key);
-               }
-               /*
-                * If a valid key is set, encryption should be enabled
-                * (user may turn it off later).
-                * This is also how "iwconfig ethX key on" works
-                */
-               if ((index == current_index) && (key.length > 0))
-                       force = 1;
-       } else {
-               int index = (param->u.crypt.flags & IW_ENCODE_INDEX) - 1;
-               if ((index >= 0) && (index <= 3)) {
-                       /* we want to set the key index */
-                       rvalue |=
-                           mgt_set_request(priv, DOT11_OID_DEFKEYID, 0,
-                                           &index);
-               } else {
-                       if (!(param->u.crypt.flags & IW_ENCODE_MODE)) {
-                               /* we cannot do anything. Complain. */
-                               return -EINVAL;
-                       }
-               }
-       }
-       /* now read the flags */
-       if (param->u.crypt.flags & IW_ENCODE_DISABLED) {
-               /* Encoding disabled,
-                * authen = DOT11_AUTH_OS;
-                * invoke = 0;
-                * exunencrypt = 0; */
-       }
-       if (param->u.crypt.flags & IW_ENCODE_OPEN)
-               /* Encode but accept non-encoded packets. No auth */
-               invoke = 1;
-       if ((param->u.crypt.flags & IW_ENCODE_RESTRICTED) || force) {
-               /* Refuse non-encoded packets. Auth */
-               authen = DOT11_AUTH_BOTH;
-               invoke = 1;
-               exunencrypt = 1;
-       }
-       /* do the change if requested  */
-       if ((param->u.crypt.flags & IW_ENCODE_MODE) || force) {
-               rvalue |=
-                   mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, &authen);
-               rvalue |=
-                   mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, &invoke);
-               rvalue |=
-                   mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0,
-                                   &exunencrypt);
-       }
-       return rvalue;
-}
-
-static int
-prism2_ioctl_set_generic_element(struct net_device *ndev,
-       struct prism2_hostapd_param *param,
-       int param_len)
-{
-       islpci_private *priv = netdev_priv(ndev);
-       int max_len, len, alen, ret=0;
-       struct obj_attachment *attach;
-
-       len = param->u.generic_elem.len;
-       max_len = param_len - PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN;
-       if (max_len < 0 || max_len < len)
-               return -EINVAL;
-
-       alen = sizeof(*attach) + len;
-       attach = kzalloc(alen, GFP_KERNEL);
-       if (attach == NULL)
-               return -ENOMEM;
-
-#define WLAN_FC_TYPE_MGMT 0
-#define WLAN_FC_STYPE_ASSOC_REQ 0
-#define WLAN_FC_STYPE_REASSOC_REQ 2
-
-       /* Note: endianness is covered by mgt_set_varlen */
-
-       attach->type = (WLAN_FC_TYPE_MGMT << 2) |
-               (WLAN_FC_STYPE_ASSOC_REQ << 4);
-       attach->id = -1;
-       attach->size = len;
-       memcpy(attach->data, param->u.generic_elem.data, len);
-
-       ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len);
-
-       if (ret == 0) {
-               attach->type = (WLAN_FC_TYPE_MGMT << 2) |
-                       (WLAN_FC_STYPE_REASSOC_REQ << 4);
-
-              ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len);
-
-              if (ret == 0)
-                      printk(KERN_DEBUG "%s: WPA IE Attachment was set\n",
-                                      ndev->name);
-       }
-
-       kfree(attach);
-       return ret;
-
-}
-
-static int
-prism2_ioctl_mlme(struct net_device *dev, struct prism2_hostapd_param *param)
-{
-       return -EOPNOTSUPP;
-}
-
-static int
-prism2_ioctl_scan_req(struct net_device *ndev,
-                     struct prism2_hostapd_param *param)
-{
-       islpci_private *priv = netdev_priv(ndev);
-       struct iw_request_info info;
-       int i, rvalue;
-       struct obj_bsslist *bsslist;
-       u32 noise = 0;
-       char *extra = "";
-       char *current_ev = "foo";
-       union oid_res_t r;
-
-       if (islpci_get_state(priv) < PRV_STATE_INIT) {
-               /* device is not ready, fail gently */
-               return 0;
-       }
-
-       /* first get the noise value. We will use it to report the link quality */
-       rvalue = mgt_get_request(priv, DOT11_OID_NOISEFLOOR, 0, NULL, &r);
-       noise = r.u;
-
-       /* Ask the device for a list of known bss. We can report at most
-        * IW_MAX_AP=64 to the range struct. But the device won't repport anything
-        * if you change the value of IWMAX_BSS=24.
-        */
-       rvalue |= mgt_get_request(priv, DOT11_OID_BSSLIST, 0, NULL, &r);
-       bsslist = r.ptr;
-
-       info.cmd = PRISM54_HOSTAPD;
-       info.flags = 0;
-
-       /* ok now, scan the list and translate its info */
-       for (i = 0; i < min(IW_MAX_AP, (int) bsslist->nr); i++)
-               current_ev = prism54_translate_bss(ndev, &info, current_ev,
-                                                  extra + IW_SCAN_MAX_DATA,
-                                                  &(bsslist->bsslist[i]),
-                                                  noise);
-       kfree(bsslist);
-
-       return rvalue;
-}
-
-static int
-prism54_hostapd(struct net_device *ndev, struct iw_point *p)
-{
-       struct prism2_hostapd_param *param;
-       int ret = 0;
-       u32 uwrq;
-
-       printk(KERN_DEBUG "prism54_hostapd - len=%d\n", p->length);
-       if (p->length < sizeof(struct prism2_hostapd_param) ||
-           p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
-               return -EINVAL;
-
-       param = memdup_user(p->pointer, p->length);
-       if (IS_ERR(param))
-               return PTR_ERR(param);
-
-       switch (param->cmd) {
-       case PRISM2_SET_ENCRYPTION:
-              printk(KERN_DEBUG "%s: Caught WPA supplicant set encryption request\n",
-                              ndev->name);
-               ret = prism2_ioctl_set_encryption(ndev, param, p->length);
-               break;
-       case PRISM2_HOSTAPD_SET_GENERIC_ELEMENT:
-              printk(KERN_DEBUG "%s: Caught WPA supplicant set WPA IE request\n",
-                              ndev->name);
-               ret = prism2_ioctl_set_generic_element(ndev, param,
-                                                      p->length);
-               break;
-       case PRISM2_HOSTAPD_MLME:
-              printk(KERN_DEBUG "%s: Caught WPA supplicant MLME request\n",
-                              ndev->name);
-               ret = prism2_ioctl_mlme(ndev, param);
-               break;
-       case PRISM2_HOSTAPD_SCAN_REQ:
-              printk(KERN_DEBUG "%s: Caught WPA supplicant scan request\n",
-                              ndev->name);
-               ret = prism2_ioctl_scan_req(ndev, param);
-               break;
-       case PRISM54_SET_WPA:
-              printk(KERN_DEBUG "%s: Caught WPA supplicant wpa init request\n",
-                              ndev->name);
-              uwrq = 1;
-              ret = prism54_set_wpa(ndev, NULL, &uwrq, NULL);
-              break;
-       case PRISM54_DROP_UNENCRYPTED:
-              printk(KERN_DEBUG "%s: Caught WPA drop unencrypted request\n",
-                              ndev->name);
-#if 0
-              uwrq = 0x01;
-              mgt_set(priv, DOT11_OID_EXUNENCRYPTED, &uwrq);
-              down_write(&priv->mib_sem);
-              mgt_commit(priv);
-              up_write(&priv->mib_sem);
-#endif
-              /* Not necessary, as set_wpa does it, should we just do it here though? */
-              ret = 0;
-              break;
-       default:
-              printk(KERN_DEBUG "%s: Caught a WPA supplicant request that is not supported\n",
-                              ndev->name);
-               ret = -EOPNOTSUPP;
-               break;
-       }
-
-       if (ret == 0 && copy_to_user(p->pointer, param, p->length))
-               ret = -EFAULT;
-
-       kfree(param);
-
-       return ret;
-}
 
 static int
 prism54_set_wpa(struct net_device *ndev, struct iw_request_info *info,
@@ -3223,20 +2907,3 @@ const struct iw_handler_def prism54_handler_def = {
        .private_args = (struct iw_priv_args *) prism54_private_args,
        .get_wireless_stats = prism54_get_wireless_stats,
 };
-
-/* For wpa_supplicant */
-
-int
-prism54_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
-{
-       struct iwreq *wrq = (struct iwreq *) rq;
-       int ret = -1;
-       switch (cmd) {
-               case PRISM54_HOSTAPD:
-               if (!capable(CAP_NET_ADMIN))
-                       return -EPERM;
-               ret = prism54_hostapd(ndev, &wrq->u.data);
-               return ret;
-       }
-       return -EOPNOTSUPP;
-}
index bcfbfb9..a34bceb 100644 (file)
@@ -43,8 +43,6 @@ void prism54_wpa_bss_ie_clean(islpci_private *priv);
 
 int prism54_set_mac_address(struct net_device *, void *);
 
-int prism54_ioctl(struct net_device *, struct ifreq *, int);
-
 extern const struct iw_handler_def prism54_handler_def;
 
 #endif                         /* _ISL_IOCTL_H */
index 8a3cf4f..5970ff6 100644 (file)
@@ -804,7 +804,6 @@ static const struct ethtool_ops islpci_ethtool_ops = {
 static const struct net_device_ops islpci_netdev_ops = {
        .ndo_open               = islpci_open,
        .ndo_stop               = islpci_close,
-       .ndo_do_ioctl           = prism54_ioctl,
        .ndo_start_xmit         = islpci_eth_transmit,
        .ndo_tx_timeout         = islpci_eth_tx_timeout,
        .ndo_set_mac_address    = prism54_set_mac_address,
index 53c5f87..de7d41f 100644 (file)
@@ -39,7 +39,7 @@
 /*
  * Allow hardware encryption to be disabled.
  */
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
index 1ba079d..e5df380 100644 (file)
@@ -1203,8 +1203,10 @@ void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
                           !(filter_flags & FIF_CONTROL));
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PSPOLL,
                           !(filter_flags & FIF_PSPOLL));
-       rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA, 1);
-       rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR, 0);
+       rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA,
+                          !(filter_flags & FIF_CONTROL));
+       rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR,
+                          !(filter_flags & FIF_CONTROL));
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CNTL,
                           !(filter_flags & FIF_CONTROL));
        rt2800_register_write(rt2x00dev, RX_FILTER_CFG, reg);
index da48c8a..4941a1a 100644 (file)
@@ -50,7 +50,7 @@
 /*
  * Allow hardware encryption to be disabled.
  */
-static int modparam_nohwcrypt = 0;
+static bool modparam_nohwcrypt = false;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
index 3778763..b1df1a7 100644 (file)
@@ -45,7 +45,7 @@
 /*
  * Allow hardware encryption to be disabled.
  */
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
index edd317f..c3e1aa7 100644 (file)
@@ -831,11 +831,11 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
        if (spec->supported_rates & SUPPORT_RATE_OFDM)
                num_rates += 8;
 
-       channels = kzalloc(sizeof(*channels) * spec->num_channels, GFP_KERNEL);
+       channels = kcalloc(spec->num_channels, sizeof(*channels), GFP_KERNEL);
        if (!channels)
                return -ENOMEM;
 
-       rates = kzalloc(sizeof(*rates) * num_rates, GFP_KERNEL);
+       rates = kcalloc(num_rates, sizeof(*rates), GFP_KERNEL);
        if (!rates)
                goto exit_free_channels;
 
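
The rt2x00 hunk above replaces kzalloc(sizeof(*x) * n, GFP_KERNEL) with kcalloc(n, sizeof(*x), GFP_KERNEL). Both return zeroed memory, but kcalloc additionally checks the n * size multiplication for overflow and returns NULL instead of handing back an undersized buffer, which is why it is the preferred form for array allocations. For example (the element type is invented for the sketch):

	#include <linux/slab.h>

	struct chan_entry {		/* illustrative element type */
		u32 center_freq;
		u32 flags;
	};

	static struct chan_entry *alloc_chan_table(unsigned int num)
	{
		/* zeroed, overflow-checked array allocation */
		return kcalloc(num, sizeof(struct chan_entry), GFP_KERNEL);
	}
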
index bf0acff..ede3c58 100644 (file)
@@ -160,7 +160,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
  exit_fail:
        rt2x00queue_pause_queue(queue);
  exit_free_skb:
-       dev_kfree_skb_any(skb);
+       ieee80211_free_txskb(hw, skb);
 }
 EXPORT_SYMBOL_GPL(rt2x00mac_tx);
 
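
In the rt2x00mac_tx() error path above, a frame that cannot be queued is now released with ieee80211_free_txskb(hw, skb) instead of dev_kfree_skb_any(). Frames handed to a driver's .tx handler belong to mac80211, and returning them through ieee80211_free_txskb() lets mac80211 account for the dropped frame rather than having the skb vanish behind its back. A sketch of the pattern in a driver tx handler; demo_queue_frame() is a hypothetical helper:

	#include <net/mac80211.h>

	/* Hypothetical queueing helper; returns non-zero when the frame cannot be queued. */
	static int demo_queue_frame(struct ieee80211_hw *hw, struct sk_buff *skb);

	static void demo_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
	{
		if (demo_queue_frame(hw, skb))
			/* Could not queue: give the frame back to mac80211. */
			ieee80211_free_txskb(hw, skb);
	}
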
index bf55b4a..e0c6d11 100644 (file)
@@ -41,7 +41,7 @@
 /*
  * Allow hardware encryption to be disabled.
  */
-static int modparam_nohwcrypt = 0;
+static bool modparam_nohwcrypt = false;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
index cfb19db..1c69c73 100644 (file)
@@ -40,7 +40,7 @@
 /*
  * Allow hardware encryption to be disabled.
  */
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
index a13ecfc..74c0214 100644 (file)
@@ -396,7 +396,7 @@ void rtl_init_rfkill(struct ieee80211_hw *hw)
        u8 valid = 0;
 
        /*set init state to on */
-       rtlpriv->rfkill.rfkill_state = 1;
+       rtlpriv->rfkill.rfkill_state = true;
        wiphy_rfkill_set_hw_state(hw->wiphy, 0);
 
        radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid);
@@ -448,12 +448,11 @@ int rtl_init_core(struct ieee80211_hw *hw)
 
        /* <4> locks */
        mutex_init(&rtlpriv->locks.conf_mutex);
-       spin_lock_init(&rtlpriv->locks.ips_lock);
+       mutex_init(&rtlpriv->locks.ps_mutex);
        spin_lock_init(&rtlpriv->locks.irq_th_lock);
        spin_lock_init(&rtlpriv->locks.h2c_lock);
        spin_lock_init(&rtlpriv->locks.rf_ps_lock);
        spin_lock_init(&rtlpriv->locks.rf_lock);
-       spin_lock_init(&rtlpriv->locks.lps_lock);
        spin_lock_init(&rtlpriv->locks.waitq_lock);
        spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock);
 
index b6683a2..39e0907 100644 (file)
@@ -78,7 +78,7 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
        u8 init_aspm;
 
        ppsc->reg_rfps_level = 0;
-       ppsc->support_aspm = 0;
+       ppsc->support_aspm = false;
 
        /*Update PCI ASPM setting */
        ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
@@ -570,9 +570,9 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
                if (ieee80211_is_nullfunc(fc)) {
                        if (ieee80211_has_pm(fc)) {
                                rtlpriv->mac80211.offchan_delay = true;
-                               rtlpriv->psc.state_inap = 1;
+                               rtlpriv->psc.state_inap = true;
                        } else {
-                               rtlpriv->psc.state_inap = 0;
+                               rtlpriv->psc.state_inap = false;
                        }
                }
 
@@ -610,7 +610,7 @@ tx_status_ok:
        if (((rtlpriv->link_info.num_rx_inperiod +
                rtlpriv->link_info.num_tx_inperiod) > 8) ||
                (rtlpriv->link_info.num_rx_inperiod > 2)) {
-               tasklet_schedule(&rtlpriv->works.ips_leave_tasklet);
+               schedule_work(&rtlpriv->works.lps_leave_work);
        }
 }
 
@@ -736,7 +736,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                if (((rtlpriv->link_info.num_rx_inperiod +
                        rtlpriv->link_info.num_tx_inperiod) > 8) ||
                        (rtlpriv->link_info.num_rx_inperiod > 2)) {
-                       tasklet_schedule(&rtlpriv->works.ips_leave_tasklet);
+                       schedule_work(&rtlpriv->works.lps_leave_work);
                }
 
                dev_kfree_skb_any(skb);
@@ -780,6 +780,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
        unsigned long flags;
        u32 inta = 0;
        u32 intb = 0;
+       irqreturn_t ret = IRQ_HANDLED;
 
        spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
 
@@ -787,8 +788,10 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
        rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb);
 
        /*Shared IRQ or HW disappared */
-       if (!inta || inta == 0xffff)
+       if (!inta || inta == 0xffff) {
+               ret = IRQ_NONE;
                goto done;
+       }
 
        /*<1> beacon related */
        if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) {
@@ -892,7 +895,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
 
 done:
        spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
-       return IRQ_HANDLED;
+       return ret;
 }
 
 static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
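
The interrupt-handler hunks above make _rtl_pci_interrupt() return IRQ_NONE when the read-back status is 0 or 0xffff (the interrupt came from another device on a shared line, or the hardware has disappeared), and IRQ_HANDLED only when it actually serviced something. Reporting this correctly lets the kernel pass a shared interrupt on to the other registered handlers and detect stuck IRQ lines. A stripped-down handler following the same pattern; the device struct and register offsets are invented for the example:

	#include <linux/interrupt.h>
	#include <linux/io.h>

	struct demo_dev {
		void __iomem *regs;	/* hypothetical MMIO window */
	};

	static irqreturn_t demo_isr(int irq, void *dev_id)
	{
		struct demo_dev *dev = dev_id;
		u32 status = readl(dev->regs + 0x00);	/* placeholder status register */

		/* Shared IRQ that is not ours, or device gone: decline it. */
		if (!status || status == 0xffffffff)
			return IRQ_NONE;

		writel(status, dev->regs + 0x04);	/* ack (placeholder offset) */
		return IRQ_HANDLED;
	}
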
@@ -900,11 +903,6 @@ static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
        _rtl_pci_tx_chk_waitq(hw);
 }
 
-static void _rtl_pci_ips_leave_tasklet(struct ieee80211_hw *hw)
-{
-       rtl_lps_leave(hw);
-}
-
 static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -942,6 +940,15 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
        return;
 }
 
+static void rtl_lps_leave_work_callback(struct work_struct *work)
+{
+       struct rtl_works *rtlworks =
+           container_of(work, struct rtl_works, lps_leave_work);
+       struct ieee80211_hw *hw = rtlworks->hw;
+
+       rtl_lps_leave(hw);
+}
+
 static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
 {
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -1003,9 +1010,7 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
        tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
                     (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
                     (unsigned long)hw);
-       tasklet_init(&rtlpriv->works.ips_leave_tasklet,
-                    (void (*)(unsigned long))_rtl_pci_ips_leave_tasklet,
-                    (unsigned long)hw);
+       INIT_WORK(&rtlpriv->works.lps_leave_work, rtl_lps_leave_work_callback);
 }
 
 static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
@@ -1475,7 +1480,7 @@ static void rtl_pci_deinit(struct ieee80211_hw *hw)
 
        synchronize_irq(rtlpci->pdev->irq);
        tasklet_kill(&rtlpriv->works.irq_tasklet);
-       tasklet_kill(&rtlpriv->works.ips_leave_tasklet);
+       cancel_work_sync(&rtlpriv->works.lps_leave_work);
 
        flush_workqueue(rtlpriv->works.rtl_wq);
        destroy_workqueue(rtlpriv->works.rtl_wq);
@@ -1550,7 +1555,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
        set_hal_stop(rtlhal);
 
        rtlpriv->cfg->ops->disable_interrupt(hw);
-       tasklet_kill(&rtlpriv->works.ips_leave_tasklet);
+       cancel_work_sync(&rtlpriv->works.lps_leave_work);
 
        spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
        while (ppsc->rfchange_inprogress) {
index 55c8e50..a14a68b 100644 (file)
@@ -241,7 +241,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
        if (mac->opmode != NL80211_IFTYPE_STATION)
                return;
 
-       spin_lock(&rtlpriv->locks.ips_lock);
+       mutex_lock(&rtlpriv->locks.ps_mutex);
 
        if (ppsc->inactiveps) {
                rtstate = ppsc->rfpwr_state;
@@ -257,7 +257,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
                }
        }
 
-       spin_unlock(&rtlpriv->locks.ips_lock);
+       mutex_unlock(&rtlpriv->locks.ps_mutex);
 }
 
 /*for FW LPS*/
@@ -395,7 +395,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
        if (mac->link_state != MAC80211_LINKED)
                return;
 
-       spin_lock_irq(&rtlpriv->locks.lps_lock);
+       mutex_lock(&rtlpriv->locks.ps_mutex);
 
        /* Idle for a while if we connect to AP a while ago. */
        if (mac->cnt_after_linked >= 2) {
@@ -407,7 +407,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
                }
        }
 
-       spin_unlock_irq(&rtlpriv->locks.lps_lock);
+       mutex_unlock(&rtlpriv->locks.ps_mutex);
 }
 
 /*Leave the leisure power save mode.*/
@@ -416,9 +416,8 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       unsigned long flags;
 
-       spin_lock_irqsave(&rtlpriv->locks.lps_lock, flags);
+       mutex_lock(&rtlpriv->locks.ps_mutex);
 
        if (ppsc->fwctrl_lps) {
                if (ppsc->dot11_psmode != EACTIVE) {
@@ -439,7 +438,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
                        rtl_lps_set_psmode(hw, EACTIVE);
                }
        }
-       spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flags);
+       mutex_unlock(&rtlpriv->locks.ps_mutex);
 }
 
 /* For sw LPS*/
@@ -540,9 +539,9 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
                RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
        }
 
-       spin_lock_irq(&rtlpriv->locks.lps_lock);
+       mutex_lock(&rtlpriv->locks.ps_mutex);
        rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS);
-       spin_unlock_irq(&rtlpriv->locks.lps_lock);
+       mutex_unlock(&rtlpriv->locks.ps_mutex);
 }
 
 void rtl_swlps_rfon_wq_callback(void *data)
@@ -575,9 +574,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
        if (rtlpriv->link_info.busytraffic)
                return;
 
-       spin_lock_irq(&rtlpriv->locks.lps_lock);
+       mutex_lock(&rtlpriv->locks.ps_mutex);
        rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS);
-       spin_unlock_irq(&rtlpriv->locks.lps_lock);
+       mutex_unlock(&rtlpriv->locks.ps_mutex);
 
        if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
                !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
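
Taken together, the rtlwifi power-save hunks above replace the ips_lock/lps_lock spinlocks with a single ps_mutex and move the leave-LPS trigger from a tasklet to a work item (lps_leave_work, scheduled with schedule_work()). The likely reason is that a mutex may sleep and so cannot be taken from tasklet (softirq) context; deferring to a workqueue runs the same code in process context. The resulting idiom, reduced to a skeleton with stand-in names:

	#include <linux/mutex.h>
	#include <linux/workqueue.h>

	struct demo_priv {				/* illustrative private struct */
		struct mutex ps_mutex;			/* sleeping lock for PS state */
		struct work_struct lps_leave_work;
	};

	static void demo_lps_leave_work(struct work_struct *work)
	{
		struct demo_priv *priv =
			container_of(work, struct demo_priv, lps_leave_work);

		mutex_lock(&priv->ps_mutex);		/* fine here: process context */
		/* ... bring the radio out of leisure power save ... */
		mutex_unlock(&priv->ps_mutex);
	}

	static void demo_init(struct demo_priv *priv)
	{
		mutex_init(&priv->ps_mutex);
		INIT_WORK(&priv->lps_leave_work, demo_lps_leave_work);
	}

	/* Hot paths (IRQ tasklet, RX processing) only schedule the work: */
	static void demo_maybe_leave_lps(struct demo_priv *priv)
	{
		schedule_work(&priv->lps_leave_work);
	}
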
index fa393df..931d979 100644 (file)
@@ -262,10 +262,10 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
        u32 fwsize;
        enum version_8192c version = rtlhal->version;
 
-       pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
        if (!rtlhal->pfirmware)
                return 1;
 
+       pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
        pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
        pfwdata = (u8 *) rtlhal->pfirmware;
        fwsize = rtlhal->fwsize;
index 592a10a..3b585aa 100644 (file)
@@ -569,7 +569,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
                }
        case ERFSLEEP:{
                        if (ppsc->rfpwr_state == ERFOFF)
-                               break;
+                               return false;
                        for (queue_id = 0, i = 0;
                             queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
                                ring = &pcipriv->dev.tx_ring[queue_id];
index f2aa33d..89ef698 100644 (file)
@@ -98,9 +98,9 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
 
        rtl8192ce_bt_reg_init(hw);
 
-       rtlpriv->dm.dm_initialgain_enable = 1;
+       rtlpriv->dm.dm_initialgain_enable = true;
        rtlpriv->dm.dm_flag = 0;
-       rtlpriv->dm.disable_framebursting = 0;
+       rtlpriv->dm.disable_framebursting = false;
        rtlpriv->dm.thermalvalue = 0;
        rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13);
 
index 4ed973a..124cf63 100644 (file)
@@ -2436,7 +2436,7 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
                         "%x\n", ppsc->hwradiooff, e_rfpowerstate_toset));
        }
        if (actuallyset) {
-               ppsc->hwradiooff = 1;
+               ppsc->hwradiooff = true;
                if (e_rfpowerstate_toset == ERFON) {
                        if ((ppsc->reg_rfps_level  & RT_RF_OFF_LEVL_ASPM) &&
                             RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM))
index 7285290..e49cf22 100644 (file)
@@ -548,7 +548,7 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
                break;
        case ERFSLEEP:
                if (ppsc->rfpwr_state == ERFOFF)
-                       break;
+                       return false;
                for (queue_id = 0, i = 0;
                     queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
                        ring = &pcipriv->dev.tx_ring[queue_id];
index 94a3e17..3527c79 100644 (file)
@@ -57,9 +57,9 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
        const struct firmware *firmware;
        int err;
 
-       rtlpriv->dm.dm_initialgain_enable = 1;
+       rtlpriv->dm.dm_initialgain_enable = true;
        rtlpriv->dm.dm_flag = 0;
-       rtlpriv->dm.disable_framebursting = 0;
+       rtlpriv->dm.disable_framebursting = false;
        rtlpriv->dm.thermalvalue = 0;
        rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
        rtlpriv->rtlhal.pfirmware = vmalloc(0x4000);
index 3ac7af1..0883349 100644 (file)
@@ -3374,7 +3374,7 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
                break;
        case ERFSLEEP:
                if (ppsc->rfpwr_state == ERFOFF)
-                       break;
+                       return false;
 
                for (queue_id = 0, i = 0;
                     queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
index 149493f..7911c9c 100644 (file)
@@ -99,9 +99,9 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
 
        rtlpriv->dm.dm_initialgain_enable = true;
        rtlpriv->dm.dm_flag = 0;
-       rtlpriv->dm.disable_framebursting = 0;
+       rtlpriv->dm.disable_framebursting = false;
        rtlpriv->dm.thermalvalue = 0;
-       rtlpriv->dm.useramask = 1;
+       rtlpriv->dm.useramask = true;
 
        /* dual mac */
        if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G)
index f27171a..f10ac1a 100644 (file)
@@ -602,7 +602,7 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
                }
        case ERFSLEEP:
                        if (ppsc->rfpwr_state == ERFOFF)
-                               break;
+                               return false;
 
                        for (queue_id = 0, i = 0;
                             queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
index 92f49d5..78723cf 100644 (file)
@@ -98,9 +98,9 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
        int err = 0;
        u16 earlyrxthreshold = 7;
 
-       rtlpriv->dm.dm_initialgain_enable = 1;
+       rtlpriv->dm.dm_initialgain_enable = true;
        rtlpriv->dm.dm_flag = 0;
-       rtlpriv->dm.disable_framebursting = 0;
+       rtlpriv->dm.disable_framebursting = false;
        rtlpriv->dm.thermalvalue = 0;
        rtlpriv->dm.useramask = true;
 
index f3c132b..9b7d60c 100644 (file)
@@ -1488,7 +1488,7 @@ struct rtl_intf_ops {
 
 struct rtl_mod_params {
        /* default: 0 = using hardware encryption */
-       int sw_crypto;
+       bool sw_crypto;
 
        /* default: 0 = DBG_EMERG (0)*/
        int debug;
@@ -1544,14 +1544,13 @@ struct rtl_hal_cfg {
 struct rtl_locks {
        /* mutex */
        struct mutex conf_mutex;
+       struct mutex ps_mutex;
 
        /*spin lock */
-       spinlock_t ips_lock;
        spinlock_t irq_th_lock;
        spinlock_t h2c_lock;
        spinlock_t rf_ps_lock;
        spinlock_t rf_lock;
-       spinlock_t lps_lock;
        spinlock_t waitq_lock;
 
        /*Dual mac*/
@@ -1576,7 +1575,8 @@ struct rtl_works {
        /* For SW LPS */
        struct delayed_work ps_work;
        struct delayed_work ps_rfon_wq;
-       struct tasklet_struct ips_leave_tasklet;
+
+       struct work_struct lps_leave_work;
 };
 
 struct rtl_debug {
index eaa5f95..6248c35 100644 (file)
@@ -319,7 +319,6 @@ static int __devexit wl1251_spi_remove(struct spi_device *spi)
 static struct spi_driver wl1251_spi_driver = {
        .driver = {
                .name           = DRIVER_NAME,
-               .bus            = &spi_bus_type,
                .owner          = THIS_MODULE,
        },
 
index 3fe388b..af08c86 100644 (file)
@@ -42,16 +42,6 @@ config WL12XX_SDIO
          If you choose to build a module, it'll be called wl12xx_sdio.
          Say N if unsure.
 
-config WL12XX_SDIO_TEST
-       tristate "TI wl12xx SDIO testing support"
-       depends on WL12XX && MMC && WL12XX_SDIO
-       default n
-       ---help---
-         This module adds support for the SDIO bus testing with the
-         TI wl12xx chipsets.  You probably don't want this unless you are
-         testing a new hardware platform.  Select this if you want to test the
-         SDIO bus which is connected to the wl12xx chip.
-
 config WL12XX_PLATFORM_DATA
        bool
        depends on WL12XX_SDIO != n || WL1251_SDIO != n
index 621b348..fe67262 100644 (file)
@@ -3,14 +3,11 @@ wl12xx-objs           = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \
 
 wl12xx_spi-objs        = spi.o
 wl12xx_sdio-objs       = sdio.o
-wl12xx_sdio_test-objs  = sdio_test.o
 
 wl12xx-$(CONFIG_NL80211_TESTMODE)      += testmode.o
 obj-$(CONFIG_WL12XX)                   += wl12xx.o
 obj-$(CONFIG_WL12XX_SPI)               += wl12xx_spi.o
 obj-$(CONFIG_WL12XX_SDIO)              += wl12xx_sdio.o
 
-obj-$(CONFIG_WL12XX_SDIO_TEST)         += wl12xx_sdio_test.o
-
 # small builtin driver bit
 obj-$(CONFIG_WL12XX_PLATFORM_DATA)     += wl12xx_platform_data.o
index ca044a7..7537c40 100644 (file)
 #include <linux/slab.h>
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "wl12xx_80211.h"
 #include "reg.h"
 #include "ps.h"
 
-int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
+int wl1271_acx_wake_up_conditions(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        struct acx_wake_up_condition *wake_up;
        int ret;
@@ -46,7 +47,7 @@ int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
                goto out;
        }
 
-       wake_up->role_id = wl->role_id;
+       wake_up->role_id = wlvif->role_id;
        wake_up->wake_up_event = wl->conf.conn.wake_up_event;
        wake_up->listen_interval = wl->conf.conn.listen_interval;
 
@@ -84,7 +85,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_tx_power(struct wl1271 *wl, int power)
+int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                       int power)
 {
        struct acx_current_tx_power *acx;
        int ret;
@@ -100,7 +102,7 @@ int wl1271_acx_tx_power(struct wl1271 *wl, int power)
                goto out;
        }
 
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->current_tx_power = power * 10;
 
        ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx));
@@ -114,7 +116,7 @@ out:
        return ret;
 }
 
-int wl1271_acx_feature_cfg(struct wl1271 *wl)
+int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        struct acx_feature_config *feature;
        int ret;
@@ -128,7 +130,7 @@ int wl1271_acx_feature_cfg(struct wl1271 *wl)
        }
 
        /* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE are disabled */
-       feature->role_id = wl->role_id;
+       feature->role_id = wlvif->role_id;
        feature->data_flow_options = 0;
        feature->options = 0;
 
@@ -184,33 +186,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_pd_threshold(struct wl1271 *wl)
-{
-       struct acx_packet_detection *pd;
-       int ret;
-
-       wl1271_debug(DEBUG_ACX, "acx data pd threshold");
-
-       pd = kzalloc(sizeof(*pd), GFP_KERNEL);
-       if (!pd) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       pd->threshold = cpu_to_le32(wl->conf.rx.packet_detection_threshold);
-
-       ret = wl1271_cmd_configure(wl, ACX_PD_THRESHOLD, pd, sizeof(*pd));
-       if (ret < 0) {
-               wl1271_warning("failed to set pd threshold: %d", ret);
-               goto out;
-       }
-
-out:
-       kfree(pd);
-       return ret;
-}
-
-int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time)
+int wl1271_acx_slot(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                   enum acx_slot_type slot_time)
 {
        struct acx_slot *slot;
        int ret;
@@ -223,7 +200,7 @@ int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time)
                goto out;
        }
 
-       slot->role_id = wl->role_id;
+       slot->role_id = wlvif->role_id;
        slot->wone_index = STATION_WONE_INDEX;
        slot->slot_time = slot_time;
 
@@ -238,8 +215,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
-                                void *mc_list, u32 mc_list_len)
+int wl1271_acx_group_address_tbl(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                bool enable, void *mc_list, u32 mc_list_len)
 {
        struct acx_dot11_grp_addr_tbl *acx;
        int ret;
@@ -253,7 +230,7 @@ int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
        }
 
        /* MAC filtering */
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->enabled = enable;
        acx->num_groups = mc_list_len;
        memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN);
@@ -270,7 +247,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_service_period_timeout(struct wl1271 *wl)
+int wl1271_acx_service_period_timeout(struct wl1271 *wl,
+                                     struct wl12xx_vif *wlvif)
 {
        struct acx_rx_timeout *rx_timeout;
        int ret;
@@ -283,7 +261,7 @@ int wl1271_acx_service_period_timeout(struct wl1271 *wl)
 
        wl1271_debug(DEBUG_ACX, "acx service period timeout");
 
-       rx_timeout->role_id = wl->role_id;
+       rx_timeout->role_id = wlvif->role_id;
        rx_timeout->ps_poll_timeout = cpu_to_le16(wl->conf.rx.ps_poll_timeout);
        rx_timeout->upsd_timeout = cpu_to_le16(wl->conf.rx.upsd_timeout);
 
@@ -300,7 +278,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold)
+int wl1271_acx_rts_threshold(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                            u32 rts_threshold)
 {
        struct acx_rts_threshold *rts;
        int ret;
@@ -320,7 +299,7 @@ int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold)
                goto out;
        }
 
-       rts->role_id = wl->role_id;
+       rts->role_id = wlvif->role_id;
        rts->threshold = cpu_to_le16((u16)rts_threshold);
 
        ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts));
@@ -363,7 +342,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
+int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                bool enable_filter)
 {
        struct acx_beacon_filter_option *beacon_filter = NULL;
        int ret = 0;
@@ -380,7 +360,7 @@ int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
                goto out;
        }
 
-       beacon_filter->role_id = wl->role_id;
+       beacon_filter->role_id = wlvif->role_id;
        beacon_filter->enable = enable_filter;
 
        /*
@@ -401,7 +381,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_beacon_filter_table(struct wl1271 *wl)
+int wl1271_acx_beacon_filter_table(struct wl1271 *wl,
+                                  struct wl12xx_vif *wlvif)
 {
        struct acx_beacon_filter_ie_table *ie_table;
        int i, idx = 0;
@@ -417,7 +398,7 @@ int wl1271_acx_beacon_filter_table(struct wl1271 *wl)
        }
 
        /* configure default beacon pass-through rules */
-       ie_table->role_id = wl->role_id;
+       ie_table->role_id = wlvif->role_id;
        ie_table->num_ie = 0;
        for (i = 0; i < wl->conf.conn.bcn_filt_ie_count; i++) {
                struct conf_bcn_filt_rule *r = &(wl->conf.conn.bcn_filt_ie[i]);
@@ -458,7 +439,8 @@ out:
 
 #define ACX_CONN_MONIT_DISABLE_VALUE  0xffffffff
 
-int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable)
+int wl1271_acx_conn_monit_params(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                bool enable)
 {
        struct acx_conn_monit_params *acx;
        u32 threshold = ACX_CONN_MONIT_DISABLE_VALUE;
@@ -479,7 +461,7 @@ int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable)
                timeout = wl->conf.conn.bss_lose_timeout;
        }
 
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->synch_fail_thold = cpu_to_le32(threshold);
        acx->bss_lose_timeout = cpu_to_le32(timeout);
 
@@ -582,7 +564,7 @@ out:
        return ret;
 }
 
-int wl1271_acx_bcn_dtim_options(struct wl1271 *wl)
+int wl1271_acx_bcn_dtim_options(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        struct acx_beacon_broadcast *bb;
        int ret;
@@ -595,7 +577,7 @@ int wl1271_acx_bcn_dtim_options(struct wl1271 *wl)
                goto out;
        }
 
-       bb->role_id = wl->role_id;
+       bb->role_id = wlvif->role_id;
        bb->beacon_rx_timeout = cpu_to_le16(wl->conf.conn.beacon_rx_timeout);
        bb->broadcast_timeout = cpu_to_le16(wl->conf.conn.broadcast_timeout);
        bb->rx_broadcast_in_ps = wl->conf.conn.rx_broadcast_in_ps;
@@ -612,7 +594,7 @@ out:
        return ret;
 }
 
-int wl1271_acx_aid(struct wl1271 *wl, u16 aid)
+int wl1271_acx_aid(struct wl1271 *wl, struct wl12xx_vif *wlvif, u16 aid)
 {
        struct acx_aid *acx_aid;
        int ret;
@@ -625,7 +607,7 @@ int wl1271_acx_aid(struct wl1271 *wl, u16 aid)
                goto out;
        }
 
-       acx_aid->role_id = wl->role_id;
+       acx_aid->role_id = wlvif->role_id;
        acx_aid->aid = cpu_to_le16(aid);
 
        ret = wl1271_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid));
@@ -668,7 +650,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble)
+int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                           enum acx_preamble_type preamble)
 {
        struct acx_preamble *acx;
        int ret;
@@ -681,7 +664,7 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble)
                goto out;
        }
 
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->preamble = preamble;
 
        ret = wl1271_cmd_configure(wl, ACX_PREAMBLE_TYPE, acx, sizeof(*acx));
@@ -695,7 +678,7 @@ out:
        return ret;
 }
 
-int wl1271_acx_cts_protect(struct wl1271 *wl,
+int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                           enum acx_ctsprotect_type ctsprotect)
 {
        struct acx_ctsprotect *acx;
@@ -709,7 +692,7 @@ int wl1271_acx_cts_protect(struct wl1271 *wl,
                goto out;
        }
 
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->ctsprotect = ctsprotect;
 
        ret = wl1271_cmd_configure(wl, ACX_CTS_PROTECTION, acx, sizeof(*acx));
@@ -739,7 +722,7 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
        return 0;
 }
 
-int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
+int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        struct acx_rate_policy *acx;
        struct conf_tx_rate_class *c = &wl->conf.tx.sta_rc_conf;
@@ -755,11 +738,11 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
        }
 
        wl1271_debug(DEBUG_ACX, "basic_rate: 0x%x, full_rate: 0x%x",
-               wl->basic_rate, wl->rate_set);
+               wlvif->basic_rate, wlvif->rate_set);
 
        /* configure one basic rate class */
-       acx->rate_policy_idx = cpu_to_le32(ACX_TX_BASIC_RATE);
-       acx->rate_policy.enabled_rates = cpu_to_le32(wl->basic_rate);
+       acx->rate_policy_idx = cpu_to_le32(wlvif->sta.basic_rate_idx);
+       acx->rate_policy.enabled_rates = cpu_to_le32(wlvif->basic_rate);
        acx->rate_policy.short_retry_limit = c->short_retry_limit;
        acx->rate_policy.long_retry_limit = c->long_retry_limit;
        acx->rate_policy.aflags = c->aflags;
@@ -771,8 +754,8 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
        }
 
        /* configure one AP supported rate class */
-       acx->rate_policy_idx = cpu_to_le32(ACX_TX_AP_FULL_RATE);
-       acx->rate_policy.enabled_rates = cpu_to_le32(wl->rate_set);
+       acx->rate_policy_idx = cpu_to_le32(wlvif->sta.ap_rate_idx);
+       acx->rate_policy.enabled_rates = cpu_to_le32(wlvif->rate_set);
        acx->rate_policy.short_retry_limit = c->short_retry_limit;
        acx->rate_policy.long_retry_limit = c->long_retry_limit;
        acx->rate_policy.aflags = c->aflags;
@@ -788,7 +771,7 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
         * (p2p packets should always go out with OFDM rates, even
         * if we are currently connected to 11b AP)
         */
-       acx->rate_policy_idx = cpu_to_le32(ACX_TX_BASIC_RATE_P2P);
+       acx->rate_policy_idx = cpu_to_le32(wlvif->sta.p2p_rate_idx);
        acx->rate_policy.enabled_rates =
                                cpu_to_le32(CONF_TX_RATE_MASK_BASIC_P2P);
        acx->rate_policy.short_retry_limit = c->short_retry_limit;
@@ -839,8 +822,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
-                     u8 aifsn, u16 txop)
+int wl1271_acx_ac_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                     u8 ac, u8 cw_min, u16 cw_max, u8 aifsn, u16 txop)
 {
        struct acx_ac_cfg *acx;
        int ret = 0;
@@ -855,7 +838,7 @@ int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
                goto out;
        }
 
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->ac = ac;
        acx->cw_min = cw_min;
        acx->cw_max = cpu_to_le16(cw_max);
@@ -873,7 +856,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
+int wl1271_acx_tid_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                      u8 queue_id, u8 channel_type,
                       u8 tsid, u8 ps_scheme, u8 ack_policy,
                       u32 apsd_conf0, u32 apsd_conf1)
 {
@@ -889,7 +873,7 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
                goto out;
        }
 
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->queue_id = queue_id;
        acx->channel_type = channel_type;
        acx->tsid = tsid;
@@ -1098,7 +1082,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
+int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                         bool enable)
 {
        struct wl1271_acx_bet_enable *acx = NULL;
        int ret = 0;
@@ -1114,7 +1099,7 @@ int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
                goto out;
        }
 
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->enable = enable ? CONF_BET_MODE_ENABLE : CONF_BET_MODE_DISABLE;
        acx->max_consecutive = wl->conf.conn.bet_max_consecutive;
 
@@ -1129,7 +1114,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address)
+int wl1271_acx_arp_ip_filter(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                            u8 enable, __be32 address)
 {
        struct wl1271_acx_arp_filter *acx;
        int ret;
@@ -1142,7 +1128,7 @@ int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address)
                goto out;
        }
 
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->version = ACX_IPV4_VERSION;
        acx->enable = enable;
 
@@ -1189,7 +1175,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable)
+int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                              bool enable)
 {
        struct wl1271_acx_keep_alive_mode *acx = NULL;
        int ret = 0;
@@ -1202,7 +1189,7 @@ int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable)
                goto out;
        }
 
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->enabled = enable;
 
        ret = wl1271_cmd_configure(wl, ACX_KEEP_ALIVE_MODE, acx, sizeof(*acx));
@@ -1216,7 +1203,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid)
+int wl1271_acx_keep_alive_config(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                u8 index, u8 tpl_valid)
 {
        struct wl1271_acx_keep_alive_config *acx = NULL;
        int ret = 0;
@@ -1229,7 +1217,7 @@ int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid)
                goto out;
        }
 
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->period = cpu_to_le32(wl->conf.conn.keep_alive_interval);
        acx->index = index;
        acx->tpl_validation = tpl_valid;
@@ -1247,8 +1235,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
-                               s16 thold, u8 hyst)
+int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                               bool enable, s16 thold, u8 hyst)
 {
        struct wl1271_acx_rssi_snr_trigger *acx = NULL;
        int ret = 0;
@@ -1261,9 +1249,9 @@ int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
                goto out;
        }
 
-       wl->last_rssi_event = -1;
+       wlvif->last_rssi_event = -1;
 
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->pacing = cpu_to_le16(wl->conf.roam_trigger.trigger_pacing);
        acx->metric = WL1271_ACX_TRIG_METRIC_RSSI_BEACON;
        acx->type = WL1271_ACX_TRIG_TYPE_EDGE;
@@ -1288,7 +1276,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl)
+int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl,
+                                   struct wl12xx_vif *wlvif)
 {
        struct wl1271_acx_rssi_snr_avg_weights *acx = NULL;
        struct conf_roam_trigger_settings *c = &wl->conf.roam_trigger;
@@ -1302,7 +1291,7 @@ int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl)
                goto out;
        }
 
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->rssi_beacon = c->avg_weight_rssi_beacon;
        acx->rssi_data = c->avg_weight_rssi_data;
        acx->snr_beacon = c->avg_weight_snr_beacon;
@@ -1367,6 +1356,7 @@ out:
 }
 
 int wl1271_acx_set_ht_information(struct wl1271 *wl,
+                                  struct wl12xx_vif *wlvif,
                                   u16 ht_operation_mode)
 {
        struct wl1271_acx_ht_information *acx;
@@ -1380,7 +1370,7 @@ int wl1271_acx_set_ht_information(struct wl1271 *wl,
                goto out;
        }
 
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->ht_protection =
                (u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION);
        acx->rifs_mode = 0;
@@ -1402,7 +1392,8 @@ out:
 }
 
 /* Configure BA session initiator/receiver parameters setting in the FW. */
-int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl)
+int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl,
+                                      struct wl12xx_vif *wlvif)
 {
        struct wl1271_acx_ba_initiator_policy *acx;
        int ret;
@@ -1416,7 +1407,7 @@ int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl)
        }
 
        /* set for the current role */
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->tid_bitmap = wl->conf.ht.tx_ba_tid_bitmap;
        acx->win_size = wl->conf.ht.tx_ba_win_size;
        acx->inactivity_timeout = wl->conf.ht.inactivity_timeout;
@@ -1494,7 +1485,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable)
+int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                              bool enable)
 {
        struct wl1271_acx_ps_rx_streaming *rx_streaming;
        u32 conf_queues, enable_queues;
@@ -1523,7 +1515,7 @@ int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable)
                if (!(conf_queues & BIT(i)))
                        continue;
 
-               rx_streaming->role_id = wl->role_id;
+               rx_streaming->role_id = wlvif->role_id;
                rx_streaming->tid = i;
                rx_streaming->enable = enable_queues & BIT(i);
                rx_streaming->period = wl->conf.rx_streaming.interval;
@@ -1542,7 +1534,7 @@ out:
        return ret;
 }
 
-int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl)
+int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        struct wl1271_acx_ap_max_tx_retry *acx = NULL;
        int ret;
@@ -1553,7 +1545,7 @@ int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl)
        if (!acx)
                return -ENOMEM;
 
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->max_tx_retry = cpu_to_le16(wl->conf.tx.max_tx_retries);
 
        ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx));
@@ -1567,7 +1559,7 @@ out:
        return ret;
 }
 
-int wl1271_acx_config_ps(struct wl1271 *wl)
+int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        struct wl1271_acx_config_ps *config_ps;
        int ret;
@@ -1582,7 +1574,7 @@ int wl1271_acx_config_ps(struct wl1271 *wl)
 
        config_ps->exit_retries = wl->conf.conn.psm_exit_retries;
        config_ps->enter_retries = wl->conf.conn.psm_entry_retries;
-       config_ps->null_data_rate = cpu_to_le32(wl->basic_rate);
+       config_ps->null_data_rate = cpu_to_le32(wlvif->basic_rate);
 
        ret = wl1271_cmd_configure(wl, ACX_CONFIG_PS, config_ps,
                                   sizeof(*config_ps));
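
The wl12xx ACX hunks above (and the matching prototype changes in acx.h below) thread a struct wl12xx_vif * through the command helpers and read role_id, the rate set and last_rssi_event from that vif rather than from the device-wide struct wl1271, a step toward per-interface (multi-vif) operation. Schematically, and with made-up stand-in types, the calling convention changes like this:

	#include <linux/types.h>

	struct demo_core { int unused; };			/* device-wide state (stub) */
	struct demo_vif  { u8 role_id; u32 basic_rate; };	/* per-interface state (stub) */

	static int demo_send_cmd(struct demo_core *core, const void *buf, size_t len)
	{
		return 0;	/* stand-in for the real firmware command path */
	}

	/* before: demo_acx_tx_power(core, power) read a single, global role id;
	 * after:  the vif being configured is passed in and supplies role_id. */
	static int demo_acx_tx_power(struct demo_core *core, struct demo_vif *vif,
				     int power)
	{
		struct {
			u8 role_id;
			s8 dbm;
		} cmd = {
			.role_id = vif->role_id,	/* per-vif, not global */
			.dbm     = power,
		};

		return demo_send_cmd(core, &cmd, sizeof(cmd));
	}
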
index e3f93b4..69892b4 100644 (file)
@@ -171,13 +171,6 @@ struct acx_rx_msdu_lifetime {
        __le32 lifetime;
 } __packed;
 
-struct acx_packet_detection {
-       struct acx_header header;
-
-       __le32 threshold;
-} __packed;
-
-
 enum acx_slot_type {
        SLOT_TIME_LONG = 0,
        SLOT_TIME_SHORT = 1,
@@ -654,11 +647,6 @@ struct acx_rate_class {
        u8 reserved;
 };
 
-#define ACX_TX_BASIC_RATE      0
-#define ACX_TX_AP_FULL_RATE    1
-#define ACX_TX_BASIC_RATE_P2P  2
-#define ACX_TX_AP_MODE_MGMT_RATE 4
-#define ACX_TX_AP_MODE_BCST_RATE 5
 struct acx_rate_policy {
        struct acx_header header;
 
@@ -1234,39 +1222,48 @@ enum {
 };
 
 
-int wl1271_acx_wake_up_conditions(struct wl1271 *wl);
+int wl1271_acx_wake_up_conditions(struct wl1271 *wl,
+                                 struct wl12xx_vif *wlvif);
 int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth);
-int wl1271_acx_tx_power(struct wl1271 *wl, int power);
-int wl1271_acx_feature_cfg(struct wl1271 *wl);
+int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                       int power);
+int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl1271_acx_mem_map(struct wl1271 *wl,
                       struct acx_header *mem_map, size_t len);
 int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl);
-int wl1271_acx_pd_threshold(struct wl1271 *wl);
-int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time);
-int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
-                                void *mc_list, u32 mc_list_len);
-int wl1271_acx_service_period_timeout(struct wl1271 *wl);
-int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold);
+int wl1271_acx_slot(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                   enum acx_slot_type slot_time);
+int wl1271_acx_group_address_tbl(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                bool enable, void *mc_list, u32 mc_list_len);
+int wl1271_acx_service_period_timeout(struct wl1271 *wl,
+                                     struct wl12xx_vif *wlvif);
+int wl1271_acx_rts_threshold(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                            u32 rts_threshold);
 int wl1271_acx_dco_itrim_params(struct wl1271 *wl);
-int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
-int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
-int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable);
+int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                bool enable_filter);
+int wl1271_acx_beacon_filter_table(struct wl1271 *wl,
+                                  struct wl12xx_vif *wlvif);
+int wl1271_acx_conn_monit_params(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                bool enable);
 int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable);
 int wl12xx_acx_sg_cfg(struct wl1271 *wl);
 int wl1271_acx_cca_threshold(struct wl1271 *wl);
-int wl1271_acx_bcn_dtim_options(struct wl1271 *wl);
-int wl1271_acx_aid(struct wl1271 *wl, u16 aid);
+int wl1271_acx_bcn_dtim_options(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl1271_acx_aid(struct wl1271 *wl, struct wl12xx_vif *wlvif, u16 aid);
 int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask);
-int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble);
-int wl1271_acx_cts_protect(struct wl1271 *wl,
+int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                           enum acx_preamble_type preamble);
+int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                           enum acx_ctsprotect_type ctsprotect);
 int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
-int wl1271_acx_sta_rate_policies(struct wl1271 *wl);
+int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
                      u8 idx);
-int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
-                     u8 aifsn, u16 txop);
-int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
+int wl1271_acx_ac_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                     u8 ac, u8 cw_min, u16 cw_max, u8 aifsn, u16 txop);
+int wl1271_acx_tid_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                      u8 queue_id, u8 channel_type,
                       u8 tsid, u8 ps_scheme, u8 ack_policy,
                       u32 apsd_conf0, u32 apsd_conf1);
 int wl1271_acx_frag_threshold(struct wl1271 *wl, u32 frag_threshold);
@@ -1276,26 +1273,34 @@ int wl1271_acx_init_mem_config(struct wl1271 *wl);
 int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap);
 int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
 int wl1271_acx_smart_reflex(struct wl1271 *wl);
-int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
-int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address);
+int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                         bool enable);
+int wl1271_acx_arp_ip_filter(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                            u8 enable, __be32 address);
 int wl1271_acx_pm_config(struct wl1271 *wl);
-int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable);
-int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid);
-int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
-                               s16 thold, u8 hyst);
-int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl);
+int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *vif,
+                              bool enable);
+int wl1271_acx_keep_alive_config(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                u8 index, u8 tpl_valid);
+int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                               bool enable, s16 thold, u8 hyst);
+int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl,
+                                   struct wl12xx_vif *wlvif);
 int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
                                    struct ieee80211_sta_ht_cap *ht_cap,
                                    bool allow_ht_operation, u8 hlid);
 int wl1271_acx_set_ht_information(struct wl1271 *wl,
+                                  struct wl12xx_vif *wlvif,
                                   u16 ht_operation_mode);
-int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl);
+int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl,
+                                      struct wl12xx_vif *wlvif);
 int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
                                       u16 ssn, bool enable, u8 peer_hlid);
 int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
-int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable);
-int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl);
-int wl1271_acx_config_ps(struct wl1271 *wl);
+int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                              bool enable);
+int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
 int wl1271_acx_fm_coex(struct wl1271 *wl);
 int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl);
index 6813379..8f9cf5a 100644
@@ -25,6 +25,7 @@
 #include <linux/wl12xx.h>
 #include <linux/export.h>
 
+#include "debug.h"
 #include "acx.h"
 #include "reg.h"
 #include "boot.h"
@@ -347,6 +348,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
                nvs_ptr += 3;
 
                for (i = 0; i < burst_len; i++) {
+                       if (nvs_ptr + 3 >= (u8 *) wl->nvs + nvs_len)
+                               goto out_badnvs;
+
                        val = (nvs_ptr[0] | (nvs_ptr[1] << 8)
                               | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24));
 
@@ -358,6 +362,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
                        nvs_ptr += 4;
                        dest_addr += 4;
                }
+
+               if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
+                       goto out_badnvs;
        }
 
        /*
@@ -369,6 +376,10 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
         */
        nvs_ptr = (u8 *)wl->nvs +
                        ALIGN(nvs_ptr - (u8 *)wl->nvs + 7, 4);
+
+       if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
+               goto out_badnvs;
+
        nvs_len -= nvs_ptr - (u8 *)wl->nvs;
 
        /* Now we must set the partition correctly */
@@ -384,6 +395,10 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
 
        kfree(nvs_aligned);
        return 0;
+
+out_badnvs:
+       wl1271_error("nvs data is malformed");
+       return -EILSEQ;
 }
 
 static void wl1271_boot_enable_interrupts(struct wl1271 *wl)
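
The bounds checks added to wl1271_boot_upload_nvs() above keep a truncated or corrupt NVS image from walking nvs_ptr past the end of wl->nvs: each 4-byte read inside a burst, the step to the next burst, and the final alignment jump are all validated, and the function now fails with -EILSEQ instead of reading past the buffer. A minimal standalone sketch of the same guard, built around a hypothetical parse_burst() helper rather than the driver code:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Assemble 32-bit little-endian words from one NVS burst, refusing to read
 * past the end of the buffer -- the same check the out_badnvs path performs.
 * parse_burst() is an illustrative helper, not a function from the driver.
 */
int parse_burst(const uint8_t *buf, size_t len, size_t pos,
                unsigned int burst_len, uint32_t *out)
{
        unsigned int i;

        for (i = 0; i < burst_len; i++) {
                if (pos + 3 >= len)      /* need buf[pos] .. buf[pos + 3] */
                        return -EILSEQ;  /* malformed NVS data */

                out[i] = buf[pos] | (buf[pos + 1] << 8) |
                         (buf[pos + 2] << 16) |
                         ((uint32_t)buf[pos + 3] << 24);
                pos += 4;
        }

        return 0;
}
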
index a52299e..e0d2179 100644
@@ -29,6 +29,7 @@
 #include <linux/slab.h>
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "reg.h"
 #include "io.h"
 #include "acx.h"
@@ -120,6 +121,11 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
        if (!wl->nvs)
                return -ENODEV;
 
+       if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+               wl1271_warning("FEM index from INI out of bounds");
+               return -EINVAL;
+       }
+
        gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
        if (!gen_parms)
                return -ENOMEM;
@@ -143,6 +149,12 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
        gp->tx_bip_fem_manufacturer =
                gen_parms->general_params.tx_bip_fem_manufacturer;
 
+       if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+               wl1271_warning("FEM index from FW out of bounds");
+               ret = -EINVAL;
+               goto out;
+       }
+
        wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
                     answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
 
@@ -162,6 +174,11 @@ int wl128x_cmd_general_parms(struct wl1271 *wl)
        if (!wl->nvs)
                return -ENODEV;
 
+       if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+               wl1271_warning("FEM index from ini out of bounds");
+               return -EINVAL;
+       }
+
        gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
        if (!gen_parms)
                return -ENOMEM;
@@ -186,6 +203,12 @@ int wl128x_cmd_general_parms(struct wl1271 *wl)
        gp->tx_bip_fem_manufacturer =
                gen_parms->general_params.tx_bip_fem_manufacturer;
 
+       if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+               wl1271_warning("FEM index from FW out of bounds");
+               ret = -EINVAL;
+               goto out;
+       }
+
        wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
                     answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
 
@@ -358,7 +381,8 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
        return 0;
 }
 
-int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id)
+int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type,
+                          u8 *role_id)
 {
        struct wl12xx_cmd_role_enable *cmd;
        int ret;
@@ -381,7 +405,7 @@ int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id)
                goto out_free;
        }
 
-       memcpy(cmd->mac_address, wl->mac_addr, ETH_ALEN);
+       memcpy(cmd->mac_address, addr, ETH_ALEN);
        cmd->role_type = role_type;
 
        ret = wl1271_cmd_send(wl, CMD_ROLE_ENABLE, cmd, sizeof(*cmd), 0);
@@ -433,37 +457,41 @@ out:
        return ret;
 }
 
-static int wl12xx_allocate_link(struct wl1271 *wl, u8 *hlid)
+int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
 {
        u8 link = find_first_zero_bit(wl->links_map, WL12XX_MAX_LINKS);
        if (link >= WL12XX_MAX_LINKS)
                return -EBUSY;
 
        __set_bit(link, wl->links_map);
+       __set_bit(link, wlvif->links_map);
        *hlid = link;
        return 0;
 }
 
-static void wl12xx_free_link(struct wl1271 *wl, u8 *hlid)
+void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
 {
        if (*hlid == WL12XX_INVALID_LINK_ID)
                return;
 
        __clear_bit(*hlid, wl->links_map);
+       __clear_bit(*hlid, wlvif->links_map);
        *hlid = WL12XX_INVALID_LINK_ID;
 }
 
-static int wl12xx_get_new_session_id(struct wl1271 *wl)
+static int wl12xx_get_new_session_id(struct wl1271 *wl,
+                                    struct wl12xx_vif *wlvif)
 {
-       if (wl->session_counter >= SESSION_COUNTER_MAX)
-               wl->session_counter = 0;
+       if (wlvif->session_counter >= SESSION_COUNTER_MAX)
+               wlvif->session_counter = 0;
 
-       wl->session_counter++;
+       wlvif->session_counter++;
 
-       return wl->session_counter;
+       return wlvif->session_counter;
 }
 
-int wl12xx_cmd_role_start_dev(struct wl1271 *wl)
+static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
+                                    struct wl12xx_vif *wlvif)
 {
        struct wl12xx_cmd_role_start *cmd;
        int ret;
@@ -474,20 +502,20 @@ int wl12xx_cmd_role_start_dev(struct wl1271 *wl)
                goto out;
        }
 
-       wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wl->dev_role_id);
+       wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wlvif->dev_role_id);
 
-       cmd->role_id = wl->dev_role_id;
-       if (wl->band == IEEE80211_BAND_5GHZ)
+       cmd->role_id = wlvif->dev_role_id;
+       if (wlvif->band == IEEE80211_BAND_5GHZ)
                cmd->band = WL12XX_BAND_5GHZ;
-       cmd->channel = wl->channel;
+       cmd->channel = wlvif->channel;
 
-       if (wl->dev_hlid == WL12XX_INVALID_LINK_ID) {
-               ret = wl12xx_allocate_link(wl, &wl->dev_hlid);
+       if (wlvif->dev_hlid == WL12XX_INVALID_LINK_ID) {
+               ret = wl12xx_allocate_link(wl, wlvif, &wlvif->dev_hlid);
                if (ret)
                        goto out_free;
        }
-       cmd->device.hlid = wl->dev_hlid;
-       cmd->device.session = wl->session_counter;
+       cmd->device.hlid = wlvif->dev_hlid;
+       cmd->device.session = wlvif->session_counter;
 
        wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d",
                     cmd->role_id, cmd->device.hlid, cmd->device.session);
@@ -502,9 +530,7 @@ int wl12xx_cmd_role_start_dev(struct wl1271 *wl)
 
 err_hlid:
        /* clear links on error */
-       __clear_bit(wl->dev_hlid, wl->links_map);
-       wl->dev_hlid = WL12XX_INVALID_LINK_ID;
-
+       wl12xx_free_link(wl, wlvif, &wlvif->dev_hlid);
 
 out_free:
        kfree(cmd);
@@ -513,12 +539,13 @@ out:
        return ret;
 }
 
-int wl12xx_cmd_role_stop_dev(struct wl1271 *wl)
+static int wl12xx_cmd_role_stop_dev(struct wl1271 *wl,
+                                   struct wl12xx_vif *wlvif)
 {
        struct wl12xx_cmd_role_stop *cmd;
        int ret;
 
-       if (WARN_ON(wl->dev_hlid == WL12XX_INVALID_LINK_ID))
+       if (WARN_ON(wlvif->dev_hlid == WL12XX_INVALID_LINK_ID))
                return -EINVAL;
 
        cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -529,7 +556,7 @@ int wl12xx_cmd_role_stop_dev(struct wl1271 *wl)
 
        wl1271_debug(DEBUG_CMD, "cmd role stop dev");
 
-       cmd->role_id = wl->dev_role_id;
+       cmd->role_id = wlvif->dev_role_id;
        cmd->disc_type = DISCONNECT_IMMEDIATE;
        cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED);
 
@@ -545,7 +572,7 @@ int wl12xx_cmd_role_stop_dev(struct wl1271 *wl)
                goto out_free;
        }
 
-       wl12xx_free_link(wl, &wl->dev_hlid);
+       wl12xx_free_link(wl, wlvif, &wlvif->dev_hlid);
 
 out_free:
        kfree(cmd);
@@ -554,8 +581,9 @@ out:
        return ret;
 }
 
-int wl12xx_cmd_role_start_sta(struct wl1271 *wl)
+int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
+       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
        struct wl12xx_cmd_role_start *cmd;
        int ret;
 
@@ -565,33 +593,33 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl)
                goto out;
        }
 
-       wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wl->role_id);
+       wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wlvif->role_id);
 
-       cmd->role_id = wl->role_id;
-       if (wl->band == IEEE80211_BAND_5GHZ)
+       cmd->role_id = wlvif->role_id;
+       if (wlvif->band == IEEE80211_BAND_5GHZ)
                cmd->band = WL12XX_BAND_5GHZ;
-       cmd->channel = wl->channel;
-       cmd->sta.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
-       cmd->sta.beacon_interval = cpu_to_le16(wl->beacon_int);
+       cmd->channel = wlvif->channel;
+       cmd->sta.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
+       cmd->sta.beacon_interval = cpu_to_le16(wlvif->beacon_int);
        cmd->sta.ssid_type = WL12XX_SSID_TYPE_ANY;
-       cmd->sta.ssid_len = wl->ssid_len;
-       memcpy(cmd->sta.ssid, wl->ssid, wl->ssid_len);
-       memcpy(cmd->sta.bssid, wl->bssid, ETH_ALEN);
-       cmd->sta.local_rates = cpu_to_le32(wl->rate_set);
+       cmd->sta.ssid_len = wlvif->ssid_len;
+       memcpy(cmd->sta.ssid, wlvif->ssid, wlvif->ssid_len);
+       memcpy(cmd->sta.bssid, vif->bss_conf.bssid, ETH_ALEN);
+       cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set);
 
-       if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) {
-               ret = wl12xx_allocate_link(wl, &wl->sta_hlid);
+       if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) {
+               ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid);
                if (ret)
                        goto out_free;
        }
-       cmd->sta.hlid = wl->sta_hlid;
-       cmd->sta.session = wl12xx_get_new_session_id(wl);
-       cmd->sta.remote_rates = cpu_to_le32(wl->rate_set);
+       cmd->sta.hlid = wlvif->sta.hlid;
+       cmd->sta.session = wl12xx_get_new_session_id(wl, wlvif);
+       cmd->sta.remote_rates = cpu_to_le32(wlvif->rate_set);
 
        wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d "
                     "basic_rate_set: 0x%x, remote_rates: 0x%x",
-                    wl->role_id, cmd->sta.hlid, cmd->sta.session,
-                    wl->basic_rate_set, wl->rate_set);
+                    wlvif->role_id, cmd->sta.hlid, cmd->sta.session,
+                    wlvif->basic_rate_set, wlvif->rate_set);
 
        ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
        if (ret < 0) {
@@ -603,7 +631,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl)
 
 err_hlid:
        /* clear links on error. */
-       wl12xx_free_link(wl, &wl->sta_hlid);
+       wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
 
 out_free:
        kfree(cmd);
@@ -613,12 +641,12 @@ out:
 }
 
 /* use this function to stop ibss as well */
-int wl12xx_cmd_role_stop_sta(struct wl1271 *wl)
+int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        struct wl12xx_cmd_role_stop *cmd;
        int ret;
 
-       if (WARN_ON(wl->sta_hlid == WL12XX_INVALID_LINK_ID))
+       if (WARN_ON(wlvif->sta.hlid == WL12XX_INVALID_LINK_ID))
                return -EINVAL;
 
        cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -627,9 +655,9 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl)
                goto out;
        }
 
-       wl1271_debug(DEBUG_CMD, "cmd role stop sta %d", wl->role_id);
+       wl1271_debug(DEBUG_CMD, "cmd role stop sta %d", wlvif->role_id);
 
-       cmd->role_id = wl->role_id;
+       cmd->role_id = wlvif->role_id;
        cmd->disc_type = DISCONNECT_IMMEDIATE;
        cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED);
 
@@ -639,7 +667,7 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl)
                goto out_free;
        }
 
-       wl12xx_free_link(wl, &wl->sta_hlid);
+       wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
 
 out_free:
        kfree(cmd);
@@ -648,16 +676,17 @@ out:
        return ret;
 }
 
-int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
+int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        struct wl12xx_cmd_role_start *cmd;
-       struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+       struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
        int ret;
 
-       wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wl->role_id);
+       wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wlvif->role_id);
 
        /* trying to use hidden SSID with an old hostapd version */
-       if (wl->ssid_len == 0 && !bss_conf->hidden_ssid) {
+       if (wlvif->ssid_len == 0 && !bss_conf->hidden_ssid) {
                wl1271_error("got a null SSID from beacon/bss");
                ret = -EINVAL;
                goto out;
@@ -669,30 +698,30 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
                goto out;
        }
 
-       ret = wl12xx_allocate_link(wl, &wl->ap_global_hlid);
+       ret = wl12xx_allocate_link(wl, wlvif, &wlvif->ap.global_hlid);
        if (ret < 0)
                goto out_free;
 
-       ret = wl12xx_allocate_link(wl, &wl->ap_bcast_hlid);
+       ret = wl12xx_allocate_link(wl, wlvif, &wlvif->ap.bcast_hlid);
        if (ret < 0)
                goto out_free_global;
 
-       cmd->role_id = wl->role_id;
+       cmd->role_id = wlvif->role_id;
        cmd->ap.aging_period = cpu_to_le16(wl->conf.tx.ap_aging_period);
        cmd->ap.bss_index = WL1271_AP_BSS_INDEX;
-       cmd->ap.global_hlid = wl->ap_global_hlid;
-       cmd->ap.broadcast_hlid = wl->ap_bcast_hlid;
-       cmd->ap.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
-       cmd->ap.beacon_interval = cpu_to_le16(wl->beacon_int);
+       cmd->ap.global_hlid = wlvif->ap.global_hlid;
+       cmd->ap.broadcast_hlid = wlvif->ap.bcast_hlid;
+       cmd->ap.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
+       cmd->ap.beacon_interval = cpu_to_le16(wlvif->beacon_int);
        cmd->ap.dtim_interval = bss_conf->dtim_period;
        cmd->ap.beacon_expiry = WL1271_AP_DEF_BEACON_EXP;
-       cmd->channel = wl->channel;
+       cmd->channel = wlvif->channel;
 
        if (!bss_conf->hidden_ssid) {
                /* take the SSID from the beacon for backward compatibility */
                cmd->ap.ssid_type = WL12XX_SSID_TYPE_PUBLIC;
-               cmd->ap.ssid_len = wl->ssid_len;
-               memcpy(cmd->ap.ssid, wl->ssid, wl->ssid_len);
+               cmd->ap.ssid_len = wlvif->ssid_len;
+               memcpy(cmd->ap.ssid, wlvif->ssid, wlvif->ssid_len);
        } else {
                cmd->ap.ssid_type = WL12XX_SSID_TYPE_HIDDEN;
                cmd->ap.ssid_len = bss_conf->ssid_len;
@@ -701,7 +730,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
 
        cmd->ap.local_rates = cpu_to_le32(0xffffffff);
 
-       switch (wl->band) {
+       switch (wlvif->band) {
        case IEEE80211_BAND_2GHZ:
                cmd->band = RADIO_BAND_2_4GHZ;
                break;
@@ -709,7 +738,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
                cmd->band = RADIO_BAND_5GHZ;
                break;
        default:
-               wl1271_warning("ap start - unknown band: %d", (int)wl->band);
+               wl1271_warning("ap start - unknown band: %d", (int)wlvif->band);
                cmd->band = RADIO_BAND_2_4GHZ;
                break;
        }
@@ -723,10 +752,10 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
        goto out_free;
 
 out_free_bcast:
-       wl12xx_free_link(wl, &wl->ap_bcast_hlid);
+       wl12xx_free_link(wl, wlvif, &wlvif->ap.bcast_hlid);
 
 out_free_global:
-       wl12xx_free_link(wl, &wl->ap_global_hlid);
+       wl12xx_free_link(wl, wlvif, &wlvif->ap.global_hlid);
 
 out_free:
        kfree(cmd);
@@ -735,7 +764,7 @@ out:
        return ret;
 }
 
-int wl12xx_cmd_role_stop_ap(struct wl1271 *wl)
+int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        struct wl12xx_cmd_role_stop *cmd;
        int ret;
@@ -746,9 +775,9 @@ int wl12xx_cmd_role_stop_ap(struct wl1271 *wl)
                goto out;
        }
 
-       wl1271_debug(DEBUG_CMD, "cmd role stop ap %d", wl->role_id);
+       wl1271_debug(DEBUG_CMD, "cmd role stop ap %d", wlvif->role_id);
 
-       cmd->role_id = wl->role_id;
+       cmd->role_id = wlvif->role_id;
 
        ret = wl1271_cmd_send(wl, CMD_ROLE_STOP, cmd, sizeof(*cmd), 0);
        if (ret < 0) {
@@ -756,8 +785,8 @@ int wl12xx_cmd_role_stop_ap(struct wl1271 *wl)
                goto out_free;
        }
 
-       wl12xx_free_link(wl, &wl->ap_bcast_hlid);
-       wl12xx_free_link(wl, &wl->ap_global_hlid);
+       wl12xx_free_link(wl, wlvif, &wlvif->ap.bcast_hlid);
+       wl12xx_free_link(wl, wlvif, &wlvif->ap.global_hlid);
 
 out_free:
        kfree(cmd);
@@ -766,10 +795,11 @@ out:
        return ret;
 }
 
-int wl12xx_cmd_role_start_ibss(struct wl1271 *wl)
+int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
+       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
        struct wl12xx_cmd_role_start *cmd;
-       struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+       struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
        int ret;
 
        cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -778,35 +808,36 @@ int wl12xx_cmd_role_start_ibss(struct wl1271 *wl)
                goto out;
        }
 
-       wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wl->role_id);
+       wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wlvif->role_id);
 
-       cmd->role_id = wl->role_id;
-       if (wl->band == IEEE80211_BAND_5GHZ)
+       cmd->role_id = wlvif->role_id;
+       if (wlvif->band == IEEE80211_BAND_5GHZ)
                cmd->band = WL12XX_BAND_5GHZ;
-       cmd->channel = wl->channel;
-       cmd->ibss.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
-       cmd->ibss.beacon_interval = cpu_to_le16(wl->beacon_int);
+       cmd->channel = wlvif->channel;
+       cmd->ibss.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
+       cmd->ibss.beacon_interval = cpu_to_le16(wlvif->beacon_int);
        cmd->ibss.dtim_interval = bss_conf->dtim_period;
        cmd->ibss.ssid_type = WL12XX_SSID_TYPE_ANY;
-       cmd->ibss.ssid_len = wl->ssid_len;
-       memcpy(cmd->ibss.ssid, wl->ssid, wl->ssid_len);
-       memcpy(cmd->ibss.bssid, wl->bssid, ETH_ALEN);
-       cmd->sta.local_rates = cpu_to_le32(wl->rate_set);
+       cmd->ibss.ssid_len = wlvif->ssid_len;
+       memcpy(cmd->ibss.ssid, wlvif->ssid, wlvif->ssid_len);
+       memcpy(cmd->ibss.bssid, vif->bss_conf.bssid, ETH_ALEN);
+       cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set);
 
-       if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) {
-               ret = wl12xx_allocate_link(wl, &wl->sta_hlid);
+       if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) {
+               ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid);
                if (ret)
                        goto out_free;
        }
-       cmd->ibss.hlid = wl->sta_hlid;
-       cmd->ibss.remote_rates = cpu_to_le32(wl->rate_set);
+       cmd->ibss.hlid = wlvif->sta.hlid;
+       cmd->ibss.remote_rates = cpu_to_le32(wlvif->rate_set);
 
        wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d "
                     "basic_rate_set: 0x%x, remote_rates: 0x%x",
-                    wl->role_id, cmd->sta.hlid, cmd->sta.session,
-                    wl->basic_rate_set, wl->rate_set);
+                    wlvif->role_id, cmd->sta.hlid, cmd->sta.session,
+                    wlvif->basic_rate_set, wlvif->rate_set);
 
-       wl1271_debug(DEBUG_CMD, "wl->bssid = %pM", wl->bssid);
+       wl1271_debug(DEBUG_CMD, "vif->bss_conf.bssid = %pM",
+                    vif->bss_conf.bssid);
 
        ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
        if (ret < 0) {
@@ -818,7 +849,7 @@ int wl12xx_cmd_role_start_ibss(struct wl1271 *wl)
 
 err_hlid:
        /* clear links on error. */
-       wl12xx_free_link(wl, &wl->sta_hlid);
+       wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
 
 out_free:
        kfree(cmd);
@@ -962,7 +993,8 @@ out:
        return ret;
 }
 
-int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
+int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                      u8 ps_mode)
 {
        struct wl1271_cmd_ps_params *ps_params = NULL;
        int ret = 0;
@@ -975,7 +1007,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
                goto out;
        }
 
-       ps_params->role_id = wl->role_id;
+       ps_params->role_id = wlvif->role_id;
        ps_params->ps_mode = ps_mode;
 
        ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
@@ -1030,7 +1062,7 @@ out:
        return ret;
 }
 
-int wl1271_cmd_build_null_data(struct wl1271 *wl)
+int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        struct sk_buff *skb = NULL;
        int size;
@@ -1038,11 +1070,12 @@ int wl1271_cmd_build_null_data(struct wl1271 *wl)
        int ret = -ENOMEM;
 
 
-       if (wl->bss_type == BSS_TYPE_IBSS) {
+       if (wlvif->bss_type == BSS_TYPE_IBSS) {
                size = sizeof(struct wl12xx_null_data_template);
                ptr = NULL;
        } else {
-               skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+               skb = ieee80211_nullfunc_get(wl->hw,
+                                            wl12xx_wlvif_to_vif(wlvif));
                if (!skb)
                        goto out;
                size = skb->len;
@@ -1050,7 +1083,7 @@ int wl1271_cmd_build_null_data(struct wl1271 *wl)
        }
 
        ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, ptr, size, 0,
-                                     wl->basic_rate);
+                                     wlvif->basic_rate);
 
 out:
        dev_kfree_skb(skb);
@@ -1061,19 +1094,21 @@ out:
 
 }
 
-int wl1271_cmd_build_klv_null_data(struct wl1271 *wl)
+int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
+                                  struct wl12xx_vif *wlvif)
 {
+       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
        struct sk_buff *skb = NULL;
        int ret = -ENOMEM;
 
-       skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+       skb = ieee80211_nullfunc_get(wl->hw, vif);
        if (!skb)
                goto out;
 
        ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV,
                                      skb->data, skb->len,
                                      CMD_TEMPL_KLV_IDX_NULL_DATA,
-                                     wl->basic_rate);
+                                     wlvif->basic_rate);
 
 out:
        dev_kfree_skb(skb);
@@ -1084,32 +1119,35 @@ out:
 
 }
 
-int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid)
+int wl1271_cmd_build_ps_poll(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                            u16 aid)
 {
+       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
        struct sk_buff *skb;
        int ret = 0;
 
-       skb = ieee80211_pspoll_get(wl->hw, wl->vif);
+       skb = ieee80211_pspoll_get(wl->hw, vif);
        if (!skb)
                goto out;
 
        ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, skb->data,
-                                     skb->len, 0, wl->basic_rate_set);
+                                     skb->len, 0, wlvif->basic_rate_set);
 
 out:
        dev_kfree_skb(skb);
        return ret;
 }
 
-int wl1271_cmd_build_probe_req(struct wl1271 *wl,
+int wl1271_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                               const u8 *ssid, size_t ssid_len,
                               const u8 *ie, size_t ie_len, u8 band)
 {
+       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
        struct sk_buff *skb;
        int ret;
        u32 rate;
 
-       skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len,
+       skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len,
                                     ie, ie_len);
        if (!skb) {
                ret = -ENOMEM;
@@ -1118,7 +1156,7 @@ int wl1271_cmd_build_probe_req(struct wl1271 *wl,
 
        wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
 
-       rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
+       rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
        if (band == IEEE80211_BAND_2GHZ)
                ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
                                              skb->data, skb->len, 0, rate);
@@ -1132,20 +1170,22 @@ out:
 }
 
 struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
+                                             struct wl12xx_vif *wlvif,
                                              struct sk_buff *skb)
 {
+       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
        int ret;
        u32 rate;
 
        if (!skb)
-               skb = ieee80211_ap_probereq_get(wl->hw, wl->vif);
+               skb = ieee80211_ap_probereq_get(wl->hw, vif);
        if (!skb)
                goto out;
 
        wl1271_dump(DEBUG_SCAN, "AP PROBE REQ: ", skb->data, skb->len);
 
-       rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[wl->band]);
-       if (wl->band == IEEE80211_BAND_2GHZ)
+       rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[wlvif->band]);
+       if (wlvif->band == IEEE80211_BAND_2GHZ)
                ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
                                              skb->data, skb->len, 0, rate);
        else
@@ -1159,9 +1199,11 @@ out:
        return skb;
 }
 
-int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
+int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                            __be32 ip_addr)
 {
        int ret;
+       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
        struct wl12xx_arp_rsp_template tmpl;
        struct ieee80211_hdr_3addr *hdr;
        struct arphdr *arp_hdr;
@@ -1173,8 +1215,8 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
        hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
                                         IEEE80211_STYPE_DATA |
                                         IEEE80211_FCTL_TODS);
-       memcpy(hdr->addr1, wl->vif->bss_conf.bssid, ETH_ALEN);
-       memcpy(hdr->addr2, wl->vif->addr, ETH_ALEN);
+       memcpy(hdr->addr1, vif->bss_conf.bssid, ETH_ALEN);
+       memcpy(hdr->addr2, vif->addr, ETH_ALEN);
        memset(hdr->addr3, 0xff, ETH_ALEN);
 
        /* llc layer */
@@ -1190,25 +1232,26 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
        arp_hdr->ar_op = cpu_to_be16(ARPOP_REPLY);
 
        /* arp payload */
-       memcpy(tmpl.sender_hw, wl->vif->addr, ETH_ALEN);
+       memcpy(tmpl.sender_hw, vif->addr, ETH_ALEN);
        tmpl.sender_ip = ip_addr;
 
        ret = wl1271_cmd_template_set(wl, CMD_TEMPL_ARP_RSP,
                                      &tmpl, sizeof(tmpl), 0,
-                                     wl->basic_rate);
+                                     wlvif->basic_rate);
 
        return ret;
 }
 
-int wl1271_build_qos_null_data(struct wl1271 *wl)
+int wl1271_build_qos_null_data(struct wl1271 *wl, struct ieee80211_vif *vif)
 {
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        struct ieee80211_qos_hdr template;
 
        memset(&template, 0, sizeof(template));
 
-       memcpy(template.addr1, wl->bssid, ETH_ALEN);
-       memcpy(template.addr2, wl->mac_addr, ETH_ALEN);
-       memcpy(template.addr3, wl->bssid, ETH_ALEN);
+       memcpy(template.addr1, vif->bss_conf.bssid, ETH_ALEN);
+       memcpy(template.addr2, vif->addr, ETH_ALEN);
+       memcpy(template.addr3, vif->bss_conf.bssid, ETH_ALEN);
 
        template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
                                             IEEE80211_STYPE_QOS_NULLFUNC |
@@ -1219,7 +1262,7 @@ int wl1271_build_qos_null_data(struct wl1271 *wl)
 
        return wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, &template,
                                       sizeof(template), 0,
-                                      wl->basic_rate);
+                                      wlvif->basic_rate);
 }
 
 int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid)
@@ -1253,7 +1296,8 @@ out:
        return ret;
 }
 
-int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                      u16 action, u8 id, u8 key_type,
                       u8 key_size, const u8 *key, const u8 *addr,
                       u32 tx_seq_32, u16 tx_seq_16)
 {
@@ -1261,7 +1305,7 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
        int ret = 0;
 
        /* hlid might have already been deleted */
-       if (wl->sta_hlid == WL12XX_INVALID_LINK_ID)
+       if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
                return 0;
 
        cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -1270,7 +1314,7 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
                goto out;
        }
 
-       cmd->hlid = wl->sta_hlid;
+       cmd->hlid = wlvif->sta.hlid;
 
        if (key_type == KEY_WEP)
                cmd->lid_key_type = WEP_DEFAULT_LID_TYPE;
@@ -1321,9 +1365,10 @@ out:
  * TODO: merge with sta/ibss into 1 set_key function.
  * note there are slight diffs
  */
-int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
-                       u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
-                       u16 tx_seq_16)
+int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                         u16 action, u8 id, u8 key_type,
+                         u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
+                         u16 tx_seq_16)
 {
        struct wl1271_cmd_set_keys *cmd;
        int ret = 0;
@@ -1333,7 +1378,7 @@ int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
        if (!cmd)
                return -ENOMEM;
 
-       if (hlid == wl->ap_bcast_hlid) {
+       if (hlid == wlvif->ap.bcast_hlid) {
                if (key_type == KEY_WEP)
                        lid_type = WEP_DEFAULT_LID_TYPE;
                else
@@ -1411,7 +1456,8 @@ out:
        return ret;
 }
 
-int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
+int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                       struct ieee80211_sta *sta, u8 hlid)
 {
        struct wl12xx_cmd_add_peer *cmd;
        int i, ret;
@@ -1438,13 +1484,13 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
                else
                        cmd->psd_type[i] = WL1271_PSD_LEGACY;
 
-       sta_rates = sta->supp_rates[wl->band];
+       sta_rates = sta->supp_rates[wlvif->band];
        if (sta->ht_cap.ht_supported)
                sta_rates |= sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET;
 
        cmd->supported_rates =
                cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates,
-                                                       wl->band));
+                                                       wlvif->band));
 
        wl1271_debug(DEBUG_CMD, "new peer rates=0x%x queues=0x%x",
                     cmd->supported_rates, sta->uapsd_queues);
@@ -1584,12 +1630,13 @@ out:
        return ret;
 }
 
-static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id)
+static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                         u8 role_id)
 {
        struct wl12xx_cmd_roc *cmd;
        int ret = 0;
 
-       wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", wl->channel, role_id);
+       wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", wlvif->channel, role_id);
 
        if (WARN_ON(role_id == WL12XX_INVALID_ROLE_ID))
                return -EINVAL;
@@ -1601,8 +1648,8 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id)
        }
 
        cmd->role_id = role_id;
-       cmd->channel = wl->channel;
-       switch (wl->band) {
+       cmd->channel = wlvif->channel;
+       switch (wlvif->band) {
        case IEEE80211_BAND_2GHZ:
                cmd->band = RADIO_BAND_2_4GHZ;
                break;
@@ -1610,7 +1657,7 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id)
                cmd->band = RADIO_BAND_5GHZ;
                break;
        default:
-               wl1271_error("roc - unknown band: %d", (int)wl->band);
+               wl1271_error("roc - unknown band: %d", (int)wlvif->band);
                ret = -EINVAL;
                goto out_free;
        }
@@ -1657,14 +1704,14 @@ out:
        return ret;
 }
 
-int wl12xx_roc(struct wl1271 *wl, u8 role_id)
+int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id)
 {
        int ret = 0;
 
        if (WARN_ON(test_bit(role_id, wl->roc_map)))
                return 0;
 
-       ret = wl12xx_cmd_roc(wl, role_id);
+       ret = wl12xx_cmd_roc(wl, wlvif, role_id);
        if (ret < 0)
                goto out;
 
@@ -1753,3 +1800,50 @@ out_free:
 out:
        return ret;
 }
+
+/* start dev role and roc on its channel */
+int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+       int ret;
+
+       if (WARN_ON(!(wlvif->bss_type == BSS_TYPE_STA_BSS ||
+                     wlvif->bss_type == BSS_TYPE_IBSS)))
+               return -EINVAL;
+
+       ret = wl12xx_cmd_role_start_dev(wl, wlvif);
+       if (ret < 0)
+               goto out;
+
+       ret = wl12xx_roc(wl, wlvif, wlvif->dev_role_id);
+       if (ret < 0)
+               goto out_stop;
+
+       return 0;
+
+out_stop:
+       wl12xx_cmd_role_stop_dev(wl, wlvif);
+out:
+       return ret;
+}
+
+/* croc dev hlid, and stop the role */
+int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+       int ret;
+
+       if (WARN_ON(!(wlvif->bss_type == BSS_TYPE_STA_BSS ||
+                     wlvif->bss_type == BSS_TYPE_IBSS)))
+               return -EINVAL;
+
+       if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
+               ret = wl12xx_croc(wl, wlvif->dev_role_id);
+               if (ret < 0)
+                       goto out;
+       }
+
+       ret = wl12xx_cmd_role_stop_dev(wl, wlvif);
+       if (ret < 0)
+               goto out;
+out:
+       return ret;
+}
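
With per-interface state, wl12xx_allocate_link() and wl12xx_free_link() above now keep two bitmaps in sync: the device-global wl->links_map and the owning interface's wlvif->links_map, and the error paths in the role-start commands release links through wl12xx_free_link() so both views stay consistent. A self-contained sketch of that dual-bitmap bookkeeping, using simplified flat types and illustrative sizes instead of the kernel's find_first_zero_bit()/__set_bit() helpers:

#include <errno.h>
#include <stdint.h>

#define MAX_LINKS        12     /* illustrative limit */
#define INVALID_LINK_ID  0xff

struct dev_state { unsigned long links_map; };  /* device-global map */
struct vif_state { unsigned long links_map; };  /* per-interface map */

/* Grab the first free link id and mark it used in both maps. */
int allocate_link(struct dev_state *dev, struct vif_state *vif, uint8_t *hlid)
{
        unsigned int link;

        for (link = 0; link < MAX_LINKS; link++)
                if (!(dev->links_map & (1UL << link)))
                        break;

        if (link >= MAX_LINKS)
                return -EBUSY;

        dev->links_map |= 1UL << link;
        vif->links_map |= 1UL << link;
        *hlid = link;
        return 0;
}

/* Release a link id from both maps; a freed slot reads back as invalid. */
void free_link(struct dev_state *dev, struct vif_state *vif, uint8_t *hlid)
{
        if (*hlid == INVALID_LINK_ID)
                return;

        dev->links_map &= ~(1UL << *hlid);
        vif->links_map &= ~(1UL << *hlid);
        *hlid = INVALID_LINK_ID;
}
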
index b7bd427..3f7d0b9 100644
@@ -36,45 +36,54 @@ int wl128x_cmd_general_parms(struct wl1271 *wl);
 int wl1271_cmd_radio_parms(struct wl1271 *wl);
 int wl128x_cmd_radio_parms(struct wl1271 *wl);
 int wl1271_cmd_ext_radio_parms(struct wl1271 *wl);
-int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id);
+int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type,
+                          u8 *role_id);
 int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id);
-int wl12xx_cmd_role_start_dev(struct wl1271 *wl);
-int wl12xx_cmd_role_stop_dev(struct wl1271 *wl);
-int wl12xx_cmd_role_start_sta(struct wl1271 *wl);
-int wl12xx_cmd_role_stop_sta(struct wl1271 *wl);
-int wl12xx_cmd_role_start_ap(struct wl1271 *wl);
-int wl12xx_cmd_role_stop_ap(struct wl1271 *wl);
-int wl12xx_cmd_role_start_ibss(struct wl1271 *wl);
+int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
 int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
 int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
 int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
-int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode);
+int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                      u8 ps_mode);
 int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
                           size_t len);
 int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
                            void *buf, size_t buf_len, int index, u32 rates);
-int wl1271_cmd_build_null_data(struct wl1271 *wl);
-int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid);
-int wl1271_cmd_build_probe_req(struct wl1271 *wl,
+int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl1271_cmd_build_ps_poll(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                            u16 aid);
+int wl1271_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                               const u8 *ssid, size_t ssid_len,
                               const u8 *ie, size_t ie_len, u8 band);
 struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
+                                             struct wl12xx_vif *wlvif,
                                              struct sk_buff *skb);
-int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr);
-int wl1271_build_qos_null_data(struct wl1271 *wl);
-int wl1271_cmd_build_klv_null_data(struct wl1271 *wl);
+int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                            __be32 ip_addr);
+int wl1271_build_qos_null_data(struct wl1271 *wl, struct ieee80211_vif *vif);
+int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
+                                  struct wl12xx_vif *wlvif);
 int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid);
-int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                          u16 action, u8 id, u8 key_type,
                           u8 key_size, const u8 *key, const u8 *addr,
                           u32 tx_seq_32, u16 tx_seq_16);
-int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                         u16 action, u8 id, u8 key_type,
                          u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
                          u16 tx_seq_16);
 int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid);
-int wl12xx_roc(struct wl1271 *wl, u8 role_id);
+int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id);
 int wl12xx_croc(struct wl1271 *wl, u8 role_id);
-int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid);
+int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                       struct ieee80211_sta *sta, u8 hlid);
 int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid);
 int wl12xx_cmd_config_fwlog(struct wl1271 *wl);
 int wl12xx_cmd_start_fwlog(struct wl1271 *wl);
@@ -82,6 +91,9 @@ int wl12xx_cmd_stop_fwlog(struct wl1271 *wl);
 int wl12xx_cmd_channel_switch(struct wl1271 *wl,
                              struct ieee80211_channel_switch *ch_switch);
 int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl);
+int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                        u8 *hlid);
+void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid);
 
 enum wl1271_commands {
        CMD_INTERROGATE     = 1,    /*use this to read information elements*/
index 04bb8fb..1bcfb01 100644
@@ -440,6 +440,10 @@ struct conf_rx_settings {
        CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS |      \
        CONF_HW_BIT_RATE_54MBPS)
 
+#define CONF_TX_CCK_RATES  (CONF_HW_BIT_RATE_1MBPS |           \
+       CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS |     \
+       CONF_HW_BIT_RATE_11MBPS)
+
 #define CONF_TX_OFDM_RATES (CONF_HW_BIT_RATE_6MBPS |             \
        CONF_HW_BIT_RATE_12MBPS | CONF_HW_BIT_RATE_24MBPS |      \
        CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS |      \
diff --git a/drivers/net/wireless/wl12xx/debug.h b/drivers/net/wireless/wl12xx/debug.h
new file mode 100644
index 0000000..b85fd8c
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2011 Texas Instruments. All rights reserved.
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Contact: Luciano Coelho <coelho@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __DEBUG_H__
+#define __DEBUG_H__
+
+#include <linux/bitops.h>
+#include <linux/printk.h>
+
+#define DRIVER_NAME "wl12xx"
+#define DRIVER_PREFIX DRIVER_NAME ": "
+
+enum {
+       DEBUG_NONE      = 0,
+       DEBUG_IRQ       = BIT(0),
+       DEBUG_SPI       = BIT(1),
+       DEBUG_BOOT      = BIT(2),
+       DEBUG_MAILBOX   = BIT(3),
+       DEBUG_TESTMODE  = BIT(4),
+       DEBUG_EVENT     = BIT(5),
+       DEBUG_TX        = BIT(6),
+       DEBUG_RX        = BIT(7),
+       DEBUG_SCAN      = BIT(8),
+       DEBUG_CRYPT     = BIT(9),
+       DEBUG_PSM       = BIT(10),
+       DEBUG_MAC80211  = BIT(11),
+       DEBUG_CMD       = BIT(12),
+       DEBUG_ACX       = BIT(13),
+       DEBUG_SDIO      = BIT(14),
+       DEBUG_FILTERS   = BIT(15),
+       DEBUG_ADHOC     = BIT(16),
+       DEBUG_AP        = BIT(17),
+       DEBUG_MASTER    = (DEBUG_ADHOC | DEBUG_AP),
+       DEBUG_ALL       = ~0,
+};
+
+extern u32 wl12xx_debug_level;
+
+#define DEBUG_DUMP_LIMIT 1024
+
+#define wl1271_error(fmt, arg...) \
+       pr_err(DRIVER_PREFIX "ERROR " fmt "\n", ##arg)
+
+#define wl1271_warning(fmt, arg...) \
+       pr_warning(DRIVER_PREFIX "WARNING " fmt "\n", ##arg)
+
+#define wl1271_notice(fmt, arg...) \
+       pr_info(DRIVER_PREFIX fmt "\n", ##arg)
+
+#define wl1271_info(fmt, arg...) \
+       pr_info(DRIVER_PREFIX fmt "\n", ##arg)
+
+#define wl1271_debug(level, fmt, arg...) \
+       do { \
+               if (level & wl12xx_debug_level) \
+                       pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \
+       } while (0)
+
+/* TODO: use pr_debug_hex_dump when it becomes available */
+#define wl1271_dump(level, prefix, buf, len)   \
+       do { \
+               if (level & wl12xx_debug_level) \
+                       print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
+                                      DUMP_PREFIX_OFFSET, 16, 1,       \
+                                      buf,                             \
+                                      min_t(size_t, len, DEBUG_DUMP_LIMIT), \
+                                      0);                              \
+       } while (0)
+
+#define wl1271_dump_ascii(level, prefix, buf, len)     \
+       do { \
+               if (level & wl12xx_debug_level) \
+                       print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
+                                      DUMP_PREFIX_OFFSET, 16, 1,       \
+                                      buf,                             \
+                                      min_t(size_t, len, DEBUG_DUMP_LIMIT), \
+                                      true);                           \
+       } while (0)
+
+#endif /* __DEBUG_H__ */
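
The new debug.h mostly relocates the existing logging macros into a header of their own, so files such as boot.c and cmd.c (which now include "debug.h" above) no longer pull them in indirectly; messages remain gated by a per-category bit in wl12xx_debug_level. A small userspace model of that gating, where drv_debug() and the chosen debug_level value are illustrative rather than driver API:

#include <stdarg.h>
#include <stdio.h>

/* A few categories mirroring the DEBUG_* bit positions defined above. */
enum {
        DBG_BOOT = 1u << 2,
        DBG_CMD  = 1u << 12,
        DBG_ACX  = 1u << 13,
};

/* Assumed runtime setting; the driver reads wl12xx_debug_level instead. */
static unsigned int debug_level = DBG_CMD | DBG_ACX;

static void drv_debug(unsigned int level, const char *fmt, ...)
{
        va_list ap;

        if (!(level & debug_level))     /* category not enabled: drop it */
                return;

        va_start(ap, fmt);
        printf("wl12xx: ");
        vprintf(fmt, ap);
        printf("\n");
        va_end(ap);
}

int main(void)
{
        drv_debug(DBG_CMD, "cmd role start sta %d", 3); /* printed */
        drv_debug(DBG_BOOT, "uploading nvs");           /* filtered out */
        return 0;
}
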
index 3999fd5..15eb3a9 100644
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "acx.h"
 #include "ps.h"
 #include "io.h"
@@ -316,12 +317,19 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
 {
        struct wl1271 *wl = file->private_data;
        int res = 0;
-       char buf[1024];
+       ssize_t ret;
+       char *buf;
+
+#define DRIVER_STATE_BUF_LEN 1024
+
+       buf = kmalloc(DRIVER_STATE_BUF_LEN, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
 
        mutex_lock(&wl->mutex);
 
 #define DRIVER_STATE_PRINT(x, fmt)   \
-       (res += scnprintf(buf + res, sizeof(buf) - res,\
+       (res += scnprintf(buf + res, DRIVER_STATE_BUF_LEN - res,\
                          #x " = " fmt "\n", wl->x))
 
 #define DRIVER_STATE_PRINT_LONG(x) DRIVER_STATE_PRINT(x, "%ld")
@@ -346,29 +354,14 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
        DRIVER_STATE_PRINT_INT(tx_results_count);
        DRIVER_STATE_PRINT_LHEX(flags);
        DRIVER_STATE_PRINT_INT(tx_blocks_freed);
-       DRIVER_STATE_PRINT_INT(tx_security_last_seq_lsb);
        DRIVER_STATE_PRINT_INT(rx_counter);
-       DRIVER_STATE_PRINT_INT(session_counter);
        DRIVER_STATE_PRINT_INT(state);
-       DRIVER_STATE_PRINT_INT(bss_type);
        DRIVER_STATE_PRINT_INT(channel);
-       DRIVER_STATE_PRINT_HEX(rate_set);
-       DRIVER_STATE_PRINT_HEX(basic_rate_set);
-       DRIVER_STATE_PRINT_HEX(basic_rate);
        DRIVER_STATE_PRINT_INT(band);
-       DRIVER_STATE_PRINT_INT(beacon_int);
-       DRIVER_STATE_PRINT_INT(psm_entry_retry);
-       DRIVER_STATE_PRINT_INT(ps_poll_failures);
        DRIVER_STATE_PRINT_INT(power_level);
-       DRIVER_STATE_PRINT_INT(rssi_thold);
-       DRIVER_STATE_PRINT_INT(last_rssi_event);
        DRIVER_STATE_PRINT_INT(sg_enabled);
        DRIVER_STATE_PRINT_INT(enable_11a);
        DRIVER_STATE_PRINT_INT(noise);
-       DRIVER_STATE_PRINT_LHEX(ap_hlid_map[0]);
-       DRIVER_STATE_PRINT_INT(last_tx_hlid);
-       DRIVER_STATE_PRINT_INT(ba_support);
-       DRIVER_STATE_PRINT_HEX(ba_rx_bitmap);
        DRIVER_STATE_PRINT_HEX(ap_fw_ps_map);
        DRIVER_STATE_PRINT_LHEX(ap_ps_map);
        DRIVER_STATE_PRINT_HEX(quirks);
@@ -387,10 +380,13 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
 #undef DRIVER_STATE_PRINT_LHEX
 #undef DRIVER_STATE_PRINT_STR
 #undef DRIVER_STATE_PRINT
+#undef DRIVER_STATE_BUF_LEN
 
        mutex_unlock(&wl->mutex);
 
-       return simple_read_from_buffer(user_buf, count, ppos, buf, res);
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, res);
+       kfree(buf);
+       return ret;
 }
 
 static const struct file_operations driver_state_ops = {
@@ -399,6 +395,115 @@ static const struct file_operations driver_state_ops = {
        .llseek = default_llseek,
 };
 
+static ssize_t vifs_state_read(struct file *file, char __user *user_buf,
+                                size_t count, loff_t *ppos)
+{
+       struct wl1271 *wl = file->private_data;
+       struct wl12xx_vif *wlvif;
+       int ret, res = 0;
+       const int buf_size = 4096;
+       char *buf;
+       char tmp_buf[64];
+
+       buf = kzalloc(buf_size, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       mutex_lock(&wl->mutex);
+
+#define VIF_STATE_PRINT(x, fmt)                                \
+       (res += scnprintf(buf + res, buf_size - res,    \
+                         #x " = " fmt "\n", wlvif->x))
+
+#define VIF_STATE_PRINT_LONG(x)  VIF_STATE_PRINT(x, "%ld")
+#define VIF_STATE_PRINT_INT(x)   VIF_STATE_PRINT(x, "%d")
+#define VIF_STATE_PRINT_STR(x)   VIF_STATE_PRINT(x, "%s")
+#define VIF_STATE_PRINT_LHEX(x)  VIF_STATE_PRINT(x, "0x%lx")
+#define VIF_STATE_PRINT_LLHEX(x) VIF_STATE_PRINT(x, "0x%llx")
+#define VIF_STATE_PRINT_HEX(x)   VIF_STATE_PRINT(x, "0x%x")
+
+#define VIF_STATE_PRINT_NSTR(x, len)                           \
+       do {                                                    \
+               memset(tmp_buf, 0, sizeof(tmp_buf));            \
+               memcpy(tmp_buf, wlvif->x,                       \
+                      min_t(u8, len, sizeof(tmp_buf) - 1));    \
+               res += scnprintf(buf + res, buf_size - res,     \
+                                #x " = %s\n", tmp_buf);        \
+       } while (0)
+
+       wl12xx_for_each_wlvif(wl, wlvif) {
+               VIF_STATE_PRINT_INT(role_id);
+               VIF_STATE_PRINT_INT(bss_type);
+               VIF_STATE_PRINT_LHEX(flags);
+               VIF_STATE_PRINT_INT(p2p);
+               VIF_STATE_PRINT_INT(dev_role_id);
+               VIF_STATE_PRINT_INT(dev_hlid);
+
+               if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+                   wlvif->bss_type == BSS_TYPE_IBSS) {
+                       VIF_STATE_PRINT_INT(sta.hlid);
+                       VIF_STATE_PRINT_INT(sta.ba_rx_bitmap);
+                       VIF_STATE_PRINT_INT(sta.basic_rate_idx);
+                       VIF_STATE_PRINT_INT(sta.ap_rate_idx);
+                       VIF_STATE_PRINT_INT(sta.p2p_rate_idx);
+               } else {
+                       VIF_STATE_PRINT_INT(ap.global_hlid);
+                       VIF_STATE_PRINT_INT(ap.bcast_hlid);
+                       VIF_STATE_PRINT_LHEX(ap.sta_hlid_map[0]);
+                       VIF_STATE_PRINT_INT(ap.mgmt_rate_idx);
+                       VIF_STATE_PRINT_INT(ap.bcast_rate_idx);
+                       VIF_STATE_PRINT_INT(ap.ucast_rate_idx[0]);
+                       VIF_STATE_PRINT_INT(ap.ucast_rate_idx[1]);
+                       VIF_STATE_PRINT_INT(ap.ucast_rate_idx[2]);
+                       VIF_STATE_PRINT_INT(ap.ucast_rate_idx[3]);
+               }
+               VIF_STATE_PRINT_INT(last_tx_hlid);
+               VIF_STATE_PRINT_LHEX(links_map[0]);
+               VIF_STATE_PRINT_NSTR(ssid, wlvif->ssid_len);
+               VIF_STATE_PRINT_INT(band);
+               VIF_STATE_PRINT_INT(channel);
+               VIF_STATE_PRINT_HEX(bitrate_masks[0]);
+               VIF_STATE_PRINT_HEX(bitrate_masks[1]);
+               VIF_STATE_PRINT_HEX(basic_rate_set);
+               VIF_STATE_PRINT_HEX(basic_rate);
+               VIF_STATE_PRINT_HEX(rate_set);
+               VIF_STATE_PRINT_INT(beacon_int);
+               VIF_STATE_PRINT_INT(default_key);
+               VIF_STATE_PRINT_INT(aid);
+               VIF_STATE_PRINT_INT(session_counter);
+               VIF_STATE_PRINT_INT(ps_poll_failures);
+               VIF_STATE_PRINT_INT(psm_entry_retry);
+               VIF_STATE_PRINT_INT(power_level);
+               VIF_STATE_PRINT_INT(rssi_thold);
+               VIF_STATE_PRINT_INT(last_rssi_event);
+               VIF_STATE_PRINT_INT(ba_support);
+               VIF_STATE_PRINT_INT(ba_allowed);
+               VIF_STATE_PRINT_LLHEX(tx_security_seq);
+               VIF_STATE_PRINT_INT(tx_security_last_seq_lsb);
+       }
+
+#undef VIF_STATE_PRINT_INT
+#undef VIF_STATE_PRINT_LONG
+#undef VIF_STATE_PRINT_HEX
+#undef VIF_STATE_PRINT_LHEX
+#undef VIF_STATE_PRINT_LLHEX
+#undef VIF_STATE_PRINT_STR
+#undef VIF_STATE_PRINT_NSTR
+#undef VIF_STATE_PRINT
+
+       mutex_unlock(&wl->mutex);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, res);
+       kfree(buf);
+       return ret;
+}
+
+static const struct file_operations vifs_state_ops = {
+       .read = vifs_state_read,
+       .open = wl1271_open_file_generic,
+       .llseek = default_llseek,
+};
+
 static ssize_t dtim_interval_read(struct file *file, char __user *user_buf,
                                  size_t count, loff_t *ppos)
 {
@@ -520,6 +625,7 @@ static ssize_t rx_streaming_interval_write(struct file *file,
                           size_t count, loff_t *ppos)
 {
        struct wl1271 *wl = file->private_data;
+       struct wl12xx_vif *wlvif;
        unsigned long value;
        int ret;
 
@@ -543,7 +649,9 @@ static ssize_t rx_streaming_interval_write(struct file *file,
        if (ret < 0)
                goto out;
 
-       wl1271_recalc_rx_streaming(wl);
+       wl12xx_for_each_wlvif_sta(wl, wlvif) {
+               wl1271_recalc_rx_streaming(wl, wlvif);
+       }
 
        wl1271_ps_elp_sleep(wl);
 out:
@@ -572,6 +680,7 @@ static ssize_t rx_streaming_always_write(struct file *file,
                           size_t count, loff_t *ppos)
 {
        struct wl1271 *wl = file->private_data;
+       struct wl12xx_vif *wlvif;
        unsigned long value;
        int ret;
 
@@ -595,7 +704,9 @@ static ssize_t rx_streaming_always_write(struct file *file,
        if (ret < 0)
                goto out;
 
-       wl1271_recalc_rx_streaming(wl);
+       wl12xx_for_each_wlvif_sta(wl, wlvif) {
+               wl1271_recalc_rx_streaming(wl, wlvif);
+       }
 
        wl1271_ps_elp_sleep(wl);
 out:
@@ -624,6 +735,7 @@ static ssize_t beacon_filtering_write(struct file *file,
                                      size_t count, loff_t *ppos)
 {
        struct wl1271 *wl = file->private_data;
+       struct wl12xx_vif *wlvif;
        char buf[10];
        size_t len;
        unsigned long value;
@@ -646,7 +758,9 @@ static ssize_t beacon_filtering_write(struct file *file,
        if (ret < 0)
                goto out;
 
-       ret = wl1271_acx_beacon_filter_opt(wl, !!value);
+       wl12xx_for_each_wlvif(wl, wlvif) {
+               ret = wl1271_acx_beacon_filter_opt(wl, wlvif, !!value);
+       }
 
        wl1271_ps_elp_sleep(wl);
 out:
@@ -770,6 +884,7 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl,
        DEBUGFS_ADD(gpio_power, rootdir);
        DEBUGFS_ADD(start_recovery, rootdir);
        DEBUGFS_ADD(driver_state, rootdir);
+       DEBUGFS_ADD(vifs_state, rootdir);
        DEBUGFS_ADD(dtim_interval, rootdir);
        DEBUGFS_ADD(beacon_interval, rootdir);
        DEBUGFS_ADD(beacon_filtering, rootdir);
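
The debugfs changes above stop reading single-vif state out of struct wl1271: vifs_state walks every active interface, and the rx_streaming and beacon_filtering writers loop over the station (or all) vifs. A minimal sketch of how such iteration helpers could be built on a per-device vif list; the list head and member names below are assumptions for illustration, the real macros live in wl12xx.h:

/* sketch only: assumes each wl12xx_vif is linked into wl->wlvif_list */
#define wl12xx_for_each_wlvif(wl, wlvif) \
	list_for_each_entry(wlvif, &(wl)->wlvif_list, list)

#define wl12xx_for_each_wlvif_bss_type(wl, wlvif, _bss_type)	\
	wl12xx_for_each_wlvif(wl, wlvif)			\
		if ((wlvif)->bss_type == (_bss_type))

#define wl12xx_for_each_wlvif_sta(wl, wlvif) \
	wl12xx_for_each_wlvif_bss_type(wl, wlvif, BSS_TYPE_STA_BSS)

#define wl12xx_for_each_wlvif_ap(wl, wlvif) \
	wl12xx_for_each_wlvif_bss_type(wl, wlvif, BSS_TYPE_AP_BSS)
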
index 674ad2a..00ce794 100644 (file)
@@ -22,6 +22,7 @@
  */
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "reg.h"
 #include "io.h"
 #include "event.h"
 
 void wl1271_pspoll_work(struct work_struct *work)
 {
+       struct ieee80211_vif *vif;
+       struct wl12xx_vif *wlvif;
        struct delayed_work *dwork;
        struct wl1271 *wl;
        int ret;
 
        dwork = container_of(work, struct delayed_work, work);
-       wl = container_of(dwork, struct wl1271, pspoll_work);
+       wlvif = container_of(dwork, struct wl12xx_vif, pspoll_work);
+       vif = container_of((void *)wlvif, struct ieee80211_vif, drv_priv);
+       wl = wlvif->wl;
 
        wl1271_debug(DEBUG_EVENT, "pspoll work");
 
@@ -45,10 +50,10 @@ void wl1271_pspoll_work(struct work_struct *work)
        if (unlikely(wl->state == WL1271_STATE_OFF))
                goto out;
 
-       if (!test_and_clear_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags))
+       if (!test_and_clear_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags))
                goto out;
 
-       if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+       if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
                goto out;
 
        /*
@@ -60,31 +65,33 @@ void wl1271_pspoll_work(struct work_struct *work)
        if (ret < 0)
                goto out;
 
-       wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, wl->basic_rate, true);
+       wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE,
+                          wlvif->basic_rate, true);
 
        wl1271_ps_elp_sleep(wl);
 out:
        mutex_unlock(&wl->mutex);
 };
 
-static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl)
+static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl,
+                                             struct wl12xx_vif *wlvif)
 {
        int delay = wl->conf.conn.ps_poll_recovery_period;
        int ret;
 
-       wl->ps_poll_failures++;
-       if (wl->ps_poll_failures == 1)
+       wlvif->ps_poll_failures++;
+       if (wlvif->ps_poll_failures == 1)
                wl1271_info("AP with dysfunctional ps-poll, "
                            "trying to work around it.");
 
        /* force active mode receive data from the AP */
-       if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
-               ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
-                                        wl->basic_rate, true);
+       if (test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
+               ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE,
+                                        wlvif->basic_rate, true);
                if (ret < 0)
                        return;
-               set_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags);
-               ieee80211_queue_delayed_work(wl->hw, &wl->pspoll_work,
+               set_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags);
+               ieee80211_queue_delayed_work(wl->hw, &wlvif->pspoll_work,
                                             msecs_to_jiffies(delay));
        }
 
@@ -97,6 +104,7 @@ static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl)
 }
 
 static int wl1271_event_ps_report(struct wl1271 *wl,
+                                 struct wl12xx_vif *wlvif,
                                  struct event_mailbox *mbox,
                                  bool *beacon_loss)
 {
@@ -109,41 +117,37 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
        case EVENT_ENTER_POWER_SAVE_FAIL:
                wl1271_debug(DEBUG_PSM, "PSM entry failed");
 
-               if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+               if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
                        /* remain in active mode */
-                       wl->psm_entry_retry = 0;
+                       wlvif->psm_entry_retry = 0;
                        break;
                }
 
-               if (wl->psm_entry_retry < total_retries) {
-                       wl->psm_entry_retry++;
-                       ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
-                                                wl->basic_rate, true);
+               if (wlvif->psm_entry_retry < total_retries) {
+                       wlvif->psm_entry_retry++;
+                       ret = wl1271_ps_set_mode(wl, wlvif,
+                                                STATION_POWER_SAVE_MODE,
+                                                wlvif->basic_rate, true);
                } else {
                        wl1271_info("No ack to nullfunc from AP.");
-                       wl->psm_entry_retry = 0;
+                       wlvif->psm_entry_retry = 0;
                        *beacon_loss = true;
                }
                break;
        case EVENT_ENTER_POWER_SAVE_SUCCESS:
-               wl->psm_entry_retry = 0;
-
-               /* enable beacon filtering */
-               ret = wl1271_acx_beacon_filter_opt(wl, true);
-               if (ret < 0)
-                       break;
+               wlvif->psm_entry_retry = 0;
 
                /*
                 * BET has only a minor effect in 5GHz and masks
                 * channel switch IEs, so we only enable BET on 2.4GHz
                */
-               if (wl->band == IEEE80211_BAND_2GHZ)
+               if (wlvif->band == IEEE80211_BAND_2GHZ)
                        /* enable beacon early termination */
-                       ret = wl1271_acx_bet_enable(wl, true);
+                       ret = wl1271_acx_bet_enable(wl, wlvif, true);
 
-               if (wl->ps_compl) {
-                       complete(wl->ps_compl);
-                       wl->ps_compl = NULL;
+               if (wlvif->ps_compl) {
+                       complete(wlvif->ps_compl);
+                       wlvif->ps_compl = NULL;
                }
                break;
        default:
@@ -154,39 +158,44 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
 }
 
 static void wl1271_event_rssi_trigger(struct wl1271 *wl,
+                                     struct wl12xx_vif *wlvif,
                                      struct event_mailbox *mbox)
 {
+       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
        enum nl80211_cqm_rssi_threshold_event event;
        s8 metric = mbox->rssi_snr_trigger_metric[0];
 
        wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric);
 
-       if (metric <= wl->rssi_thold)
+       if (metric <= wlvif->rssi_thold)
                event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
        else
                event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
 
-       if (event != wl->last_rssi_event)
-               ieee80211_cqm_rssi_notify(wl->vif, event, GFP_KERNEL);
-       wl->last_rssi_event = event;
+       if (event != wlvif->last_rssi_event)
+               ieee80211_cqm_rssi_notify(vif, event, GFP_KERNEL);
+       wlvif->last_rssi_event = event;
 }
 
-static void wl1271_stop_ba_event(struct wl1271 *wl)
+static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
-       if (wl->bss_type != BSS_TYPE_AP_BSS) {
-               if (!wl->ba_rx_bitmap)
+       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+
+       if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
+               if (!wlvif->sta.ba_rx_bitmap)
                        return;
-               ieee80211_stop_rx_ba_session(wl->vif, wl->ba_rx_bitmap,
-                                            wl->bssid);
+               ieee80211_stop_rx_ba_session(vif, wlvif->sta.ba_rx_bitmap,
+                                            vif->bss_conf.bssid);
        } else {
-               int i;
+               u8 hlid;
                struct wl1271_link *lnk;
-               for (i = WL1271_AP_STA_HLID_START; i < AP_MAX_LINKS; i++) {
-                       lnk = &wl->links[i];
-                       if (!wl1271_is_active_sta(wl, i) || !lnk->ba_bitmap)
+               for_each_set_bit(hlid, wlvif->ap.sta_hlid_map,
+                                WL12XX_MAX_LINKS) {
+                       lnk = &wl->links[hlid];
+                       if (!lnk->ba_bitmap)
                                continue;
 
-                       ieee80211_stop_rx_ba_session(wl->vif,
+                       ieee80211_stop_rx_ba_session(vif,
                                                     lnk->ba_bitmap,
                                                     lnk->addr);
                }
@@ -196,14 +205,23 @@ static void wl1271_stop_ba_event(struct wl1271 *wl)
 static void wl12xx_event_soft_gemini_sense(struct wl1271 *wl,
                                               u8 enable)
 {
+       struct ieee80211_vif *vif;
+       struct wl12xx_vif *wlvif;
+
        if (enable) {
                /* disable dynamic PS when requested by the firmware */
-               ieee80211_disable_dyn_ps(wl->vif);
+               wl12xx_for_each_wlvif_sta(wl, wlvif) {
+                       vif = wl12xx_wlvif_to_vif(wlvif);
+                       ieee80211_disable_dyn_ps(vif);
+               }
                set_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
        } else {
-               ieee80211_enable_dyn_ps(wl->vif);
                clear_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
-               wl1271_recalc_rx_streaming(wl);
+               wl12xx_for_each_wlvif_sta(wl, wlvif) {
+                       vif = wl12xx_wlvif_to_vif(wlvif);
+                       ieee80211_enable_dyn_ps(vif);
+                       wl1271_recalc_rx_streaming(wl, wlvif);
+               }
        }
 
 }
@@ -217,10 +235,11 @@ static void wl1271_event_mbox_dump(struct event_mailbox *mbox)
 
 static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
 {
+       struct ieee80211_vif *vif;
+       struct wl12xx_vif *wlvif;
        int ret;
        u32 vector;
        bool beacon_loss = false;
-       bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
        bool disconnect_sta = false;
        unsigned long sta_bitmap = 0;
 
@@ -234,7 +253,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
                wl1271_debug(DEBUG_EVENT, "status: 0x%x",
                             mbox->scheduled_scan_status);
 
-               wl1271_scan_stm(wl);
+               wl1271_scan_stm(wl, wl->scan_vif);
        }
 
        if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) {
@@ -253,8 +272,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
                }
        }
 
-       if (vector & SOFT_GEMINI_SENSE_EVENT_ID &&
-           wl->bss_type == BSS_TYPE_STA_BSS)
+       if (vector & SOFT_GEMINI_SENSE_EVENT_ID)
                wl12xx_event_soft_gemini_sense(wl,
                                               mbox->soft_gemini_sense_info);
 
@@ -267,40 +285,54 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
         * BSS_LOSE_EVENT, beacon loss has to be reported to the stack.
         *
         */
-       if ((vector & BSS_LOSE_EVENT_ID) && !is_ap) {
+       if (vector & BSS_LOSE_EVENT_ID) {
+               /* TODO: check for multi-role */
                wl1271_info("Beacon loss detected.");
 
                /* indicate to the stack, that beacons have been lost */
                beacon_loss = true;
        }
 
-       if ((vector & PS_REPORT_EVENT_ID) && !is_ap) {
+       if (vector & PS_REPORT_EVENT_ID) {
                wl1271_debug(DEBUG_EVENT, "PS_REPORT_EVENT");
-               ret = wl1271_event_ps_report(wl, mbox, &beacon_loss);
-               if (ret < 0)
-                       return ret;
+               wl12xx_for_each_wlvif_sta(wl, wlvif) {
+                       ret = wl1271_event_ps_report(wl, wlvif,
+                                                    mbox, &beacon_loss);
+                       if (ret < 0)
+                               return ret;
+               }
        }
 
-       if ((vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID) && !is_ap)
-               wl1271_event_pspoll_delivery_fail(wl);
+       if (vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID)
+               wl12xx_for_each_wlvif_sta(wl, wlvif) {
+                       wl1271_event_pspoll_delivery_fail(wl, wlvif);
+               }
 
        if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
+               /* TODO: check actual multi-role support */
                wl1271_debug(DEBUG_EVENT, "RSSI_SNR_TRIGGER_0_EVENT");
-               if (wl->vif)
-                       wl1271_event_rssi_trigger(wl, mbox);
+               wl12xx_for_each_wlvif_sta(wl, wlvif) {
+                       wl1271_event_rssi_trigger(wl, wlvif, mbox);
+               }
        }
 
-       if ((vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID)) {
+       if (vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID) {
+               u8 role_id = mbox->role_id;
                wl1271_debug(DEBUG_EVENT, "BA_SESSION_RX_CONSTRAINT_EVENT_ID. "
-                            "ba_allowed = 0x%x", mbox->rx_ba_allowed);
+                            "ba_allowed = 0x%x, role_id=%d",
+                            mbox->rx_ba_allowed, role_id);
 
-               wl->ba_allowed = !!mbox->rx_ba_allowed;
+               wl12xx_for_each_wlvif(wl, wlvif) {
+                       if (role_id != 0xff && role_id != wlvif->role_id)
+                               continue;
 
-               if (wl->vif && !wl->ba_allowed)
-                       wl1271_stop_ba_event(wl);
+                       wlvif->ba_allowed = !!mbox->rx_ba_allowed;
+                       if (!wlvif->ba_allowed)
+                               wl1271_stop_ba_event(wl, wlvif);
+               }
        }
 
-       if ((vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID) && !is_ap) {
+       if (vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID) {
                wl1271_debug(DEBUG_EVENT, "CHANNEL_SWITCH_COMPLETE_EVENT_ID. "
                                          "status = 0x%x",
                                          mbox->channel_switch_status);
@@ -309,50 +341,65 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
                 * 1) channel switch complete with status=0
                 * 2) channel switch failed status=1
                 */
-               if (test_and_clear_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags) &&
-                   (wl->vif))
-                       ieee80211_chswitch_done(wl->vif,
-                               mbox->channel_switch_status ? false : true);
+
+               /* TODO: configure only the relevant vif */
+               wl12xx_for_each_wlvif_sta(wl, wlvif) {
+                       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+                       bool success;
+
+                       if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS,
+                                               &wlvif->flags))
+                               continue;
+
+                       success = mbox->channel_switch_status ? false : true;
+                       ieee80211_chswitch_done(vif, success);
+               }
        }
 
        if ((vector & DUMMY_PACKET_EVENT_ID)) {
                wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
-               if (wl->vif)
-                       wl1271_tx_dummy_packet(wl);
+               wl1271_tx_dummy_packet(wl);
        }
 
        /*
         * "TX retries exceeded" has a different meaning according to mode.
         * In AP mode the offending station is disconnected.
         */
-       if ((vector & MAX_TX_RETRY_EVENT_ID) && is_ap) {
+       if (vector & MAX_TX_RETRY_EVENT_ID) {
                wl1271_debug(DEBUG_EVENT, "MAX_TX_RETRY_EVENT_ID");
                sta_bitmap |= le16_to_cpu(mbox->sta_tx_retry_exceeded);
                disconnect_sta = true;
        }
 
-       if ((vector & INACTIVE_STA_EVENT_ID) && is_ap) {
+       if (vector & INACTIVE_STA_EVENT_ID) {
                wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID");
                sta_bitmap |= le16_to_cpu(mbox->sta_aging_status);
                disconnect_sta = true;
        }
 
-       if (is_ap && disconnect_sta) {
+       if (disconnect_sta) {
                u32 num_packets = wl->conf.tx.max_tx_retries;
                struct ieee80211_sta *sta;
                const u8 *addr;
                int h;
 
-               for (h = find_first_bit(&sta_bitmap, AP_MAX_LINKS);
-                    h < AP_MAX_LINKS;
-                    h = find_next_bit(&sta_bitmap, AP_MAX_LINKS, h+1)) {
-                       if (!wl1271_is_active_sta(wl, h))
+               for_each_set_bit(h, &sta_bitmap, WL12XX_MAX_LINKS) {
+                       bool found = false;
+                       /* find the ap vif connected to this sta */
+                       wl12xx_for_each_wlvif_ap(wl, wlvif) {
+                               if (!test_bit(h, wlvif->ap.sta_hlid_map))
+                                       continue;
+                               found = true;
+                               break;
+                       }
+                       if (!found)
                                continue;
 
+                       vif = wl12xx_wlvif_to_vif(wlvif);
                        addr = wl->links[h].addr;
 
                        rcu_read_lock();
-                       sta = ieee80211_find_sta(wl->vif, addr);
+                       sta = ieee80211_find_sta(vif, addr);
                        if (sta) {
                                wl1271_debug(DEBUG_EVENT, "remove sta %d", h);
                                ieee80211_report_low_ack(sta, num_packets);
@@ -361,8 +408,11 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
                }
        }
 
-       if (wl->vif && beacon_loss)
-               ieee80211_connection_loss(wl->vif);
+       if (beacon_loss)
+               wl12xx_for_each_wlvif_sta(wl, wlvif) {
+                       vif = wl12xx_wlvif_to_vif(wlvif);
+                       ieee80211_connection_loss(vif);
+               }
 
        return 0;
 }
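
The event handlers above call back into mac80211 with the ieee80211_vif recovered from the driver vif (wl12xx_wlvif_to_vif() in the RSSI and BA paths). The conversion relies on wl12xx_vif being embedded in the vif's drv_priv area, so each direction is a fixed-offset cast; a sketch of the two helpers as assumed here:

static inline struct ieee80211_vif *
wl12xx_wlvif_to_vif(struct wl12xx_vif *wlvif)
{
	/* wl12xx_vif sits in ieee80211_vif's drv_priv storage */
	return container_of((void *)wlvif, struct ieee80211_vif, drv_priv);
}

static inline struct wl12xx_vif *wl12xx_vif_to_data(struct ieee80211_vif *vif)
{
	return (struct wl12xx_vif *)vif->drv_priv;
}
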
index 49c1a0e..1d878ba 100644 (file)
@@ -132,7 +132,4 @@ void wl1271_event_mbox_config(struct wl1271 *wl);
 int wl1271_event_handle(struct wl1271 *wl, u8 mbox);
 void wl1271_pspoll_work(struct work_struct *work);
 
-/* Functions from main.c */
-bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid);
-
 #endif
index 04db64c..ca7ee59 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 
+#include "debug.h"
 #include "init.h"
 #include "wl12xx_80211.h"
 #include "acx.h"
@@ -33,7 +34,7 @@
 #include "tx.h"
 #include "io.h"
 
-int wl1271_sta_init_templates_config(struct wl1271 *wl)
+int wl1271_init_templates_config(struct wl1271 *wl)
 {
        int ret, i;
 
@@ -64,7 +65,7 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
 
        ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
                                      sizeof
-                                     (struct wl12xx_qos_null_data_template),
+                                     (struct ieee80211_qos_hdr),
                                      0, WL1271_RATE_AUTOMATIC);
        if (ret < 0)
                return ret;
@@ -88,10 +89,33 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
        if (ret < 0)
                return ret;
 
+       /*
+        * Put very large empty placeholders for all templates. These
+        * reserve memory for later.
+        */
+       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL,
+                                     WL1271_CMD_TEMPL_MAX_SIZE,
+                                     0, WL1271_RATE_AUTOMATIC);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL,
+                                     WL1271_CMD_TEMPL_MAX_SIZE,
+                                     0, WL1271_RATE_AUTOMATIC);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL,
+                                     sizeof
+                                     (struct wl12xx_disconn_template),
+                                     0, WL1271_RATE_AUTOMATIC);
+       if (ret < 0)
+               return ret;
+
        for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
                ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, NULL,
-                                             WL1271_CMD_TEMPL_DFLT_SIZE, i,
-                                             WL1271_RATE_AUTOMATIC);
+                                             sizeof(struct ieee80211_qos_hdr),
+                                             i, WL1271_RATE_AUTOMATIC);
                if (ret < 0)
                        return ret;
        }
@@ -99,7 +123,8 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
        return 0;
 }
 
-static int wl1271_ap_init_deauth_template(struct wl1271 *wl)
+static int wl1271_ap_init_deauth_template(struct wl1271 *wl,
+                                         struct wl12xx_vif *wlvif)
 {
        struct wl12xx_disconn_template *tmpl;
        int ret;
@@ -114,7 +139,7 @@ static int wl1271_ap_init_deauth_template(struct wl1271 *wl)
        tmpl->header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT |
                                             IEEE80211_STYPE_DEAUTH);
 
-       rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+       rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
        ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP,
                                      tmpl, sizeof(*tmpl), 0, rate);
 
@@ -123,8 +148,10 @@ out:
        return ret;
 }
 
-static int wl1271_ap_init_null_template(struct wl1271 *wl)
+static int wl1271_ap_init_null_template(struct wl1271 *wl,
+                                       struct ieee80211_vif *vif)
 {
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        struct ieee80211_hdr_3addr *nullfunc;
        int ret;
        u32 rate;
@@ -141,10 +168,10 @@ static int wl1271_ap_init_null_template(struct wl1271 *wl)
 
        /* nullfunc->addr1 is filled by FW */
 
-       memcpy(nullfunc->addr2, wl->mac_addr, ETH_ALEN);
-       memcpy(nullfunc->addr3, wl->mac_addr, ETH_ALEN);
+       memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
+       memcpy(nullfunc->addr3, vif->addr, ETH_ALEN);
 
-       rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+       rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
        ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, nullfunc,
                                      sizeof(*nullfunc), 0, rate);
 
@@ -153,8 +180,10 @@ out:
        return ret;
 }
 
-static int wl1271_ap_init_qos_null_template(struct wl1271 *wl)
+static int wl1271_ap_init_qos_null_template(struct wl1271 *wl,
+                                           struct ieee80211_vif *vif)
 {
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        struct ieee80211_qos_hdr *qosnull;
        int ret;
        u32 rate;
@@ -171,10 +200,10 @@ static int wl1271_ap_init_qos_null_template(struct wl1271 *wl)
 
        /* qosnull->addr1 is filled by FW */
 
-       memcpy(qosnull->addr2, wl->mac_addr, ETH_ALEN);
-       memcpy(qosnull->addr3, wl->mac_addr, ETH_ALEN);
+       memcpy(qosnull->addr2, vif->addr, ETH_ALEN);
+       memcpy(qosnull->addr3, vif->addr, ETH_ALEN);
 
-       rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+       rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
        ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, qosnull,
                                      sizeof(*qosnull), 0, rate);
 
@@ -183,49 +212,6 @@ out:
        return ret;
 }
 
-static int wl1271_ap_init_templates_config(struct wl1271 *wl)
-{
-       int ret;
-
-       /*
-        * Put very large empty placeholders for all templates. These
-        * reserve memory for later.
-        */
-       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL,
-                                     WL1271_CMD_TEMPL_MAX_SIZE,
-                                     0, WL1271_RATE_AUTOMATIC);
-       if (ret < 0)
-               return ret;
-
-       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL,
-                                     WL1271_CMD_TEMPL_MAX_SIZE,
-                                     0, WL1271_RATE_AUTOMATIC);
-       if (ret < 0)
-               return ret;
-
-       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL,
-                                     sizeof
-                                     (struct wl12xx_disconn_template),
-                                     0, WL1271_RATE_AUTOMATIC);
-       if (ret < 0)
-               return ret;
-
-       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL,
-                                     sizeof(struct wl12xx_null_data_template),
-                                     0, WL1271_RATE_AUTOMATIC);
-       if (ret < 0)
-               return ret;
-
-       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
-                                     sizeof
-                                     (struct wl12xx_qos_null_data_template),
-                                     0, WL1271_RATE_AUTOMATIC);
-       if (ret < 0)
-               return ret;
-
-       return 0;
-}
-
 static int wl12xx_init_rx_config(struct wl1271 *wl)
 {
        int ret;
@@ -237,39 +223,37 @@ static int wl12xx_init_rx_config(struct wl1271 *wl)
        return 0;
 }
 
-int wl1271_init_phy_config(struct wl1271 *wl)
+static int wl12xx_init_phy_vif_config(struct wl1271 *wl,
+                                           struct wl12xx_vif *wlvif)
 {
        int ret;
 
-       ret = wl1271_acx_pd_threshold(wl);
-       if (ret < 0)
-               return ret;
-
-       ret = wl1271_acx_slot(wl, DEFAULT_SLOT_TIME);
+       ret = wl1271_acx_slot(wl, wlvif, DEFAULT_SLOT_TIME);
        if (ret < 0)
                return ret;
 
-       ret = wl1271_acx_service_period_timeout(wl);
+       ret = wl1271_acx_service_period_timeout(wl, wlvif);
        if (ret < 0)
                return ret;
 
-       ret = wl1271_acx_rts_threshold(wl, wl->hw->wiphy->rts_threshold);
+       ret = wl1271_acx_rts_threshold(wl, wlvif, wl->hw->wiphy->rts_threshold);
        if (ret < 0)
                return ret;
 
        return 0;
 }
 
-static int wl1271_init_beacon_filter(struct wl1271 *wl)
+static int wl1271_init_sta_beacon_filter(struct wl1271 *wl,
+                                        struct wl12xx_vif *wlvif)
 {
        int ret;
 
-       /* disable beacon filtering at this stage */
-       ret = wl1271_acx_beacon_filter_opt(wl, false);
+       ret = wl1271_acx_beacon_filter_table(wl, wlvif);
        if (ret < 0)
                return ret;
 
-       ret = wl1271_acx_beacon_filter_table(wl);
+       /* enable beacon filtering */
+       ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
        if (ret < 0)
                return ret;
 
@@ -302,11 +286,12 @@ int wl1271_init_energy_detection(struct wl1271 *wl)
        return 0;
 }
 
-static int wl1271_init_beacon_broadcast(struct wl1271 *wl)
+static int wl1271_init_beacon_broadcast(struct wl1271 *wl,
+                                       struct wl12xx_vif *wlvif)
 {
        int ret;
 
-       ret = wl1271_acx_bcn_dtim_options(wl);
+       ret = wl1271_acx_bcn_dtim_options(wl, wlvif);
        if (ret < 0)
                return ret;
 
@@ -327,36 +312,13 @@ static int wl12xx_init_fwlog(struct wl1271 *wl)
        return 0;
 }
 
-static int wl1271_sta_hw_init(struct wl1271 *wl)
+/* generic sta initialization (non vif-specific) */
+static int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        int ret;
 
-       if (wl->chip.id != CHIP_ID_1283_PG20) {
-               ret = wl1271_cmd_ext_radio_parms(wl);
-               if (ret < 0)
-                       return ret;
-       }
-
        /* PS config */
-       ret = wl1271_acx_config_ps(wl);
-       if (ret < 0)
-               return ret;
-
-       ret = wl1271_sta_init_templates_config(wl);
-       if (ret < 0)
-               return ret;
-
-       ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
-       if (ret < 0)
-               return ret;
-
-       /* Initialize connection monitoring thresholds */
-       ret = wl1271_acx_conn_monit_params(wl, false);
-       if (ret < 0)
-               return ret;
-
-       /* Beacon filtering */
-       ret = wl1271_init_beacon_filter(wl);
+       ret = wl12xx_acx_config_ps(wl, wlvif);
        if (ret < 0)
                return ret;
 
@@ -365,103 +327,61 @@ static int wl1271_sta_hw_init(struct wl1271 *wl)
        if (ret < 0)
                return ret;
 
-       /* Beacons and broadcast settings */
-       ret = wl1271_init_beacon_broadcast(wl);
-       if (ret < 0)
-               return ret;
-
-       /* Configure for ELP power saving */
-       ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
-       if (ret < 0)
-               return ret;
-
-       /* Configure rssi/snr averaging weights */
-       ret = wl1271_acx_rssi_snr_avg_weights(wl);
-       if (ret < 0)
-               return ret;
-
-       ret = wl1271_acx_sta_rate_policies(wl);
-       if (ret < 0)
-               return ret;
-
-       ret = wl12xx_acx_mem_cfg(wl);
-       if (ret < 0)
-               return ret;
-
-       /* Configure the FW logger */
-       ret = wl12xx_init_fwlog(wl);
+       ret = wl1271_acx_sta_rate_policies(wl, wlvif);
        if (ret < 0)
                return ret;
 
        return 0;
 }
 
-static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl)
+static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl,
+                                      struct ieee80211_vif *vif)
 {
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        int ret, i;
 
        /* disable all keep-alive templates */
        for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
-               ret = wl1271_acx_keep_alive_config(wl, i,
+               ret = wl1271_acx_keep_alive_config(wl, wlvif, i,
                                                   ACX_KEEP_ALIVE_TPL_INVALID);
                if (ret < 0)
                        return ret;
        }
 
        /* disable the keep-alive feature */
-       ret = wl1271_acx_keep_alive_mode(wl, false);
+       ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
        if (ret < 0)
                return ret;
 
        return 0;
 }
 
-static int wl1271_ap_hw_init(struct wl1271 *wl)
+/* generic ap initialization (non vif-specific) */
+static int wl1271_ap_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        int ret;
 
-       ret = wl1271_ap_init_templates_config(wl);
-       if (ret < 0)
-               return ret;
-
-       /* Configure for power always on */
-       ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
-       if (ret < 0)
-               return ret;
-
-       ret = wl1271_init_ap_rates(wl);
-       if (ret < 0)
-               return ret;
-
-       ret = wl1271_acx_ap_max_tx_retry(wl);
-       if (ret < 0)
-               return ret;
-
-       ret = wl12xx_acx_mem_cfg(wl);
-       if (ret < 0)
-               return ret;
-
-       /* initialize Tx power */
-       ret = wl1271_acx_tx_power(wl, wl->power_level);
+       ret = wl1271_init_ap_rates(wl, wlvif);
        if (ret < 0)
                return ret;
 
        return 0;
 }
 
-int wl1271_ap_init_templates(struct wl1271 *wl)
+int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif)
 {
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        int ret;
 
-       ret = wl1271_ap_init_deauth_template(wl);
+       ret = wl1271_ap_init_deauth_template(wl, wlvif);
        if (ret < 0)
                return ret;
 
-       ret = wl1271_ap_init_null_template(wl);
+       ret = wl1271_ap_init_null_template(wl, vif);
        if (ret < 0)
                return ret;
 
-       ret = wl1271_ap_init_qos_null_template(wl);
+       ret = wl1271_ap_init_qos_null_template(wl, vif);
        if (ret < 0)
                return ret;
 
@@ -469,43 +389,45 @@ int wl1271_ap_init_templates(struct wl1271 *wl)
         * when operating as AP we want to receive external beacons for
         * configuring ERP protection.
         */
-       ret = wl1271_acx_beacon_filter_opt(wl, false);
+       ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
        if (ret < 0)
                return ret;
 
        return 0;
 }
 
-static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl)
+static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl,
+                                     struct ieee80211_vif *vif)
 {
-       return wl1271_ap_init_templates(wl);
+       return wl1271_ap_init_templates(wl, vif);
 }
 
-int wl1271_init_ap_rates(struct wl1271 *wl)
+int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        int i, ret;
        struct conf_tx_rate_class rc;
        u32 supported_rates;
 
-       wl1271_debug(DEBUG_AP, "AP basic rate set: 0x%x", wl->basic_rate_set);
+       wl1271_debug(DEBUG_AP, "AP basic rate set: 0x%x",
+                    wlvif->basic_rate_set);
 
-       if (wl->basic_rate_set == 0)
+       if (wlvif->basic_rate_set == 0)
                return -EINVAL;
 
-       rc.enabled_rates = wl->basic_rate_set;
+       rc.enabled_rates = wlvif->basic_rate_set;
        rc.long_retry_limit = 10;
        rc.short_retry_limit = 10;
        rc.aflags = 0;
-       ret = wl1271_acx_ap_rate_policy(wl, &rc, ACX_TX_AP_MODE_MGMT_RATE);
+       ret = wl1271_acx_ap_rate_policy(wl, &rc, wlvif->ap.mgmt_rate_idx);
        if (ret < 0)
                return ret;
 
        /* use the min basic rate for AP broadcast/multicast */
-       rc.enabled_rates = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+       rc.enabled_rates = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
        rc.short_retry_limit = 10;
        rc.long_retry_limit = 10;
        rc.aflags = 0;
-       ret = wl1271_acx_ap_rate_policy(wl, &rc, ACX_TX_AP_MODE_BCST_RATE);
+       ret = wl1271_acx_ap_rate_policy(wl, &rc, wlvif->ap.bcast_rate_idx);
        if (ret < 0)
                return ret;
 
@@ -513,7 +435,7 @@ int wl1271_init_ap_rates(struct wl1271 *wl)
         * If the basic rates contain OFDM rates, use OFDM only
         * rates for unicast TX as well. Else use all supported rates.
         */
-       if ((wl->basic_rate_set & CONF_TX_OFDM_RATES))
+       if ((wlvif->basic_rate_set & CONF_TX_OFDM_RATES))
                supported_rates = CONF_TX_OFDM_RATES;
        else
                supported_rates = CONF_TX_AP_ENABLED_RATES;
@@ -527,7 +449,8 @@ int wl1271_init_ap_rates(struct wl1271 *wl)
                rc.short_retry_limit = 10;
                rc.long_retry_limit = 10;
                rc.aflags = 0;
-               ret = wl1271_acx_ap_rate_policy(wl, &rc, i);
+               ret = wl1271_acx_ap_rate_policy(wl, &rc,
+                                               wlvif->ap.ucast_rate_idx[i]);
                if (ret < 0)
                        return ret;
        }
@@ -535,24 +458,23 @@ int wl1271_init_ap_rates(struct wl1271 *wl)
        return 0;
 }
 
-static int wl1271_set_ba_policies(struct wl1271 *wl)
+static int wl1271_set_ba_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        /* Reset the BA RX indicators */
-       wl->ba_rx_bitmap = 0;
-       wl->ba_allowed = true;
+       wlvif->ba_allowed = true;
        wl->ba_rx_session_count = 0;
 
        /* BA is supported in STA/AP modes */
-       if (wl->bss_type != BSS_TYPE_AP_BSS &&
-           wl->bss_type != BSS_TYPE_STA_BSS) {
-               wl->ba_support = false;
+       if (wlvif->bss_type != BSS_TYPE_AP_BSS &&
+           wlvif->bss_type != BSS_TYPE_STA_BSS) {
+               wlvif->ba_support = false;
                return 0;
        }
 
-       wl->ba_support = true;
+       wlvif->ba_support = true;
 
        /* 802.11n initiator BA session setting */
-       return wl12xx_acx_set_ba_initiator_policy(wl);
+       return wl12xx_acx_set_ba_initiator_policy(wl, wlvif);
 }
 
 int wl1271_chip_specific_init(struct wl1271 *wl)
@@ -562,7 +484,7 @@ int wl1271_chip_specific_init(struct wl1271 *wl)
        if (wl->chip.id == CHIP_ID_1283_PG20) {
                u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE;
 
-               if (wl->quirks & WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT)
+               if (!(wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT))
                        /* Enable SDIO padding */
                        host_cfg_bitmap |= HOST_IF_CFG_TX_PAD_TO_SDIO_BLK;
 
@@ -575,39 +497,186 @@ out:
        return ret;
 }
 
+/* vif-specific STA initialization */
+static int wl12xx_init_sta_role(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+       int ret;
 
-int wl1271_hw_init(struct wl1271 *wl)
+       ret = wl1271_acx_group_address_tbl(wl, wlvif, true, NULL, 0);
+       if (ret < 0)
+               return ret;
+
+       /* Initialize connection monitoring thresholds */
+       ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
+       if (ret < 0)
+               return ret;
+
+       /* Beacon filtering */
+       ret = wl1271_init_sta_beacon_filter(wl, wlvif);
+       if (ret < 0)
+               return ret;
+
+       /* Beacons and broadcast settings */
+       ret = wl1271_init_beacon_broadcast(wl, wlvif);
+       if (ret < 0)
+               return ret;
+
+       /* Configure rssi/snr averaging weights */
+       ret = wl1271_acx_rssi_snr_avg_weights(wl, wlvif);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+/* vif-specific AP initialization */
+static int wl12xx_init_ap_role(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
+       int ret;
+
+       ret = wl1271_acx_ap_max_tx_retry(wl, wlvif);
+       if (ret < 0)
+               return ret;
+
+       /* initialize Tx power */
+       ret = wl1271_acx_tx_power(wl, wlvif, wlvif->power_level);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif)
+{
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        struct conf_tx_ac_category *conf_ac;
        struct conf_tx_tid *conf_tid;
+       bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
        int ret, i;
-       bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
 
-       if (wl->chip.id == CHIP_ID_1283_PG20)
-               ret = wl128x_cmd_general_parms(wl);
-       else
-               ret = wl1271_cmd_general_parms(wl);
+       /*
+        * consider all existing roles before configuring psm.
+        * TODO: reconfigure on interface removal.
+        */
+       if (!wl->ap_count) {
+               if (is_ap) {
+                       /* Configure for power always on */
+                       ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
+                       if (ret < 0)
+                               return ret;
+               } else if (!wl->sta_count) {
+                       /* Configure for ELP power saving */
+                       ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
+                       if (ret < 0)
+                               return ret;
+               }
+       }
+
+       /* Mode specific init */
+       if (is_ap) {
+               ret = wl1271_ap_hw_init(wl, wlvif);
+               if (ret < 0)
+                       return ret;
+
+               ret = wl12xx_init_ap_role(wl, wlvif);
+               if (ret < 0)
+                       return ret;
+       } else {
+               ret = wl1271_sta_hw_init(wl, wlvif);
+               if (ret < 0)
+                       return ret;
+
+               ret = wl12xx_init_sta_role(wl, wlvif);
+               if (ret < 0)
+                       return ret;
+       }
+
+       wl12xx_init_phy_vif_config(wl, wlvif);
+
+       /* Default TID/AC configuration */
+       BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count);
+       for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
+               conf_ac = &wl->conf.tx.ac_conf[i];
+               ret = wl1271_acx_ac_cfg(wl, wlvif, conf_ac->ac,
+                                       conf_ac->cw_min, conf_ac->cw_max,
+                                       conf_ac->aifsn, conf_ac->tx_op_limit);
+               if (ret < 0)
+                       return ret;
+
+               conf_tid = &wl->conf.tx.tid_conf[i];
+               ret = wl1271_acx_tid_cfg(wl, wlvif,
+                                        conf_tid->queue_id,
+                                        conf_tid->channel_type,
+                                        conf_tid->tsid,
+                                        conf_tid->ps_scheme,
+                                        conf_tid->ack_policy,
+                                        conf_tid->apsd_conf[0],
+                                        conf_tid->apsd_conf[1]);
+               if (ret < 0)
+                       return ret;
+       }
+
+       /* Configure HW encryption */
+       ret = wl1271_acx_feature_cfg(wl, wlvif);
        if (ret < 0)
                return ret;
 
-       if (wl->chip.id == CHIP_ID_1283_PG20)
-               ret = wl128x_cmd_radio_parms(wl);
+       /* Mode specific init - post mem init */
+       if (is_ap)
+               ret = wl1271_ap_hw_init_post_mem(wl, vif);
        else
-               ret = wl1271_cmd_radio_parms(wl);
+               ret = wl1271_sta_hw_init_post_mem(wl, vif);
+
+       if (ret < 0)
+               return ret;
+
+       /* Configure initiator BA sessions policies */
+       ret = wl1271_set_ba_policies(wl, wlvif);
        if (ret < 0)
                return ret;
 
+       return 0;
+}
+
+int wl1271_hw_init(struct wl1271 *wl)
+{
+       int ret;
+
+       if (wl->chip.id == CHIP_ID_1283_PG20) {
+               ret = wl128x_cmd_general_parms(wl);
+               if (ret < 0)
+                       return ret;
+               ret = wl128x_cmd_radio_parms(wl);
+               if (ret < 0)
+                       return ret;
+       } else {
+               ret = wl1271_cmd_general_parms(wl);
+               if (ret < 0)
+                       return ret;
+               ret = wl1271_cmd_radio_parms(wl);
+               if (ret < 0)
+                       return ret;
+               ret = wl1271_cmd_ext_radio_parms(wl);
+               if (ret < 0)
+                       return ret;
+       }
+
        /* Chip-specific init */
        ret = wl1271_chip_specific_init(wl);
        if (ret < 0)
                return ret;
 
-       /* Mode specific init */
-       if (is_ap)
-               ret = wl1271_ap_hw_init(wl);
-       else
-               ret = wl1271_sta_hw_init(wl);
+       /* Init templates */
+       ret = wl1271_init_templates_config(wl);
+       if (ret < 0)
+               return ret;
+
+       ret = wl12xx_acx_mem_cfg(wl);
+       if (ret < 0)
+               return ret;
 
+       /* Configure the FW logger */
+       ret = wl12xx_init_fwlog(wl);
        if (ret < 0)
                return ret;
 
@@ -626,11 +695,6 @@ int wl1271_hw_init(struct wl1271 *wl)
        if (ret < 0)
                goto out_free_memmap;
 
-       /* PHY layer config */
-       ret = wl1271_init_phy_config(wl);
-       if (ret < 0)
-               goto out_free_memmap;
-
        ret = wl1271_acx_dco_itrim_params(wl);
        if (ret < 0)
                goto out_free_memmap;
@@ -655,61 +719,20 @@ int wl1271_hw_init(struct wl1271 *wl)
        if (ret < 0)
                goto out_free_memmap;
 
-       /* Default TID/AC configuration */
-       BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count);
-       for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
-               conf_ac = &wl->conf.tx.ac_conf[i];
-               ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min,
-                                       conf_ac->cw_max, conf_ac->aifsn,
-                                       conf_ac->tx_op_limit);
-               if (ret < 0)
-                       goto out_free_memmap;
-
-               conf_tid = &wl->conf.tx.tid_conf[i];
-               ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id,
-                                        conf_tid->channel_type,
-                                        conf_tid->tsid,
-                                        conf_tid->ps_scheme,
-                                        conf_tid->ack_policy,
-                                        conf_tid->apsd_conf[0],
-                                        conf_tid->apsd_conf[1]);
-               if (ret < 0)
-                       goto out_free_memmap;
-       }
-
        /* Enable data path */
        ret = wl1271_cmd_data_path(wl, 1);
        if (ret < 0)
                goto out_free_memmap;
 
-       /* Configure HW encryption */
-       ret = wl1271_acx_feature_cfg(wl);
-       if (ret < 0)
-               goto out_free_memmap;
-
        /* configure PM */
        ret = wl1271_acx_pm_config(wl);
        if (ret < 0)
                goto out_free_memmap;
 
-       /* Mode specific init - post mem init */
-       if (is_ap)
-               ret = wl1271_ap_hw_init_post_mem(wl);
-       else
-               ret = wl1271_sta_hw_init_post_mem(wl);
-
-       if (ret < 0)
-               goto out_free_memmap;
-
        ret = wl12xx_acx_set_rate_mgmt_params(wl);
        if (ret < 0)
                goto out_free_memmap;
 
-       /* Configure initiator BA sessions policies */
-       ret = wl1271_set_ba_policies(wl);
-       if (ret < 0)
-               goto out_free_memmap;
-
        /* configure hangover */
        ret = wl12xx_acx_config_hangover(wl);
        if (ret < 0)
index 3a3c230..2da0f40 100644 (file)
 #include "wl12xx.h"
 
 int wl1271_hw_init_power_auth(struct wl1271 *wl);
-int wl1271_sta_init_templates_config(struct wl1271 *wl);
-int wl1271_init_phy_config(struct wl1271 *wl);
+int wl1271_init_templates_config(struct wl1271 *wl);
 int wl1271_init_pta(struct wl1271 *wl);
 int wl1271_init_energy_detection(struct wl1271 *wl);
 int wl1271_chip_specific_init(struct wl1271 *wl);
 int wl1271_hw_init(struct wl1271 *wl);
-int wl1271_init_ap_rates(struct wl1271 *wl);
-int wl1271_ap_init_templates(struct wl1271 *wl);
+int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif);
+int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif);
 
 #endif
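
After the split, chip-wide and per-interface initialization are separate entry points: wl1271_hw_init() handles templates, memory configuration and the FW logger once per device, while wl1271_init_vif_specific() programs slot/RTS, rates, beacon filtering, TID/AC and BA policy for every interface that is added. A rough usage sketch under that assumption (the real call sites are in main.c, which is only partially shown here):

static int example_bring_up(struct wl1271 *wl, struct ieee80211_vif *vif)
{
	int ret;

	/* once per device, after the firmware has booted */
	ret = wl1271_hw_init(wl);
	if (ret < 0)
		return ret;

	/* repeated for each interface added on the running device */
	return wl1271_init_vif_specific(wl, vif);
}
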
index c2da66f..079ad38 100644 (file)
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/spi/spi.h>
+#include <linux/interrupt.h>
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "wl12xx_80211.h"
 #include "io.h"
 #include "tx.h"
@@ -46,7 +48,7 @@
 bool wl1271_set_block_size(struct wl1271 *wl)
 {
        if (wl->if_ops->set_block_size) {
-               wl->if_ops->set_block_size(wl, WL12XX_BUS_BLOCK_SIZE);
+               wl->if_ops->set_block_size(wl->dev, WL12XX_BUS_BLOCK_SIZE);
                return true;
        }
 
@@ -55,12 +57,12 @@ bool wl1271_set_block_size(struct wl1271 *wl)
 
 void wl1271_disable_interrupts(struct wl1271 *wl)
 {
-       wl->if_ops->disable_irq(wl);
+       disable_irq(wl->irq);
 }
 
 void wl1271_enable_interrupts(struct wl1271 *wl)
 {
-       wl->if_ops->enable_irq(wl);
+       enable_irq(wl->irq);
 }
 
 /* Set the SPI partitions to access the chip addresses
@@ -128,13 +130,13 @@ EXPORT_SYMBOL_GPL(wl1271_set_partition);
 void wl1271_io_reset(struct wl1271 *wl)
 {
        if (wl->if_ops->reset)
-               wl->if_ops->reset(wl);
+               wl->if_ops->reset(wl->dev);
 }
 
 void wl1271_io_init(struct wl1271 *wl)
 {
        if (wl->if_ops->init)
-               wl->if_ops->init(wl);
+               wl->if_ops->init(wl->dev);
 }
 
 void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
index e839341..d398cbc 100644 (file)
@@ -51,23 +51,17 @@ void wl1271_enable_interrupts(struct wl1271 *wl);
 void wl1271_io_reset(struct wl1271 *wl);
 void wl1271_io_init(struct wl1271 *wl);
 
-static inline struct device *wl1271_wl_to_dev(struct wl1271 *wl)
-{
-       return wl->if_ops->dev(wl);
-}
-
-
 /* Raw target IO, address is not translated */
 static inline void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
                                    size_t len, bool fixed)
 {
-       wl->if_ops->write(wl, addr, buf, len, fixed);
+       wl->if_ops->write(wl->dev, addr, buf, len, fixed);
 }
 
 static inline void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
                                   size_t len, bool fixed)
 {
-       wl->if_ops->read(wl, addr, buf, len, fixed);
+       wl->if_ops->read(wl->dev, addr, buf, len, fixed);
 }
 
 static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
@@ -155,13 +149,13 @@ static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val)
 
 static inline void wl1271_power_off(struct wl1271 *wl)
 {
-       wl->if_ops->power(wl, false);
+       wl->if_ops->power(wl->dev, false);
        clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
 }
 
 static inline int wl1271_power_on(struct wl1271 *wl)
 {
-       int ret = wl->if_ops->power(wl, true);
+       int ret = wl->if_ops->power(wl->dev, true);
        if (ret == 0)
                set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
 
@@ -176,15 +170,10 @@ u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
 int wl1271_set_partition(struct wl1271 *wl,
                         struct wl1271_partition_set *p);
 
+bool wl1271_set_block_size(struct wl1271 *wl);
+
 /* Functions from wl1271_main.c */
 
-int wl1271_register_hw(struct wl1271 *wl);
-void wl1271_unregister_hw(struct wl1271 *wl);
-int wl1271_init_ieee80211(struct wl1271 *wl);
-struct ieee80211_hw *wl1271_alloc_hw(void);
-int wl1271_free_hw(struct wl1271 *wl);
-irqreturn_t wl1271_irq(int irq, void *data);
-bool wl1271_set_block_size(struct wl1271 *wl);
 int wl1271_tx_dummy_packet(struct wl1271 *wl);
 
 #endif
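
The io layer now passes wl->dev to the bus callbacks instead of the wl1271 pointer, and IRQ masking goes through the generic enable_irq()/disable_irq() on wl->irq rather than per-bus hooks. The callback table implied by the wrappers above looks roughly like this sketch (the real struct wl1271_if_operations is declared in wl12xx.h and may carry additional members):

struct wl1271_if_operations {
	void (*read)(struct device *child, int addr, void *buf,
		     size_t len, bool fixed);
	void (*write)(struct device *child, int addr, void *buf,
		      size_t len, bool fixed);
	void (*reset)(struct device *child);
	void (*init)(struct device *child);
	int (*power)(struct device *child, bool enable);
	void (*set_block_size)(struct device *child, unsigned int blksz);
};
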
index 884f82b..c305841 100644 (file)
 #include <linux/slab.h>
 #include <linux/wl12xx.h>
 #include <linux/sched.h>
+#include <linux/interrupt.h>
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "wl12xx_80211.h"
 #include "reg.h"
 #include "io.h"
@@ -377,42 +379,30 @@ static char *fwlog_param;
 static bool bug_on_recovery;
 
 static void __wl1271_op_remove_interface(struct wl1271 *wl,
+                                        struct ieee80211_vif *vif,
                                         bool reset_tx_queues);
-static void wl1271_free_ap_keys(struct wl1271 *wl);
-
-
-static void wl1271_device_release(struct device *dev)
-{
-
-}
-
-static struct platform_device wl1271_device = {
-       .name           = "wl1271",
-       .id             = -1,
-
-       /* device model insists to have a release function */
-       .dev            = {
-               .release = wl1271_device_release,
-       },
-};
+static void wl1271_op_stop(struct ieee80211_hw *hw);
+static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 
 static DEFINE_MUTEX(wl_list_mutex);
 static LIST_HEAD(wl_list);
 
-static int wl1271_check_operstate(struct wl1271 *wl, unsigned char operstate)
+static int wl1271_check_operstate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                 unsigned char operstate)
 {
        int ret;
+
        if (operstate != IF_OPER_UP)
                return 0;
 
-       if (test_and_set_bit(WL1271_FLAG_STA_STATE_SENT, &wl->flags))
+       if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
                return 0;
 
-       ret = wl12xx_cmd_set_peer_state(wl, wl->sta_hlid);
+       ret = wl12xx_cmd_set_peer_state(wl, wlvif->sta.hlid);
        if (ret < 0)
                return ret;
 
-       wl12xx_croc(wl, wl->role_id);
+       wl12xx_croc(wl, wlvif->role_id);
 
        wl1271_info("Association completed.");
        return 0;
@@ -426,6 +416,7 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
        struct ieee80211_hw *hw;
        struct wl1271 *wl;
        struct wl1271 *wl_temp;
+       struct wl12xx_vif *wlvif;
        int ret = 0;
 
        /* Check that this notification is for us. */
@@ -459,17 +450,18 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
        if (wl->state == WL1271_STATE_OFF)
                goto out;
 
-       if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
-               goto out;
-
-       ret = wl1271_ps_elp_wakeup(wl);
-       if (ret < 0)
-               goto out;
+       wl12xx_for_each_wlvif_sta(wl, wlvif) {
+               if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+                       continue;
 
-       wl1271_check_operstate(wl, dev->operstate);
+               ret = wl1271_ps_elp_wakeup(wl);
+               if (ret < 0)
+                       goto out;
 
-       wl1271_ps_elp_sleep(wl);
+               wl1271_check_operstate(wl, wlvif, dev->operstate);
 
+               wl1271_ps_elp_sleep(wl);
+       }
 out:
        mutex_unlock(&wl->mutex);
 
@@ -498,19 +490,20 @@ static int wl1271_reg_notify(struct wiphy *wiphy,
        return 0;
 }
 
-static int wl1271_set_rx_streaming(struct wl1271 *wl, bool enable)
+static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                  bool enable)
 {
        int ret = 0;
 
        /* we should hold wl->mutex */
-       ret = wl1271_acx_ps_rx_streaming(wl, enable);
+       ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
        if (ret < 0)
                goto out;
 
        if (enable)
-               set_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags);
+               set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
        else
-               clear_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags);
+               clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
 out:
        return ret;
 }
@@ -519,25 +512,25 @@ out:
  * this function is being called when the rx_streaming interval
  * has been changed or rx_streaming should be disabled
  */
-int wl1271_recalc_rx_streaming(struct wl1271 *wl)
+int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        int ret = 0;
        int period = wl->conf.rx_streaming.interval;
 
        /* don't reconfigure if rx_streaming is disabled */
-       if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
+       if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
                goto out;
 
        /* reconfigure/disable according to new streaming_period */
        if (period &&
-           test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) &&
+           test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
            (wl->conf.rx_streaming.always ||
             test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
-               ret = wl1271_set_rx_streaming(wl, true);
+               ret = wl1271_set_rx_streaming(wl, wlvif, true);
        else {
-               ret = wl1271_set_rx_streaming(wl, false);
+               ret = wl1271_set_rx_streaming(wl, wlvif, false);
                /* don't cancel_work_sync since we might deadlock */
-               del_timer_sync(&wl->rx_streaming_timer);
+               del_timer_sync(&wlvif->rx_streaming_timer);
        }
 out:
        return ret;
@@ -546,13 +539,14 @@ out:
 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
 {
        int ret;
-       struct wl1271 *wl =
-               container_of(work, struct wl1271, rx_streaming_enable_work);
+       struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
+                                               rx_streaming_enable_work);
+       struct wl1271 *wl = wlvif->wl;
 
        mutex_lock(&wl->mutex);
 
-       if (test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags) ||
-           !test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
+       if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
+           !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
            (!wl->conf.rx_streaming.always &&
             !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
                goto out;
@@ -564,12 +558,12 @@ static void wl1271_rx_streaming_enable_work(struct work_struct *work)
        if (ret < 0)
                goto out;
 
-       ret = wl1271_set_rx_streaming(wl, true);
+       ret = wl1271_set_rx_streaming(wl, wlvif, true);
        if (ret < 0)
                goto out_sleep;
 
        /* stop it after some time of inactivity */
-       mod_timer(&wl->rx_streaming_timer,
+       mod_timer(&wlvif->rx_streaming_timer,
                  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
 
 out_sleep:
@@ -581,19 +575,20 @@ out:
 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
 {
        int ret;
-       struct wl1271 *wl =
-               container_of(work, struct wl1271, rx_streaming_disable_work);
+       struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
+                                               rx_streaming_disable_work);
+       struct wl1271 *wl = wlvif->wl;
 
        mutex_lock(&wl->mutex);
 
-       if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
+       if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
                goto out;
 
        ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
-       ret = wl1271_set_rx_streaming(wl, false);
+       ret = wl1271_set_rx_streaming(wl, wlvif, false);
        if (ret)
                goto out_sleep;
 
@@ -605,8 +600,9 @@ out:
 
 static void wl1271_rx_streaming_timer(unsigned long data)
 {
-       struct wl1271 *wl = (struct wl1271 *)data;
-       ieee80211_queue_work(wl->hw, &wl->rx_streaming_disable_work);
+       struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
+       struct wl1271 *wl = wlvif->wl;
+       ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
 }
 
 static void wl1271_conf_init(struct wl1271 *wl)
@@ -645,9 +641,7 @@ static void wl1271_conf_init(struct wl1271 *wl)
 
 static int wl1271_plt_init(struct wl1271 *wl)
 {
-       struct conf_tx_ac_category *conf_ac;
-       struct conf_tx_tid *conf_tid;
-       int ret, i;
+       int ret;
 
        if (wl->chip.id == CHIP_ID_1283_PG20)
                ret = wl128x_cmd_general_parms(wl);
@@ -676,74 +670,14 @@ static int wl1271_plt_init(struct wl1271 *wl)
        if (ret < 0)
                return ret;
 
-       ret = wl1271_sta_init_templates_config(wl);
-       if (ret < 0)
-               return ret;
-
        ret = wl1271_acx_init_mem_config(wl);
        if (ret < 0)
                return ret;
 
-       /* PHY layer config */
-       ret = wl1271_init_phy_config(wl);
-       if (ret < 0)
-               goto out_free_memmap;
-
-       ret = wl1271_acx_dco_itrim_params(wl);
-       if (ret < 0)
-               goto out_free_memmap;
-
-       /* Initialize connection monitoring thresholds */
-       ret = wl1271_acx_conn_monit_params(wl, false);
-       if (ret < 0)
-               goto out_free_memmap;
-
-       /* Bluetooth WLAN coexistence */
-       ret = wl1271_init_pta(wl);
-       if (ret < 0)
-               goto out_free_memmap;
-
-       /* FM WLAN coexistence */
-       ret = wl1271_acx_fm_coex(wl);
-       if (ret < 0)
-               goto out_free_memmap;
-
-       /* Energy detection */
-       ret = wl1271_init_energy_detection(wl);
-       if (ret < 0)
-               goto out_free_memmap;
-
        ret = wl12xx_acx_mem_cfg(wl);
        if (ret < 0)
                goto out_free_memmap;
 
-       /* Default fragmentation threshold */
-       ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold);
-       if (ret < 0)
-               goto out_free_memmap;
-
-       /* Default TID/AC configuration */
-       BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count);
-       for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
-               conf_ac = &wl->conf.tx.ac_conf[i];
-               ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min,
-                                       conf_ac->cw_max, conf_ac->aifsn,
-                                       conf_ac->tx_op_limit);
-               if (ret < 0)
-                       goto out_free_memmap;
-
-               conf_tid = &wl->conf.tx.tid_conf[i];
-               ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id,
-                                        conf_tid->channel_type,
-                                        conf_tid->tsid,
-                                        conf_tid->ps_scheme,
-                                        conf_tid->ack_policy,
-                                        conf_tid->apsd_conf[0],
-                                        conf_tid->apsd_conf[1]);
-               if (ret < 0)
-                       goto out_free_memmap;
-       }
-
        /* Enable data path */
        ret = wl1271_cmd_data_path(wl, 1);
        if (ret < 0)
@@ -768,14 +702,12 @@ static int wl1271_plt_init(struct wl1271 *wl)
        return ret;
 }
 
-static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts)
+static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
+                                       struct wl12xx_vif *wlvif,
+                                       u8 hlid, u8 tx_pkts)
 {
        bool fw_ps, single_sta;
 
-       /* only regulate station links */
-       if (hlid < WL1271_AP_STA_HLID_START)
-               return;
-
        fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
        single_sta = (wl->active_sta_count == 1);
 
@@ -784,7 +716,7 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts)
         * packets in FW or if the STA is awake.
         */
        if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
-               wl1271_ps_link_end(wl, hlid);
+               wl12xx_ps_link_end(wl, wlvif, hlid);
 
        /*
         * Start high-level PS if the STA is asleep with enough blocks in FW.
@@ -792,24 +724,14 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts)
         * case FW-memory congestion is not a problem.
         */
        else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
-               wl1271_ps_link_start(wl, hlid, true);
-}
-
-bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid)
-{
-       int id;
-
-       /* global/broadcast "stations" are always active */
-       if (hlid < WL1271_AP_STA_HLID_START)
-               return true;
-
-       id = hlid - WL1271_AP_STA_HLID_START;
-       return test_bit(id, wl->ap_hlid_map);
+               wl12xx_ps_link_start(wl, wlvif, hlid, true);
 }
 
 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
+                                          struct wl12xx_vif *wlvif,
                                           struct wl12xx_fw_status *status)
 {
+       struct wl1271_link *lnk;
        u32 cur_fw_ps_map;
        u8 hlid, cnt;
 
@@ -825,25 +747,22 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
                wl->ap_fw_ps_map = cur_fw_ps_map;
        }
 
-       for (hlid = WL1271_AP_STA_HLID_START; hlid < AP_MAX_LINKS; hlid++) {
-               if (!wl1271_is_active_sta(wl, hlid))
-                       continue;
-
-               cnt = status->tx_lnk_free_pkts[hlid] -
-                     wl->links[hlid].prev_freed_pkts;
+       for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) {
+               lnk = &wl->links[hlid];
+               cnt = status->tx_lnk_free_pkts[hlid] - lnk->prev_freed_pkts;
 
-               wl->links[hlid].prev_freed_pkts =
-                       status->tx_lnk_free_pkts[hlid];
-               wl->links[hlid].allocated_pkts -= cnt;
+               lnk->prev_freed_pkts = status->tx_lnk_free_pkts[hlid];
+               lnk->allocated_pkts -= cnt;
 
-               wl12xx_irq_ps_regulate_link(wl, hlid,
-                                           wl->links[hlid].allocated_pkts);
+               wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
+                                           lnk->allocated_pkts);
        }
 }
 
 static void wl12xx_fw_status(struct wl1271 *wl,
                             struct wl12xx_fw_status *status)
 {
+       struct wl12xx_vif *wlvif;
        struct timespec ts;
        u32 old_tx_blk_count = wl->tx_blocks_available;
        int avail, freed_blocks;
@@ -898,8 +817,9 @@ static void wl12xx_fw_status(struct wl1271 *wl,
                clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
 
        /* for AP, update num of allocated TX blocks per link and ps status */
-       if (wl->bss_type == BSS_TYPE_AP_BSS)
-               wl12xx_irq_update_links_status(wl, status);
+       wl12xx_for_each_wlvif_ap(wl, wlvif) {
+               wl12xx_irq_update_links_status(wl, wlvif, status);
+       }
 
        /* update the host-chipset time offset */
        getnstimeofday(&ts);
@@ -932,7 +852,7 @@ static void wl1271_netstack_work(struct work_struct *work)
 
 #define WL1271_IRQ_MAX_LOOPS 256
 
-irqreturn_t wl1271_irq(int irq, void *cookie)
+static irqreturn_t wl1271_irq(int irq, void *cookie)
 {
        int ret;
        u32 intr;
@@ -1054,7 +974,6 @@ out:
 
        return IRQ_HANDLED;
 }
-EXPORT_SYMBOL_GPL(wl1271_irq);
 
 static int wl1271_fetch_firmware(struct wl1271 *wl)
 {
@@ -1069,10 +988,10 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
 
        wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
 
-       ret = request_firmware(&fw, fw_name, wl1271_wl_to_dev(wl));
+       ret = request_firmware(&fw, fw_name, wl->dev);
 
        if (ret < 0) {
-               wl1271_error("could not get firmware: %d", ret);
+               wl1271_error("could not get firmware %s: %d", fw_name, ret);
                return ret;
        }
 
@@ -1107,10 +1026,11 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
        const struct firmware *fw;
        int ret;
 
-       ret = request_firmware(&fw, WL12XX_NVS_NAME, wl1271_wl_to_dev(wl));
+       ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev);
 
        if (ret < 0) {
-               wl1271_error("could not get nvs file: %d", ret);
+               wl1271_error("could not get nvs file %s: %d", WL12XX_NVS_NAME,
+                            ret);
                return ret;
        }
 
@@ -1217,11 +1137,13 @@ static void wl1271_recovery_work(struct work_struct *work)
 {
        struct wl1271 *wl =
                container_of(work, struct wl1271, recovery_work);
+       struct wl12xx_vif *wlvif;
+       struct ieee80211_vif *vif;
 
        mutex_lock(&wl->mutex);
 
        if (wl->state != WL1271_STATE_ON)
-               goto out;
+               goto out_unlock;
 
        /* Avoid a recursive recovery */
        set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
@@ -1238,9 +1160,12 @@ static void wl1271_recovery_work(struct work_struct *work)
         * in the firmware during recovery. This doesn't hurt if the network is
         * not encrypted.
         */
-       if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
-           test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
-               wl->tx_security_seq += WL1271_TX_SQN_POST_RECOVERY_PADDING;
+       wl12xx_for_each_wlvif(wl, wlvif) {
+               if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
+                   test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
+                       wlvif->tx_security_seq +=
+                               WL1271_TX_SQN_POST_RECOVERY_PADDING;
+       }
 
        /* Prevent spurious TX during FW restart */
        ieee80211_stop_queues(wl->hw);
@@ -1251,7 +1176,14 @@ static void wl1271_recovery_work(struct work_struct *work)
        }
 
        /* reboot the chipset */
-       __wl1271_op_remove_interface(wl, false);
+       while (!list_empty(&wl->wlvif_list)) {
+               wlvif = list_first_entry(&wl->wlvif_list,
+                                      struct wl12xx_vif, list);
+               vif = wl12xx_wlvif_to_vif(wlvif);
+               __wl1271_op_remove_interface(wl, vif, false);
+       }
+       mutex_unlock(&wl->mutex);
+       wl1271_op_stop(wl->hw);
 
        clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
 
@@ -1262,8 +1194,8 @@ static void wl1271_recovery_work(struct work_struct *work)
         * to restart the HW.
         */
        ieee80211_wake_queues(wl->hw);
-
-out:
+       return;
+out_unlock:
        mutex_unlock(&wl->mutex);
 }
 
@@ -1318,7 +1250,16 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
        /* 0. read chip id from CHIP_ID */
        wl->chip.id = wl1271_read32(wl, CHIP_ID_B);
 
-       /* 1. check if chip id is valid */
+       /*
+        * For wl127x based devices we could use the default block
+        * size (512 bytes), but due to a bug in the sdio driver, we
+        * need to set it explicitly after the chip is powered on.  To
+        * simplify the code and since the performance impact is
+        * negligible, we use the same block size for all different
+        * chip types.
+        */
+       if (!wl1271_set_block_size(wl))
+               wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
 
        switch (wl->chip.id) {
        case CHIP_ID_1271_PG10:
@@ -1328,7 +1269,9 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
                ret = wl1271_setup(wl);
                if (ret < 0)
                        goto out;
+               wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
                break;
+
        case CHIP_ID_1271_PG20:
                wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
                             wl->chip.id);
@@ -1336,7 +1279,9 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
                ret = wl1271_setup(wl);
                if (ret < 0)
                        goto out;
+               wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
                break;
+
        case CHIP_ID_1283_PG20:
                wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)",
                             wl->chip.id);
@@ -1344,9 +1289,6 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
                ret = wl1271_setup(wl);
                if (ret < 0)
                        goto out;
-
-               if (wl1271_set_block_size(wl))
-                       wl->quirks |= WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT;
                break;
        case CHIP_ID_1283_PG10:
        default:
@@ -1389,8 +1331,6 @@ int wl1271_plt_start(struct wl1271 *wl)
                goto out;
        }
 
-       wl->bss_type = BSS_TYPE_STA_BSS;
-
        while (retries) {
                retries--;
                ret = wl1271_chip_wakeup(wl);
@@ -1482,33 +1422,34 @@ int wl1271_plt_stop(struct wl1271 *wl)
 static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct wl1271 *wl = hw->priv;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_vif *vif = info->control.vif;
+       struct wl12xx_vif *wlvif = NULL;
        unsigned long flags;
        int q, mapping;
-       u8 hlid = 0;
+       u8 hlid;
+
+       if (vif)
+               wlvif = wl12xx_vif_to_data(vif);
 
        mapping = skb_get_queue_mapping(skb);
        q = wl1271_tx_get_queue(mapping);
 
-       if (wl->bss_type == BSS_TYPE_AP_BSS)
-               hlid = wl12xx_tx_get_hlid_ap(wl, skb);
+       hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
 
        spin_lock_irqsave(&wl->wl_lock, flags);
 
        /* queue the packet */
-       if (wl->bss_type == BSS_TYPE_AP_BSS) {
-               if (!wl1271_is_active_sta(wl, hlid)) {
-                       wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d",
-                                    hlid, q);
-                       dev_kfree_skb(skb);
-                       goto out;
-               }
-
-               wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
-               skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
-       } else {
-               skb_queue_tail(&wl->tx_queue[q], skb);
+       if (hlid == WL12XX_INVALID_LINK_ID ||
+           (wlvif && !test_bit(hlid, wlvif->links_map))) {
+               wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
+               ieee80211_free_txskb(hw, skb);
+               goto out;
        }
 
+       wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
+       skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
+
        wl->tx_queue_count[q]++;
 
        /*
@@ -1609,13 +1550,14 @@ static struct notifier_block wl1271_dev_notifier = {
 };
 
 #ifdef CONFIG_PM
-static int wl1271_configure_suspend_sta(struct wl1271 *wl)
+static int wl1271_configure_suspend_sta(struct wl1271 *wl,
+                                       struct wl12xx_vif *wlvif)
 {
        int ret = 0;
 
        mutex_lock(&wl->mutex);
 
-       if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+       if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
                goto out_unlock;
 
        ret = wl1271_ps_elp_wakeup(wl);
@@ -1623,12 +1565,12 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl)
                goto out_unlock;
 
        /* enter psm if needed */
-       if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+       if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
                DECLARE_COMPLETION_ONSTACK(compl);
 
-               wl->ps_compl = &compl;
-               ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
-                                  wl->basic_rate, true);
+               wlvif->ps_compl = &compl;
+               ret = wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE,
+                                  wlvif->basic_rate, true);
                if (ret < 0)
                        goto out_sleep;
 
@@ -1638,42 +1580,43 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl)
 
                ret = wait_for_completion_timeout(
                        &compl, msecs_to_jiffies(WL1271_PS_COMPLETE_TIMEOUT));
+
+               mutex_lock(&wl->mutex);
                if (ret <= 0) {
                        wl1271_warning("couldn't enter ps mode!");
                        ret = -EBUSY;
-                       goto out;
+                       goto out_cleanup;
                }
 
-               /* take mutex again, and wakeup */
-               mutex_lock(&wl->mutex);
-
                ret = wl1271_ps_elp_wakeup(wl);
                if (ret < 0)
-                       goto out_unlock;
+                       goto out_cleanup;
        }
 out_sleep:
        wl1271_ps_elp_sleep(wl);
+out_cleanup:
+       wlvif->ps_compl = NULL;
 out_unlock:
        mutex_unlock(&wl->mutex);
-out:
        return ret;
 
 }
 
-static int wl1271_configure_suspend_ap(struct wl1271 *wl)
+static int wl1271_configure_suspend_ap(struct wl1271 *wl,
+                                      struct wl12xx_vif *wlvif)
 {
        int ret = 0;
 
        mutex_lock(&wl->mutex);
 
-       if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
+       if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
                goto out_unlock;
 
        ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out_unlock;
 
-       ret = wl1271_acx_beacon_filter_opt(wl, true);
+       ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
 
        wl1271_ps_elp_sleep(wl);
 out_unlock:
@@ -1682,20 +1625,22 @@ out_unlock:
 
 }
 
-static int wl1271_configure_suspend(struct wl1271 *wl)
+static int wl1271_configure_suspend(struct wl1271 *wl,
+                                   struct wl12xx_vif *wlvif)
 {
-       if (wl->bss_type == BSS_TYPE_STA_BSS)
-               return wl1271_configure_suspend_sta(wl);
-       if (wl->bss_type == BSS_TYPE_AP_BSS)
-               return wl1271_configure_suspend_ap(wl);
+       if (wlvif->bss_type == BSS_TYPE_STA_BSS)
+               return wl1271_configure_suspend_sta(wl, wlvif);
+       if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+               return wl1271_configure_suspend_ap(wl, wlvif);
        return 0;
 }
 
-static void wl1271_configure_resume(struct wl1271 *wl)
+static void wl1271_configure_resume(struct wl1271 *wl,
+                                   struct wl12xx_vif *wlvif)
 {
        int ret;
-       bool is_sta = wl->bss_type == BSS_TYPE_STA_BSS;
-       bool is_ap = wl->bss_type == BSS_TYPE_AP_BSS;
+       bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
+       bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
 
        if (!is_sta && !is_ap)
                return;
@@ -1707,11 +1652,11 @@ static void wl1271_configure_resume(struct wl1271 *wl)
 
        if (is_sta) {
                /* exit psm if it wasn't configured */
-               if (!test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags))
-                       wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
-                                          wl->basic_rate, true);
+               if (!test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags))
+                       wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE,
+                                          wlvif->basic_rate, true);
        } else if (is_ap) {
-               wl1271_acx_beacon_filter_opt(wl, false);
+               wl1271_acx_beacon_filter_opt(wl, wlvif, false);
        }
 
        wl1271_ps_elp_sleep(wl);
@@ -1723,16 +1668,19 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
                            struct cfg80211_wowlan *wow)
 {
        struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif;
        int ret;
 
        wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
        WARN_ON(!wow || !wow->any);
 
        wl->wow_enabled = true;
-       ret = wl1271_configure_suspend(wl);
-       if (ret < 0) {
-               wl1271_warning("couldn't prepare device to suspend");
-               return ret;
+       wl12xx_for_each_wlvif(wl, wlvif) {
+               ret = wl1271_configure_suspend(wl, wlvif);
+               if (ret < 0) {
+                       wl1271_warning("couldn't prepare device to suspend");
+                       return ret;
+               }
        }
        /* flush any remaining work */
        wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
@@ -1751,7 +1699,9 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
 
        wl1271_enable_interrupts(wl);
        flush_work(&wl->tx_work);
-       flush_delayed_work(&wl->pspoll_work);
+       wl12xx_for_each_wlvif(wl, wlvif) {
+               flush_delayed_work(&wlvif->pspoll_work);
+       }
        flush_delayed_work(&wl->elp_work);
 
        return 0;
@@ -1760,6 +1710,7 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
 static int wl1271_op_resume(struct ieee80211_hw *hw)
 {
        struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif;
        unsigned long flags;
        bool run_irq_work = false;
 
@@ -1783,7 +1734,9 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
                wl1271_irq(0, wl);
                wl1271_enable_interrupts(wl);
        }
-       wl1271_configure_resume(wl);
+       wl12xx_for_each_wlvif(wl, wlvif) {
+               wl1271_configure_resume(wl, wlvif);
+       }
        wl->wow_enabled = false;
 
        return 0;
@@ -1810,20 +1763,119 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
 
 static void wl1271_op_stop(struct ieee80211_hw *hw)
 {
+       struct wl1271 *wl = hw->priv;
+       int i;
+
        wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
+
+       mutex_lock(&wl->mutex);
+       if (wl->state == WL1271_STATE_OFF) {
+               mutex_unlock(&wl->mutex);
+               return;
+       }
+       /*
+        * this must be before the cancel_work calls below, so that the work
+        * functions don't perform further work.
+        */
+       wl->state = WL1271_STATE_OFF;
+       mutex_unlock(&wl->mutex);
+
+       mutex_lock(&wl_list_mutex);
+       list_del(&wl->list);
+       mutex_unlock(&wl_list_mutex);
+
+       wl1271_disable_interrupts(wl);
+       wl1271_flush_deferred_work(wl);
+       cancel_delayed_work_sync(&wl->scan_complete_work);
+       cancel_work_sync(&wl->netstack_work);
+       cancel_work_sync(&wl->tx_work);
+       cancel_delayed_work_sync(&wl->elp_work);
+
+       /* let's notify MAC80211 about the remaining pending TX frames */
+       wl12xx_tx_reset(wl, true);
+       mutex_lock(&wl->mutex);
+
+       wl1271_power_off(wl);
+
+       wl->band = IEEE80211_BAND_2GHZ;
+
+       wl->rx_counter = 0;
+       wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
+       wl->tx_blocks_available = 0;
+       wl->tx_allocated_blocks = 0;
+       wl->tx_results_count = 0;
+       wl->tx_packets_count = 0;
+       wl->time_offset = 0;
+       wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
+       wl->ap_fw_ps_map = 0;
+       wl->ap_ps_map = 0;
+       wl->sched_scanning = false;
+       memset(wl->roles_map, 0, sizeof(wl->roles_map));
+       memset(wl->links_map, 0, sizeof(wl->links_map));
+       memset(wl->roc_map, 0, sizeof(wl->roc_map));
+       wl->active_sta_count = 0;
+
+       /* The system link is always allocated */
+       __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
+
+       /*
+        * this is performed after the cancel_work calls and the associated
+        * mutex_lock, so that wl1271_op_add_interface does not accidentally
+        * get executed before all these vars have been reset.
+        */
+       wl->flags = 0;
+
+       wl->tx_blocks_freed = 0;
+
+       for (i = 0; i < NUM_TX_QUEUES; i++) {
+               wl->tx_pkts_freed[i] = 0;
+               wl->tx_allocated_pkts[i] = 0;
+       }
+
+       wl1271_debugfs_reset(wl);
+
+       kfree(wl->fw_status);
+       wl->fw_status = NULL;
+       kfree(wl->tx_res_if);
+       wl->tx_res_if = NULL;
+       kfree(wl->target_mem_map);
+       wl->target_mem_map = NULL;
+
+       mutex_unlock(&wl->mutex);
+}
+
+static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
+{
+       u8 policy = find_first_zero_bit(wl->rate_policies_map,
+                                       WL12XX_MAX_RATE_POLICIES);
+       if (policy >= WL12XX_MAX_RATE_POLICIES)
+               return -EBUSY;
+
+       __set_bit(policy, wl->rate_policies_map);
+       *idx = policy;
+       return 0;
 }
 
-static u8 wl12xx_get_role_type(struct wl1271 *wl)
+static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
 {
-       switch (wl->bss_type) {
+       if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
+               return;
+
+       __clear_bit(*idx, wl->rate_policies_map);
+       *idx = WL12XX_MAX_RATE_POLICIES;
+}
+
+static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+       switch (wlvif->bss_type) {
        case BSS_TYPE_AP_BSS:
-               if (wl->p2p)
+               if (wlvif->p2p)
                        return WL1271_ROLE_P2P_GO;
                else
                        return WL1271_ROLE_AP;
 
        case BSS_TYPE_STA_BSS:
-               if (wl->p2p)
+               if (wlvif->p2p)
                        return WL1271_ROLE_P2P_CL;
                else
                        return WL1271_ROLE_STA;
@@ -1832,78 +1884,95 @@ static u8 wl12xx_get_role_type(struct wl1271 *wl)
                return WL1271_ROLE_IBSS;
 
        default:
-               wl1271_error("invalid bss_type: %d", wl->bss_type);
+               wl1271_error("invalid bss_type: %d", wlvif->bss_type);
        }
        return WL12XX_INVALID_ROLE_TYPE;
 }
 
-static int wl1271_op_add_interface(struct ieee80211_hw *hw,
-                                  struct ieee80211_vif *vif)
+static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
 {
-       struct wl1271 *wl = hw->priv;
-       struct wiphy *wiphy = hw->wiphy;
-       int retries = WL1271_BOOT_RETRIES;
-       int ret = 0;
-       u8 role_type;
-       bool booted = false;
-
-       wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
-                    ieee80211_vif_type_p2p(vif), vif->addr);
-
-       mutex_lock(&wl->mutex);
-       if (wl->vif) {
-               wl1271_debug(DEBUG_MAC80211,
-                            "multiple vifs are not supported yet");
-               ret = -EBUSY;
-               goto out;
-       }
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+       int i;
 
-       /*
-        * in some very corner case HW recovery scenarios its possible to
-        * get here before __wl1271_op_remove_interface is complete, so
-        * opt out if that is the case.
-        */
-       if (test_bit(WL1271_FLAG_IF_INITIALIZED, &wl->flags)) {
-               ret = -EBUSY;
-               goto out;
-       }
+       /* clear everything but the persistent data */
+       memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
 
        switch (ieee80211_vif_type_p2p(vif)) {
        case NL80211_IFTYPE_P2P_CLIENT:
-               wl->p2p = 1;
+               wlvif->p2p = 1;
                /* fall-through */
        case NL80211_IFTYPE_STATION:
-               wl->bss_type = BSS_TYPE_STA_BSS;
-               wl->set_bss_type = BSS_TYPE_STA_BSS;
+               wlvif->bss_type = BSS_TYPE_STA_BSS;
                break;
        case NL80211_IFTYPE_ADHOC:
-               wl->bss_type = BSS_TYPE_IBSS;
-               wl->set_bss_type = BSS_TYPE_STA_BSS;
+               wlvif->bss_type = BSS_TYPE_IBSS;
                break;
        case NL80211_IFTYPE_P2P_GO:
-               wl->p2p = 1;
+               wlvif->p2p = 1;
                /* fall-through */
        case NL80211_IFTYPE_AP:
-               wl->bss_type = BSS_TYPE_AP_BSS;
+               wlvif->bss_type = BSS_TYPE_AP_BSS;
                break;
        default:
-               ret = -EOPNOTSUPP;
-               goto out;
+               wlvif->bss_type = MAX_BSS_TYPE;
+               return -EOPNOTSUPP;
        }
 
-       role_type = wl12xx_get_role_type(wl);
-       if (role_type == WL12XX_INVALID_ROLE_TYPE) {
-               ret = -EINVAL;
-               goto out;
+       wlvif->role_id = WL12XX_INVALID_ROLE_ID;
+       wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
+       wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
+
+       if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+           wlvif->bss_type == BSS_TYPE_IBSS) {
+               /* init sta/ibss data */
+               wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
+               wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
+               wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
+               wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
+       } else {
+               /* init ap data */
+               wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
+               wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
+               wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
+               wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
+               for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
+                       wl12xx_allocate_rate_policy(wl,
+                                               &wlvif->ap.ucast_rate_idx[i]);
        }
-       memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
 
-       if (wl->state != WL1271_STATE_OFF) {
-               wl1271_error("cannot start because not in off state: %d",
-                            wl->state);
-               ret = -EBUSY;
-               goto out;
-       }
+       wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
+       wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
+       wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
+       wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
+       wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
+       wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
+
+       /*
+        * mac80211 configures some values globally, while we treat them
+        * per-interface. thus, on init, we have to copy them from wl
+        */
+       wlvif->band = wl->band;
+       wlvif->channel = wl->channel;
+       wlvif->power_level = wl->power_level;
+
+       INIT_WORK(&wlvif->rx_streaming_enable_work,
+                 wl1271_rx_streaming_enable_work);
+       INIT_WORK(&wlvif->rx_streaming_disable_work,
+                 wl1271_rx_streaming_disable_work);
+       INIT_DELAYED_WORK(&wlvif->pspoll_work, wl1271_pspoll_work);
+       INIT_LIST_HEAD(&wlvif->list);
+
+       setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
+                   (unsigned long) wlvif);
+       return 0;
+}
+
+static bool wl12xx_init_fw(struct wl1271 *wl)
+{
+       int retries = WL1271_BOOT_RETRIES;
+       bool booted = false;
+       struct wiphy *wiphy = wl->hw->wiphy;
+       int ret;
 
        while (retries) {
                retries--;
@@ -1915,25 +1984,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
                if (ret < 0)
                        goto power_off;
 
-               if (wl->bss_type == BSS_TYPE_STA_BSS ||
-                   wl->bss_type == BSS_TYPE_IBSS) {
-                       /*
-                        * The device role is a special role used for
-                        * rx and tx frames prior to association (as
-                        * the STA role can get packets only from
-                        * its associated bssid)
-                        */
-                       ret = wl12xx_cmd_role_enable(wl,
-                                                        WL1271_ROLE_DEVICE,
-                                                        &wl->dev_role_id);
-                       if (ret < 0)
-                               goto irq_disable;
-               }
-
-               ret = wl12xx_cmd_role_enable(wl, role_type, &wl->role_id);
-               if (ret < 0)
-                       goto irq_disable;
-
                ret = wl1271_hw_init(wl);
                if (ret < 0)
                        goto irq_disable;
@@ -1964,9 +2014,6 @@ power_off:
                goto out;
        }
 
-       wl->vif = vif;
-       wl->state = WL1271_STATE_ON;
-       set_bit(WL1271_FLAG_IF_INITIALIZED, &wl->flags);
        wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
 
        /* update hw/fw version info in wiphy struct */
@@ -1984,7 +2031,110 @@ power_off:
        wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
                     wl->enable_11a ? "" : "not ");
 
+       wl->state = WL1271_STATE_ON;
+out:
+       return booted;
+}
+
+static int wl1271_op_add_interface(struct ieee80211_hw *hw,
+                                  struct ieee80211_vif *vif)
+{
+       struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+       int ret = 0;
+       u8 role_type;
+       bool booted = false;
+
+       wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
+                    ieee80211_vif_type_p2p(vif), vif->addr);
+
+       mutex_lock(&wl->mutex);
+       ret = wl1271_ps_elp_wakeup(wl);
+       if (ret < 0)
+               goto out_unlock;
+
+       if (wl->vif) {
+               wl1271_debug(DEBUG_MAC80211,
+                            "multiple vifs are not supported yet");
+               ret = -EBUSY;
+               goto out;
+       }
+
+       /*
+        * in some corner-case HW recovery scenarios it's possible to
+        * get here before __wl1271_op_remove_interface is complete, so
+        * opt out if that is the case.
+        */
+       if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
+           test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
+               ret = -EBUSY;
+               goto out;
+       }
+
+       ret = wl12xx_init_vif_data(wl, vif);
+       if (ret < 0)
+               goto out;
+
+       wlvif->wl = wl;
+       role_type = wl12xx_get_role_type(wl, wlvif);
+       if (role_type == WL12XX_INVALID_ROLE_TYPE) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /*
+        * TODO: once the nvs issue is solved, move this block
+        * to start(), and make sure the driver is ON here.
+        */
+       if (wl->state == WL1271_STATE_OFF) {
+               /*
+                * we still need this in order to configure the fw
+                * while uploading the nvs
+                */
+               memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
+
+               booted = wl12xx_init_fw(wl);
+               if (!booted) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+       }
+
+       if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+           wlvif->bss_type == BSS_TYPE_IBSS) {
+               /*
+                * The device role is a special role used for
+                * rx and tx frames prior to association (as
+                * the STA role can get packets only from
+                * its associated bssid)
+                */
+               ret = wl12xx_cmd_role_enable(wl, vif->addr,
+                                                WL1271_ROLE_DEVICE,
+                                                &wlvif->dev_role_id);
+               if (ret < 0)
+                       goto out;
+       }
+
+       ret = wl12xx_cmd_role_enable(wl, vif->addr,
+                                    role_type, &wlvif->role_id);
+       if (ret < 0)
+               goto out;
+
+       ret = wl1271_init_vif_specific(wl, vif);
+       if (ret < 0)
+               goto out;
+
+       wl->vif = vif;
+       list_add(&wlvif->list, &wl->wlvif_list);
+       set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
+
+       if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+               wl->ap_count++;
+       else
+               wl->sta_count++;
 out:
+       wl1271_ps_elp_sleep(wl);
+out_unlock:
        mutex_unlock(&wl->mutex);
 
        mutex_lock(&wl_list_mutex);
@@ -1996,29 +2146,34 @@ out:
 }
 
 static void __wl1271_op_remove_interface(struct wl1271 *wl,
+                                        struct ieee80211_vif *vif,
                                         bool reset_tx_queues)
 {
-       int ret, i;
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+       int i, ret;
 
        wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
 
+       if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
+               return;
+
+       wl->vif = NULL;
+
        /* because of hardware recovery, we may get here twice */
        if (wl->state != WL1271_STATE_ON)
                return;
 
        wl1271_info("down");
 
-       mutex_lock(&wl_list_mutex);
-       list_del(&wl->list);
-       mutex_unlock(&wl_list_mutex);
-
        /* enable dyn ps just in case (if left on due to fw crash etc) */
-       if (wl->bss_type == BSS_TYPE_STA_BSS)
-               ieee80211_enable_dyn_ps(wl->vif);
+       if (wlvif->bss_type == BSS_TYPE_STA_BSS)
+               ieee80211_enable_dyn_ps(vif);
 
-       if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
+       if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
+           wl->scan_vif == vif) {
                wl->scan.state = WL1271_SCAN_STATE_IDLE;
                memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
+               wl->scan_vif = NULL;
                wl->scan.req = NULL;
                ieee80211_scan_completed(wl->hw, true);
        }
@@ -2029,13 +2184,13 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
                if (ret < 0)
                        goto deinit;
 
-               if (wl->bss_type == BSS_TYPE_STA_BSS) {
-                       ret = wl12xx_cmd_role_disable(wl, &wl->dev_role_id);
+               if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
+                       ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
                        if (ret < 0)
                                goto deinit;
                }
 
-               ret = wl12xx_cmd_role_disable(wl, &wl->role_id);
+               ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
                if (ret < 0)
                        goto deinit;
 
@@ -2043,120 +2198,82 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
        }
 deinit:
        /* clear all hlids (except system_hlid) */
-       wl->sta_hlid = WL12XX_INVALID_LINK_ID;
-       wl->dev_hlid = WL12XX_INVALID_LINK_ID;
-       wl->ap_bcast_hlid = WL12XX_INVALID_LINK_ID;
-       wl->ap_global_hlid = WL12XX_INVALID_LINK_ID;
+       wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
+
+       if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+           wlvif->bss_type == BSS_TYPE_IBSS) {
+               wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
+               wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
+               wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
+               wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
+       } else {
+               wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
+               wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
+               wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
+               wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
+               for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
+                       wl12xx_free_rate_policy(wl,
+                                               &wlvif->ap.ucast_rate_idx[i]);
+       }
 
-       /*
-        * this must be before the cancel_work calls below, so that the work
-        * functions don't perform further work.
-        */
-       wl->state = WL1271_STATE_OFF;
+       wl12xx_tx_reset_wlvif(wl, wlvif);
+       wl1271_free_ap_keys(wl, wlvif);
+       if (wl->last_wlvif == wlvif)
+               wl->last_wlvif = NULL;
+       list_del(&wlvif->list);
+       memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
+       wlvif->role_id = WL12XX_INVALID_ROLE_ID;
+       wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
+
+       if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+               wl->ap_count--;
+       else
+               wl->sta_count--;
 
        mutex_unlock(&wl->mutex);
-
-       wl1271_disable_interrupts(wl);
-       wl1271_flush_deferred_work(wl);
-       cancel_delayed_work_sync(&wl->scan_complete_work);
-       cancel_work_sync(&wl->netstack_work);
-       cancel_work_sync(&wl->tx_work);
-       del_timer_sync(&wl->rx_streaming_timer);
-       cancel_work_sync(&wl->rx_streaming_enable_work);
-       cancel_work_sync(&wl->rx_streaming_disable_work);
-       cancel_delayed_work_sync(&wl->pspoll_work);
-       cancel_delayed_work_sync(&wl->elp_work);
+       del_timer_sync(&wlvif->rx_streaming_timer);
+       cancel_work_sync(&wlvif->rx_streaming_enable_work);
+       cancel_work_sync(&wlvif->rx_streaming_disable_work);
+       cancel_delayed_work_sync(&wlvif->pspoll_work);
 
        mutex_lock(&wl->mutex);
-
-       /* let's notify MAC80211 about the remaining pending TX frames */
-       wl1271_tx_reset(wl, reset_tx_queues);
-       wl1271_power_off(wl);
-
-       memset(wl->bssid, 0, ETH_ALEN);
-       memset(wl->ssid, 0, IEEE80211_MAX_SSID_LEN + 1);
-       wl->ssid_len = 0;
-       wl->bss_type = MAX_BSS_TYPE;
-       wl->set_bss_type = MAX_BSS_TYPE;
-       wl->p2p = 0;
-       wl->band = IEEE80211_BAND_2GHZ;
-
-       wl->rx_counter = 0;
-       wl->psm_entry_retry = 0;
-       wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
-       wl->tx_blocks_available = 0;
-       wl->tx_allocated_blocks = 0;
-       wl->tx_results_count = 0;
-       wl->tx_packets_count = 0;
-       wl->time_offset = 0;
-       wl->session_counter = 0;
-       wl->rate_set = CONF_TX_RATE_MASK_BASIC;
-       wl->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
-       wl->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
-       wl->vif = NULL;
-       wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
-       wl1271_free_ap_keys(wl);
-       memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map));
-       wl->ap_fw_ps_map = 0;
-       wl->ap_ps_map = 0;
-       wl->sched_scanning = false;
-       wl->role_id = WL12XX_INVALID_ROLE_ID;
-       wl->dev_role_id = WL12XX_INVALID_ROLE_ID;
-       memset(wl->roles_map, 0, sizeof(wl->roles_map));
-       memset(wl->links_map, 0, sizeof(wl->links_map));
-       memset(wl->roc_map, 0, sizeof(wl->roc_map));
-       wl->active_sta_count = 0;
-
-       /* The system link is always allocated */
-       __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
-
-       /*
-        * this is performed after the cancel_work calls and the associated
-        * mutex_lock, so that wl1271_op_add_interface does not accidentally
-        * get executed before all these vars have been reset.
-        */
-       wl->flags = 0;
-
-       wl->tx_blocks_freed = 0;
-
-       for (i = 0; i < NUM_TX_QUEUES; i++) {
-               wl->tx_pkts_freed[i] = 0;
-               wl->tx_allocated_pkts[i] = 0;
-       }
-
-       wl1271_debugfs_reset(wl);
-
-       kfree(wl->fw_status);
-       wl->fw_status = NULL;
-       kfree(wl->tx_res_if);
-       wl->tx_res_if = NULL;
-       kfree(wl->target_mem_map);
-       wl->target_mem_map = NULL;
 }
 
 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
                                       struct ieee80211_vif *vif)
 {
        struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+       struct wl12xx_vif *iter;
 
        mutex_lock(&wl->mutex);
+
+       if (wl->state == WL1271_STATE_OFF ||
+           !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
+               goto out;
+
        /*
         * wl->vif can be null here if someone shuts down the interface
         * just when hardware recovery has been started.
         */
-       if (wl->vif) {
-               WARN_ON(wl->vif != vif);
-               __wl1271_op_remove_interface(wl, true);
-       }
+       wl12xx_for_each_wlvif(wl, iter) {
+               if (iter != wlvif)
+                       continue;
 
+               __wl1271_op_remove_interface(wl, vif, true);
+               break;
+       }
+       WARN_ON(iter != wlvif);
+out:
        mutex_unlock(&wl->mutex);
        cancel_work_sync(&wl->recovery_work);
 }
 
-static int wl1271_join(struct wl1271 *wl, bool set_assoc)
+static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                         bool set_assoc)
 {
        int ret;
-       bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
+       bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
 
        /*
         * One of the side effects of the JOIN command is that it clears
@@ -2167,20 +2284,20 @@ static int wl1271_join(struct wl1271 *wl, bool set_assoc)
         * Keep the below message for now, unless it starts bothering
         * users who really like to roam a lot :)
         */
-       if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+       if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
                wl1271_info("JOIN while associated.");
 
        if (set_assoc)
-               set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
+               set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
 
        if (is_ibss)
-               ret = wl12xx_cmd_role_start_ibss(wl);
+               ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
        else
-               ret = wl12xx_cmd_role_start_sta(wl);
+               ret = wl12xx_cmd_role_start_sta(wl, wlvif);
        if (ret < 0)
                goto out;
 
-       if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+       if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
                goto out;
 
        /*
@@ -2189,19 +2306,20 @@ static int wl1271_join(struct wl1271 *wl, bool set_assoc)
         * the join. The acx_aid starts the keep-alive process, and the order
         * of the commands below is relevant.
         */
-       ret = wl1271_acx_keep_alive_mode(wl, true);
+       ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
        if (ret < 0)
                goto out;
 
-       ret = wl1271_acx_aid(wl, wl->aid);
+       ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
        if (ret < 0)
                goto out;
 
-       ret = wl1271_cmd_build_klv_null_data(wl);
+       ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
        if (ret < 0)
                goto out;
 
-       ret = wl1271_acx_keep_alive_config(wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
+       ret = wl1271_acx_keep_alive_config(wl, wlvif,
+                                          CMD_TEMPL_KLV_IDX_NULL_DATA,
                                           ACX_KEEP_ALIVE_TPL_VALID);
        if (ret < 0)
                goto out;
@@ -2210,34 +2328,34 @@ out:
        return ret;
 }
 
-static int wl1271_unjoin(struct wl1271 *wl)
+static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        int ret;
 
-       if (test_and_clear_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags)) {
+       if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
+               struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+
                wl12xx_cmd_stop_channel_switch(wl);
-               ieee80211_chswitch_done(wl->vif, false);
+               ieee80211_chswitch_done(vif, false);
        }
 
        /* to stop listening to a channel, we disconnect */
-       ret = wl12xx_cmd_role_stop_sta(wl);
+       ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
        if (ret < 0)
                goto out;
 
-       memset(wl->bssid, 0, ETH_ALEN);
-
        /* reset TX security counters on a clean disconnect */
-       wl->tx_security_last_seq_lsb = 0;
-       wl->tx_security_seq = 0;
+       wlvif->tx_security_last_seq_lsb = 0;
+       wlvif->tx_security_seq = 0;
 
 out:
        return ret;
 }
 
-static void wl1271_set_band_rate(struct wl1271 *wl)
+static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
-       wl->basic_rate_set = wl->bitrate_masks[wl->band];
-       wl->rate_set = wl->basic_rate_set;
+       wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
+       wlvif->rate_set = wlvif->basic_rate_set;
 }
 
 static bool wl12xx_is_roc(struct wl1271 *wl)
@@ -2251,27 +2369,25 @@ static bool wl12xx_is_roc(struct wl1271 *wl)
        return true;
 }
 
-static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
+static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                 bool idle)
 {
        int ret;
 
        if (idle) {
                /* no need to croc if we weren't busy (e.g. during boot) */
                if (wl12xx_is_roc(wl)) {
-                       ret = wl12xx_croc(wl, wl->dev_role_id);
-                       if (ret < 0)
-                               goto out;
-
-                       ret = wl12xx_cmd_role_stop_dev(wl);
+                       ret = wl12xx_stop_dev(wl, wlvif);
                        if (ret < 0)
                                goto out;
                }
-               wl->rate_set = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
-               ret = wl1271_acx_sta_rate_policies(wl);
+               wlvif->rate_set =
+                       wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+               ret = wl1271_acx_sta_rate_policies(wl, wlvif);
                if (ret < 0)
                        goto out;
                ret = wl1271_acx_keep_alive_config(
-                       wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
+                       wl, wlvif, CMD_TEMPL_KLV_IDX_NULL_DATA,
                        ACX_KEEP_ALIVE_TPL_INVALID);
                if (ret < 0)
                        goto out;
@@ -2283,75 +2399,32 @@ static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
                        ieee80211_sched_scan_stopped(wl->hw);
                }
 
-               ret = wl12xx_cmd_role_start_dev(wl);
-               if (ret < 0)
-                       goto out;
-
-               ret = wl12xx_roc(wl, wl->dev_role_id);
-               if (ret < 0)
-                       goto out;
-               clear_bit(WL1271_FLAG_IDLE, &wl->flags);
-       }
-
-out:
-       return ret;
-}
-
-static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
-{
-       struct wl1271 *wl = hw->priv;
-       struct ieee80211_conf *conf = &hw->conf;
-       int channel, ret = 0;
-       bool is_ap;
-
-       channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
-
-       wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
-                    " changed 0x%x",
-                    channel,
-                    conf->flags & IEEE80211_CONF_PS ? "on" : "off",
-                    conf->power_level,
-                    conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
-                        changed);
-
-       /*
-        * mac80211 will go to idle nearly immediately after transmitting some
-        * frames, such as the deauth. To make sure those frames reach the air,
-        * wait here until the TX queue is fully flushed.
-        */
-       if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
-           (conf->flags & IEEE80211_CONF_IDLE))
-               wl1271_tx_flush(wl);
-
-       mutex_lock(&wl->mutex);
-
-       if (unlikely(wl->state == WL1271_STATE_OFF)) {
-               /* we support configuring the channel and band while off */
-               if ((changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
-                       wl->band = conf->channel->band;
-                       wl->channel = channel;
-               }
-
-               if ((changed & IEEE80211_CONF_CHANGE_POWER))
-                       wl->power_level = conf->power_level;
-
-               goto out;
+               ret = wl12xx_start_dev(wl, wlvif);
+               if (ret < 0)
+                       goto out;
+               clear_bit(WL1271_FLAG_IDLE, &wl->flags);
        }
 
-       is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+out:
+       return ret;
+}
+
+static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                            struct ieee80211_conf *conf, u32 changed)
+{
+       bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
+       int channel, ret;
 
-       ret = wl1271_ps_elp_wakeup(wl);
-       if (ret < 0)
-               goto out;
+       channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
 
        /* if the channel changes while joined, join again */
        if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
-           ((wl->band != conf->channel->band) ||
-            (wl->channel != channel))) {
+           ((wlvif->band != conf->channel->band) ||
+            (wlvif->channel != channel))) {
                /* send all pending packets */
                wl1271_tx_work_locked(wl);
-               wl->band = conf->channel->band;
-               wl->channel = channel;
+               wlvif->band = conf->channel->band;
+               wlvif->channel = channel;
 
                if (!is_ap) {
                        /*
@@ -2360,24 +2433,27 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
                         * possible rate for the band as a fixed rate for
                         * association frames and other control messages.
                         */
-                       if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
-                               wl1271_set_band_rate(wl);
+                       if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+                               wl1271_set_band_rate(wl, wlvif);
 
-                       wl->basic_rate =
-                               wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
-                       ret = wl1271_acx_sta_rate_policies(wl);
+                       wlvif->basic_rate =
+                               wl1271_tx_min_rate_get(wl,
+                                                      wlvif->basic_rate_set);
+                       ret = wl1271_acx_sta_rate_policies(wl, wlvif);
                        if (ret < 0)
                                wl1271_warning("rate policy for channel "
                                               "failed %d", ret);
 
-                       if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+                       if (test_bit(WLVIF_FLAG_STA_ASSOCIATED,
+                                    &wlvif->flags)) {
                                if (wl12xx_is_roc(wl)) {
                                        /* roaming */
-                                       ret = wl12xx_croc(wl, wl->dev_role_id);
+                                       ret = wl12xx_croc(wl,
+                                                         wlvif->dev_role_id);
                                        if (ret < 0)
-                                               goto out_sleep;
+                                               return ret;
                                }
-                               ret = wl1271_join(wl, false);
+                               ret = wl1271_join(wl, wlvif, false);
                                if (ret < 0)
                                        wl1271_warning("cmd join on channel "
                                                       "failed %d", ret);
@@ -2389,64 +2465,112 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
                                 */
                                if (wl12xx_is_roc(wl) &&
                                    !(conf->flags & IEEE80211_CONF_IDLE)) {
-                                       ret = wl12xx_croc(wl, wl->dev_role_id);
+                                       ret = wl12xx_stop_dev(wl, wlvif);
                                        if (ret < 0)
-                                               goto out_sleep;
+                                               return ret;
 
-                                       ret = wl12xx_roc(wl, wl->dev_role_id);
+                                       ret = wl12xx_start_dev(wl, wlvif);
                                        if (ret < 0)
-                                               wl1271_warning("roc failed %d",
-                                                              ret);
+                                               return ret;
                                }
                        }
                }
        }
 
-       if (changed & IEEE80211_CONF_CHANGE_IDLE && !is_ap) {
-               ret = wl1271_sta_handle_idle(wl,
-                                       conf->flags & IEEE80211_CONF_IDLE);
-               if (ret < 0)
-                       wl1271_warning("idle mode change failed %d", ret);
-       }
-
        /*
         * if mac80211 changes the PSM mode, make sure the mode is not
         * incorrectly changed after the pspoll failure active window.
         */
        if (changed & IEEE80211_CONF_CHANGE_PS)
-               clear_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags);
+               clear_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags);
 
        if (conf->flags & IEEE80211_CONF_PS &&
-           !test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
-               set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
+           !test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags)) {
+               set_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags);
 
                /*
                 * We enter PSM only if we're already associated.
                 * If we're not, we'll enter it when joining an SSID,
                 * through the bss_info_changed() hook.
                 */
-               if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+               if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
                        wl1271_debug(DEBUG_PSM, "psm enabled");
-                       ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
-                                                wl->basic_rate, true);
+                       ret = wl1271_ps_set_mode(wl, wlvif,
+                                                STATION_POWER_SAVE_MODE,
+                                                wlvif->basic_rate, true);
                }
        } else if (!(conf->flags & IEEE80211_CONF_PS) &&
-                  test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
+                  test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags)) {
                wl1271_debug(DEBUG_PSM, "psm disabled");
 
-               clear_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
+               clear_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags);
 
-               if (test_bit(WL1271_FLAG_PSM, &wl->flags))
-                       ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
-                                                wl->basic_rate, true);
+               if (test_bit(WLVIF_FLAG_PSM, &wlvif->flags))
+                       ret = wl1271_ps_set_mode(wl, wlvif,
+                                                STATION_ACTIVE_MODE,
+                                                wlvif->basic_rate, true);
        }
 
-       if (conf->power_level != wl->power_level) {
-               ret = wl1271_acx_tx_power(wl, conf->power_level);
+       if (conf->power_level != wlvif->power_level) {
+               ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
                if (ret < 0)
-                       goto out_sleep;
+                       return ret;
+
+               wlvif->power_level = conf->power_level;
+       }
+
+       return 0;
+}
+
+static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
+{
+       struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif;
+       struct ieee80211_conf *conf = &hw->conf;
+       int channel, ret = 0;
+
+       channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
+
+       wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
+                    " changed 0x%x",
+                    channel,
+                    conf->flags & IEEE80211_CONF_PS ? "on" : "off",
+                    conf->power_level,
+                    conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
+                        changed);
+
+       /*
+        * mac80211 will go to idle nearly immediately after transmitting some
+        * frames, such as the deauth. To make sure those frames reach the air,
+        * wait here until the TX queue is fully flushed.
+        */
+       if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
+           (conf->flags & IEEE80211_CONF_IDLE))
+               wl1271_tx_flush(wl);
+
+       mutex_lock(&wl->mutex);
+
+       /* we support configuring the channel and band even while off */
+       if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+               wl->band = conf->channel->band;
+               wl->channel = channel;
+       }
 
+       if (changed & IEEE80211_CONF_CHANGE_POWER)
                wl->power_level = conf->power_level;
+
+       if (unlikely(wl->state == WL1271_STATE_OFF))
+               goto out;
+
+       ret = wl1271_ps_elp_wakeup(wl);
+       if (ret < 0)
+               goto out;
+
+       /* configure each interface */
+       wl12xx_for_each_wlvif(wl, wlvif) {
+               ret = wl12xx_config_vif(wl, wlvif, conf, changed);
+               if (ret < 0)
+                       goto out_sleep;
        }
 
 out_sleep:
@@ -2509,6 +2633,8 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
 {
        struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
        struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif;
+
        int ret;
 
        wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
@@ -2526,15 +2652,20 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
        if (ret < 0)
                goto out;
 
-       if (wl->bss_type != BSS_TYPE_AP_BSS) {
-               if (*total & FIF_ALLMULTI)
-                       ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
-               else if (fp)
-                       ret = wl1271_acx_group_address_tbl(wl, fp->enabled,
-                                                          fp->mc_list,
-                                                          fp->mc_list_length);
-               if (ret < 0)
-                       goto out_sleep;
+       wl12xx_for_each_wlvif(wl, wlvif) {
+               if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
+                       if (*total & FIF_ALLMULTI)
+                               ret = wl1271_acx_group_address_tbl(wl, wlvif,
+                                                                  false,
+                                                                  NULL, 0);
+                       else if (fp)
+                               ret = wl1271_acx_group_address_tbl(wl, wlvif,
+                                                       fp->enabled,
+                                                       fp->mc_list,
+                                                       fp->mc_list_length);
+                       if (ret < 0)
+                               goto out_sleep;
+               }
        }
 
        /*
@@ -2551,9 +2682,10 @@ out:
        kfree(fp);
 }
 
-static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
-                       u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
-                       u16 tx_seq_16)
+static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                               u8 id, u8 key_type, u8 key_size,
+                               const u8 *key, u8 hlid, u32 tx_seq_32,
+                               u16 tx_seq_16)
 {
        struct wl1271_ap_key *ap_key;
        int i;
@@ -2568,10 +2700,10 @@ static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
         * an existing key.
         */
        for (i = 0; i < MAX_NUM_KEYS; i++) {
-               if (wl->recorded_ap_keys[i] == NULL)
+               if (wlvif->ap.recorded_keys[i] == NULL)
                        break;
 
-               if (wl->recorded_ap_keys[i]->id == id) {
+               if (wlvif->ap.recorded_keys[i]->id == id) {
                        wl1271_warning("trying to record key replacement");
                        return -EINVAL;
                }
@@ -2592,21 +2724,21 @@ static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
        ap_key->tx_seq_32 = tx_seq_32;
        ap_key->tx_seq_16 = tx_seq_16;
 
-       wl->recorded_ap_keys[i] = ap_key;
+       wlvif->ap.recorded_keys[i] = ap_key;
        return 0;
 }
 
-static void wl1271_free_ap_keys(struct wl1271 *wl)
+static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        int i;
 
        for (i = 0; i < MAX_NUM_KEYS; i++) {
-               kfree(wl->recorded_ap_keys[i]);
-               wl->recorded_ap_keys[i] = NULL;
+               kfree(wlvif->ap.recorded_keys[i]);
+               wlvif->ap.recorded_keys[i] = NULL;
        }
 }
 
-static int wl1271_ap_init_hwenc(struct wl1271 *wl)
+static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        int i, ret = 0;
        struct wl1271_ap_key *key;
@@ -2614,15 +2746,15 @@ static int wl1271_ap_init_hwenc(struct wl1271 *wl)
 
        for (i = 0; i < MAX_NUM_KEYS; i++) {
                u8 hlid;
-               if (wl->recorded_ap_keys[i] == NULL)
+               if (wlvif->ap.recorded_keys[i] == NULL)
                        break;
 
-               key = wl->recorded_ap_keys[i];
+               key = wlvif->ap.recorded_keys[i];
                hlid = key->hlid;
                if (hlid == WL12XX_INVALID_LINK_ID)
-                       hlid = wl->ap_bcast_hlid;
+                       hlid = wlvif->ap.bcast_hlid;
 
-               ret = wl1271_cmd_set_ap_key(wl, KEY_ADD_OR_REPLACE,
+               ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
                                            key->id, key->key_type,
                                            key->key_size, key->key,
                                            hlid, key->tx_seq_32,
@@ -2635,23 +2767,24 @@ static int wl1271_ap_init_hwenc(struct wl1271 *wl)
        }
 
        if (wep_key_added) {
-               ret = wl12xx_cmd_set_default_wep_key(wl, wl->default_key,
-                                                    wl->ap_bcast_hlid);
+               ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
+                                                    wlvif->ap.bcast_hlid);
                if (ret < 0)
                        goto out;
        }
 
 out:
-       wl1271_free_ap_keys(wl);
+       wl1271_free_ap_keys(wl, wlvif);
        return ret;
 }
 
-static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                      u16 action, u8 id, u8 key_type,
                       u8 key_size, const u8 *key, u32 tx_seq_32,
                       u16 tx_seq_16, struct ieee80211_sta *sta)
 {
        int ret;
-       bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+       bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
 
        if (is_ap) {
                struct wl1271_station *wl_sta;
@@ -2661,10 +2794,10 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
                        wl_sta = (struct wl1271_station *)sta->drv_priv;
                        hlid = wl_sta->hlid;
                } else {
-                       hlid = wl->ap_bcast_hlid;
+                       hlid = wlvif->ap.bcast_hlid;
                }
 
-               if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
+               if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
                        /*
                         * We do not support removing keys after AP shutdown.
                         * Pretend we do to make mac80211 happy.
@@ -2672,12 +2805,12 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
                        if (action != KEY_ADD_OR_REPLACE)
                                return 0;
 
-                       ret = wl1271_record_ap_key(wl, id,
+                       ret = wl1271_record_ap_key(wl, wlvif, id,
                                             key_type, key_size,
                                             key, hlid, tx_seq_32,
                                             tx_seq_16);
                } else {
-                       ret = wl1271_cmd_set_ap_key(wl, action,
+                       ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
                                             id, key_type, key_size,
                                             key, hlid, tx_seq_32,
                                             tx_seq_16);
@@ -2718,10 +2851,10 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
 
                /* don't remove key if hlid was already deleted */
                if (action == KEY_REMOVE &&
-                   wl->sta_hlid == WL12XX_INVALID_LINK_ID)
+                   wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
                        return 0;
 
-               ret = wl1271_cmd_set_sta_key(wl, action,
+               ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
                                             id, key_type, key_size,
                                             key, addr, tx_seq_32,
                                             tx_seq_16);
@@ -2731,8 +2864,8 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
                /* the default WEP key needs to be configured at least once */
                if (key_type == KEY_WEP) {
                        ret = wl12xx_cmd_set_default_wep_key(wl,
-                                                            wl->default_key,
-                                                            wl->sta_hlid);
+                                                       wlvif->default_key,
+                                                       wlvif->sta.hlid);
                        if (ret < 0)
                                return ret;
                }
@@ -2747,6 +2880,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                             struct ieee80211_key_conf *key_conf)
 {
        struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        int ret;
        u32 tx_seq_32 = 0;
        u16 tx_seq_16 = 0;
@@ -2782,20 +2916,20 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                key_type = KEY_TKIP;
 
                key_conf->hw_key_idx = key_conf->keyidx;
-               tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
-               tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
+               tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
+               tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
                break;
        case WLAN_CIPHER_SUITE_CCMP:
                key_type = KEY_AES;
 
-               key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-               tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
-               tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
+               key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+               tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
+               tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
                break;
        case WL1271_CIPHER_SUITE_GEM:
                key_type = KEY_GEM;
-               tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
-               tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
+               tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
+               tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
                break;
        default:
                wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
@@ -2806,7 +2940,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 
        switch (cmd) {
        case SET_KEY:
-               ret = wl1271_set_key(wl, KEY_ADD_OR_REPLACE,
+               ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
                                 key_conf->keyidx, key_type,
                                 key_conf->keylen, key_conf->key,
                                 tx_seq_32, tx_seq_16, sta);
@@ -2817,7 +2951,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                break;
 
        case DISABLE_KEY:
-               ret = wl1271_set_key(wl, KEY_REMOVE,
+               ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
                                     key_conf->keyidx, key_type,
                                     key_conf->keylen, key_conf->key,
                                     0, 0, sta);
@@ -2847,6 +2981,8 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
                             struct cfg80211_scan_request *req)
 {
        struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+
        int ret;
        u8 *ssid = NULL;
        size_t len = 0;
@@ -2876,16 +3012,15 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
 
        /* cancel ROC before scanning */
        if (wl12xx_is_roc(wl)) {
-               if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+               if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
                        /* don't allow scanning right now */
                        ret = -EBUSY;
                        goto out_sleep;
                }
-               wl12xx_croc(wl, wl->dev_role_id);
-               wl12xx_cmd_role_stop_dev(wl);
+               wl12xx_stop_dev(wl, wlvif);
        }
 
-       ret = wl1271_scan(hw->priv, ssid, len, req);
+       ret = wl1271_scan(hw->priv, vif, ssid, len, req);
 out_sleep:
        wl1271_ps_elp_sleep(wl);
 out:
@@ -2921,6 +3056,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
        }
        wl->scan.state = WL1271_SCAN_STATE_IDLE;
        memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
+       wl->scan_vif = NULL;
        wl->scan.req = NULL;
        ieee80211_scan_completed(wl->hw, true);
 
@@ -2938,6 +3074,7 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
                                      struct ieee80211_sched_scan_ies *ies)
 {
        struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        int ret;
 
        wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
@@ -2948,11 +3085,11 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
        if (ret < 0)
                goto out;
 
-       ret = wl1271_scan_sched_scan_config(wl, req, ies);
+       ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies);
        if (ret < 0)
                goto out_sleep;
 
-       ret = wl1271_scan_sched_scan_start(wl);
+       ret = wl1271_scan_sched_scan_start(wl, wlvif);
        if (ret < 0)
                goto out_sleep;
 
@@ -3017,6 +3154,7 @@ out:
 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
 {
        struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif;
        int ret = 0;
 
        mutex_lock(&wl->mutex);
@@ -3030,10 +3168,11 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
        if (ret < 0)
                goto out;
 
-       ret = wl1271_acx_rts_threshold(wl, value);
-       if (ret < 0)
-               wl1271_warning("wl1271_op_set_rts_threshold failed: %d", ret);
-
+       wl12xx_for_each_wlvif(wl, wlvif) {
+               ret = wl1271_acx_rts_threshold(wl, wlvif, value);
+               if (ret < 0)
+                       wl1271_warning("set rts threshold failed: %d", ret);
+       }
        wl1271_ps_elp_sleep(wl);
 
 out:
@@ -3042,9 +3181,10 @@ out:
        return ret;
 }
 
-static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
+static int wl1271_ssid_set(struct ieee80211_vif *vif, struct sk_buff *skb,
                            int offset)
 {
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        u8 ssid_len;
        const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
                                         skb->len - offset);
@@ -3060,8 +3200,8 @@ static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
                return -EINVAL;
        }
 
-       wl->ssid_len = ssid_len;
-       memcpy(wl->ssid, ptr+2, ssid_len);
+       wlvif->ssid_len = ssid_len;
+       memcpy(wlvif->ssid, ptr+2, ssid_len);
        return 0;
 }
 
@@ -3096,18 +3236,40 @@ static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
        skb_trim(skb, skb->len - len);
 }
 
-static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl,
-                                        u8 *probe_rsp_data,
-                                        size_t probe_rsp_len,
-                                        u32 rates)
+static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
+                                        struct ieee80211_vif *vif)
+{
+       struct sk_buff *skb;
+       int ret;
+
+       skb = ieee80211_proberesp_get(wl->hw, vif);
+       if (!skb)
+               return -EOPNOTSUPP;
+
+       ret = wl1271_cmd_template_set(wl,
+                                     CMD_TEMPL_AP_PROBE_RESPONSE,
+                                     skb->data,
+                                     skb->len, 0,
+                                     rates);
+
+       dev_kfree_skb(skb);
+       return ret;
+}
+
+static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
+                                            struct ieee80211_vif *vif,
+                                            u8 *probe_rsp_data,
+                                            size_t probe_rsp_len,
+                                            u32 rates)
 {
-       struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+       struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
        u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
        int ssid_ie_offset, ie_offset, templ_len;
        const u8 *ptr;
 
        /* no need to change probe response if the SSID is set correctly */
-       if (wl->ssid_len > 0)
+       if (wlvif->ssid_len > 0)
                return wl1271_cmd_template_set(wl,
                                               CMD_TEMPL_AP_PROBE_RESPONSE,
                                               probe_rsp_data,
@@ -3153,16 +3315,18 @@ static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl,
 }
 
 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
+                                      struct ieee80211_vif *vif,
                                       struct ieee80211_bss_conf *bss_conf,
                                       u32 changed)
 {
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        int ret = 0;
 
        if (changed & BSS_CHANGED_ERP_SLOT) {
                if (bss_conf->use_short_slot)
-                       ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT);
+                       ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
                else
-                       ret = wl1271_acx_slot(wl, SLOT_TIME_LONG);
+                       ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
                if (ret < 0) {
                        wl1271_warning("Set slot time failed %d", ret);
                        goto out;
@@ -3171,16 +3335,18 @@ static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
 
        if (changed & BSS_CHANGED_ERP_PREAMBLE) {
                if (bss_conf->use_short_preamble)
-                       wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT);
+                       wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
                else
-                       wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG);
+                       wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
        }
 
        if (changed & BSS_CHANGED_ERP_CTS_PROT) {
                if (bss_conf->use_cts_prot)
-                       ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE);
+                       ret = wl1271_acx_cts_protect(wl, wlvif,
+                                                    CTSPROTECT_ENABLE);
                else
-                       ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE);
+                       ret = wl1271_acx_cts_protect(wl, wlvif,
+                                                    CTSPROTECT_DISABLE);
                if (ret < 0) {
                        wl1271_warning("Set ctsprotect failed %d", ret);
                        goto out;
@@ -3196,14 +3362,23 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
                                          struct ieee80211_bss_conf *bss_conf,
                                          u32 changed)
 {
-       bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+       bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
        int ret = 0;
 
        if ((changed & BSS_CHANGED_BEACON_INT)) {
                wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
                        bss_conf->beacon_int);
 
-               wl->beacon_int = bss_conf->beacon_int;
+               wlvif->beacon_int = bss_conf->beacon_int;
+       }
+
+       if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
+               u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+               if (!wl1271_ap_set_probe_resp_tmpl(wl, rate, vif)) {
+                       wl1271_debug(DEBUG_AP, "probe response updated");
+                       set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
+               }
        }
 
        if ((changed & BSS_CHANGED_BEACON)) {
@@ -3214,17 +3389,19 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
                struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
                u16 tmpl_id;
 
-               if (!beacon)
+               if (!beacon) {
+                       ret = -EINVAL;
                        goto out;
+               }
 
                wl1271_debug(DEBUG_MASTER, "beacon updated");
 
-               ret = wl1271_ssid_set(wl, beacon, ieoffset);
+               ret = wl1271_ssid_set(vif, beacon, ieoffset);
                if (ret < 0) {
                        dev_kfree_skb(beacon);
                        goto out;
                }
-               min_rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+               min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
                tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
                                  CMD_TEMPL_BEACON;
                ret = wl1271_cmd_template_set(wl, tmpl_id,
@@ -3236,6 +3413,13 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
                        goto out;
                }
 
+               /*
+                * In case a probe-resp template was already set explicitly
+                * by userspace, don't use the beacon data.
+                */
+               if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
+                       goto end_bcn;
+
                /* remove TIM ie from probe response */
                wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
 
@@ -3254,7 +3438,7 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
                hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
                                                 IEEE80211_STYPE_PROBE_RESP);
                if (is_ap)
-                       ret = wl1271_ap_set_probe_resp_tmpl(wl,
+                       ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
                                                beacon->data,
                                                beacon->len,
                                                min_rate);
@@ -3264,12 +3448,15 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
                                                beacon->data,
                                                beacon->len, 0,
                                                min_rate);
+end_bcn:
                dev_kfree_skb(beacon);
                if (ret < 0)
                        goto out;
        }
 
 out:
+       if (ret != 0)
+               wl1271_error("beacon info change failed: %d", ret);
        return ret;
 }
 
@@ -3279,23 +3466,24 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
                                       struct ieee80211_bss_conf *bss_conf,
                                       u32 changed)
 {
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        int ret = 0;
 
        if ((changed & BSS_CHANGED_BASIC_RATES)) {
                u32 rates = bss_conf->basic_rates;
 
-               wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
-                                                                wl->band);
-               wl->basic_rate = wl1271_tx_min_rate_get(wl,
-                                                       wl->basic_rate_set);
+               wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
+                                                                wlvif->band);
+               wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
+                                                       wlvif->basic_rate_set);
 
-               ret = wl1271_init_ap_rates(wl);
+               ret = wl1271_init_ap_rates(wl, wlvif);
                if (ret < 0) {
                        wl1271_error("AP rate policy change failed %d", ret);
                        goto out;
                }
 
-               ret = wl1271_ap_init_templates(wl);
+               ret = wl1271_ap_init_templates(wl, vif);
                if (ret < 0)
                        goto out;
        }
@@ -3306,38 +3494,40 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
 
        if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
                if (bss_conf->enable_beacon) {
-                       if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
-                               ret = wl12xx_cmd_role_start_ap(wl);
+                       if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
+                               ret = wl12xx_cmd_role_start_ap(wl, wlvif);
                                if (ret < 0)
                                        goto out;
 
-                               ret = wl1271_ap_init_hwenc(wl);
+                               ret = wl1271_ap_init_hwenc(wl, wlvif);
                                if (ret < 0)
                                        goto out;
 
-                               set_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
+                               set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
                                wl1271_debug(DEBUG_AP, "started AP");
                        }
                } else {
-                       if (test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
-                               ret = wl12xx_cmd_role_stop_ap(wl);
+                       if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
+                               ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
                                if (ret < 0)
                                        goto out;
 
-                               clear_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
+                               clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
+                               clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
+                                         &wlvif->flags);
                                wl1271_debug(DEBUG_AP, "stopped AP");
                        }
                }
        }
 
-       ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
+       ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
        if (ret < 0)
                goto out;
 
        /* Handle HT information change */
        if ((changed & BSS_CHANGED_HT) &&
            (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
-               ret = wl1271_acx_set_ht_information(wl,
+               ret = wl1271_acx_set_ht_information(wl, wlvif,
                                        bss_conf->ht_operation_mode);
                if (ret < 0) {
                        wl1271_warning("Set ht information failed %d", ret);
@@ -3355,8 +3545,9 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
                                        struct ieee80211_bss_conf *bss_conf,
                                        u32 changed)
 {
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        bool do_join = false, set_assoc = false;
-       bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
+       bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
        bool ibss_joined = false;
        u32 sta_rate_set = 0;
        int ret;
@@ -3373,14 +3564,13 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
 
        if (changed & BSS_CHANGED_IBSS) {
                if (bss_conf->ibss_joined) {
-                       set_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags);
+                       set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
                        ibss_joined = true;
                } else {
-                       if (test_and_clear_bit(WL1271_FLAG_IBSS_JOINED,
-                                              &wl->flags)) {
-                               wl1271_unjoin(wl);
-                               wl12xx_cmd_role_start_dev(wl);
-                               wl12xx_roc(wl, wl->dev_role_id);
+                       if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED,
+                                              &wlvif->flags)) {
+                               wl1271_unjoin(wl, wlvif);
+                               wl12xx_start_dev(wl, wlvif);
                        }
                }
        }
@@ -3396,46 +3586,40 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
                wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
                             bss_conf->enable_beacon ? "enabled" : "disabled");
 
-               if (bss_conf->enable_beacon)
-                       wl->set_bss_type = BSS_TYPE_IBSS;
-               else
-                       wl->set_bss_type = BSS_TYPE_STA_BSS;
                do_join = true;
        }
 
+       if (changed & BSS_CHANGED_IDLE) {
+               ret = wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
+               if (ret < 0)
+                       wl1271_warning("idle mode change failed %d", ret);
+       }
+
        if ((changed & BSS_CHANGED_CQM)) {
                bool enable = false;
                if (bss_conf->cqm_rssi_thold)
                        enable = true;
-               ret = wl1271_acx_rssi_snr_trigger(wl, enable,
+               ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
                                                  bss_conf->cqm_rssi_thold,
                                                  bss_conf->cqm_rssi_hyst);
                if (ret < 0)
                        goto out;
-               wl->rssi_thold = bss_conf->cqm_rssi_thold;
+               wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
        }
 
-       if ((changed & BSS_CHANGED_BSSID) &&
-           /*
-            * Now we know the correct bssid, so we send a new join command
-            * and enable the BSSID filter
-            */
-           memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
-               memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
-
-               if (!is_zero_ether_addr(wl->bssid)) {
-                       ret = wl1271_cmd_build_null_data(wl);
+       if (changed & BSS_CHANGED_BSSID)
+               if (!is_zero_ether_addr(bss_conf->bssid)) {
+                       ret = wl12xx_cmd_build_null_data(wl, wlvif);
                        if (ret < 0)
                                goto out;
 
-                       ret = wl1271_build_qos_null_data(wl);
+                       ret = wl1271_build_qos_null_data(wl, vif);
                        if (ret < 0)
                                goto out;
 
                        /* Need to update the BSSID (for filtering etc) */
                        do_join = true;
                }
-       }
 
        if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) {
                rcu_read_lock();
@@ -3459,26 +3643,28 @@ sta_not_found:
                if (bss_conf->assoc) {
                        u32 rates;
                        int ieoffset;
-                       wl->aid = bss_conf->aid;
+                       wlvif->aid = bss_conf->aid;
                        set_assoc = true;
 
-                       wl->ps_poll_failures = 0;
+                       wlvif->ps_poll_failures = 0;
 
                        /*
                         * use basic rates from AP, and determine lowest rate
                         * to use with control frames.
                         */
                        rates = bss_conf->basic_rates;
-                       wl->basic_rate_set =
+                       wlvif->basic_rate_set =
                                wl1271_tx_enabled_rates_get(wl, rates,
-                                                           wl->band);
-                       wl->basic_rate =
-                               wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+                                                           wlvif->band);
+                       wlvif->basic_rate =
+                               wl1271_tx_min_rate_get(wl,
+                                                      wlvif->basic_rate_set);
                        if (sta_rate_set)
-                               wl->rate_set = wl1271_tx_enabled_rates_get(wl,
+                               wlvif->rate_set =
+                                       wl1271_tx_enabled_rates_get(wl,
                                                                sta_rate_set,
-                                                               wl->band);
-                       ret = wl1271_acx_sta_rate_policies(wl);
+                                                               wlvif->band);
+                       ret = wl1271_acx_sta_rate_policies(wl, wlvif);
                        if (ret < 0)
                                goto out;
 
@@ -3488,53 +3674,56 @@ sta_not_found:
                         * updates it by itself when the first beacon is
                         * received after a join.
                         */
-                       ret = wl1271_cmd_build_ps_poll(wl, wl->aid);
+                       ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
                        if (ret < 0)
                                goto out;
 
                        /*
                         * Get a template for hardware connection maintenance
                         */
-                       dev_kfree_skb(wl->probereq);
-                       wl->probereq = wl1271_cmd_build_ap_probe_req(wl, NULL);
+                       dev_kfree_skb(wlvif->probereq);
+                       wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
+                                                                       wlvif,
+                                                                       NULL);
                        ieoffset = offsetof(struct ieee80211_mgmt,
                                            u.probe_req.variable);
-                       wl1271_ssid_set(wl, wl->probereq, ieoffset);
+                       wl1271_ssid_set(vif, wlvif->probereq, ieoffset);
 
                        /* enable the connection monitoring feature */
-                       ret = wl1271_acx_conn_monit_params(wl, true);
+                       ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
                        if (ret < 0)
                                goto out;
                } else {
                        /* use defaults when not associated */
                        bool was_assoc =
-                           !!test_and_clear_bit(WL1271_FLAG_STA_ASSOCIATED,
-                                                &wl->flags);
+                           !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED,
+                                                &wlvif->flags);
                        bool was_ifup =
-                           !!test_and_clear_bit(WL1271_FLAG_STA_STATE_SENT,
-                                                &wl->flags);
-                       wl->aid = 0;
+                           !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT,
+                                                &wlvif->flags);
+                       wlvif->aid = 0;
 
                        /* free probe-request template */
-                       dev_kfree_skb(wl->probereq);
-                       wl->probereq = NULL;
+                       dev_kfree_skb(wlvif->probereq);
+                       wlvif->probereq = NULL;
 
                        /* re-enable dynamic ps - just in case */
-                       ieee80211_enable_dyn_ps(wl->vif);
+                       ieee80211_enable_dyn_ps(vif);
 
                        /* revert back to minimum rates for the current band */
-                       wl1271_set_band_rate(wl);
-                       wl->basic_rate =
-                               wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
-                       ret = wl1271_acx_sta_rate_policies(wl);
+                       wl1271_set_band_rate(wl, wlvif);
+                       wlvif->basic_rate =
+                               wl1271_tx_min_rate_get(wl,
+                                                      wlvif->basic_rate_set);
+                       ret = wl1271_acx_sta_rate_policies(wl, wlvif);
                        if (ret < 0)
                                goto out;
 
                        /* disable connection monitor features */
-                       ret = wl1271_acx_conn_monit_params(wl, false);
+                       ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
 
                        /* Disable the keep-alive feature */
-                       ret = wl1271_acx_keep_alive_mode(wl, false);
+                       ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
                        if (ret < 0)
                                goto out;
 
@@ -3546,7 +3735,7 @@ sta_not_found:
                                 * no IF_OPER_UP notification.
                                 */
                                if (!was_ifup) {
-                                       ret = wl12xx_croc(wl, wl->role_id);
+                                       ret = wl12xx_croc(wl, wlvif->role_id);
                                        if (ret < 0)
                                                goto out;
                                }
@@ -3555,17 +3744,16 @@ sta_not_found:
                                 * roaming on the same channel. until we have
                                 * a better flow...)
                                 */
-                               if (test_bit(wl->dev_role_id, wl->roc_map)) {
-                                       ret = wl12xx_croc(wl, wl->dev_role_id);
+                               if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
+                                       ret = wl12xx_croc(wl,
+                                                         wlvif->dev_role_id);
                                        if (ret < 0)
                                                goto out;
                                }
 
-                               wl1271_unjoin(wl);
-                               if (!(conf_flags & IEEE80211_CONF_IDLE)) {
-                                       wl12xx_cmd_role_start_dev(wl);
-                                       wl12xx_roc(wl, wl->dev_role_id);
-                               }
+                               wl1271_unjoin(wl, wlvif);
+                               if (!(conf_flags & IEEE80211_CONF_IDLE))
+                                       wl12xx_start_dev(wl, wlvif);
                        }
                }
        }
@@ -3576,27 +3764,28 @@ sta_not_found:
 
                if (bss_conf->ibss_joined) {
                        u32 rates = bss_conf->basic_rates;
-                       wl->basic_rate_set =
+                       wlvif->basic_rate_set =
                                wl1271_tx_enabled_rates_get(wl, rates,
-                                                           wl->band);
-                       wl->basic_rate =
-                               wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+                                                           wlvif->band);
+                       wlvif->basic_rate =
+                               wl1271_tx_min_rate_get(wl,
+                                                      wlvif->basic_rate_set);
 
                        /* by default, use 11b + OFDM rates */
-                       wl->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
-                       ret = wl1271_acx_sta_rate_policies(wl);
+                       wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
+                       ret = wl1271_acx_sta_rate_policies(wl, wlvif);
                        if (ret < 0)
                                goto out;
                }
        }
 
-       ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
+       ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
        if (ret < 0)
                goto out;
 
        if (changed & BSS_CHANGED_ARP_FILTER) {
                __be32 addr = bss_conf->arp_addr_list[0];
-               WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
+               WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
 
                if (bss_conf->arp_addr_cnt == 1 &&
                    bss_conf->arp_filter_enabled) {
@@ -3606,24 +3795,24 @@ sta_not_found:
                         * isn't being set (when sending), so we have to
                         * reconfigure the template upon every ip change.
                         */
-                       ret = wl1271_cmd_build_arp_rsp(wl, addr);
+                       ret = wl1271_cmd_build_arp_rsp(wl, wlvif, addr);
                        if (ret < 0) {
                                wl1271_warning("build arp rsp failed: %d", ret);
                                goto out;
                        }
 
-                       ret = wl1271_acx_arp_ip_filter(wl,
+                       ret = wl1271_acx_arp_ip_filter(wl, wlvif,
                                ACX_ARP_FILTER_ARP_FILTERING,
                                addr);
                } else
-                       ret = wl1271_acx_arp_ip_filter(wl, 0, addr);
+                       ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
 
                if (ret < 0)
                        goto out;
        }
 
        if (do_join) {
-               ret = wl1271_join(wl, set_assoc);
+               ret = wl1271_join(wl, wlvif, set_assoc);
                if (ret < 0) {
                        wl1271_warning("cmd join failed %d", ret);
                        goto out;
@@ -3631,35 +3820,31 @@ sta_not_found:
 
                /* ROC until connected (after EAPOL exchange) */
                if (!is_ibss) {
-                       ret = wl12xx_roc(wl, wl->role_id);
+                       ret = wl12xx_roc(wl, wlvif, wlvif->role_id);
                        if (ret < 0)
                                goto out;
 
-                       wl1271_check_operstate(wl,
+                       wl1271_check_operstate(wl, wlvif,
                                               ieee80211_get_operstate(vif));
                }
                /*
                 * stop device role if started (we might already be in
                 * STA role). TODO: make it better.
                 */
-               if (wl->dev_role_id != WL12XX_INVALID_ROLE_ID) {
-                       ret = wl12xx_croc(wl, wl->dev_role_id);
-                       if (ret < 0)
-                               goto out;
-
-                       ret = wl12xx_cmd_role_stop_dev(wl);
+               if (wlvif->dev_role_id != WL12XX_INVALID_ROLE_ID) {
+                       ret = wl12xx_stop_dev(wl, wlvif);
                        if (ret < 0)
                                goto out;
                }
 
                /* If we want to go into PSM but we're not there yet */
-               if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) &&
-                   !test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+               if (test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags) &&
+                   !test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
                        enum wl1271_cmd_ps_mode mode;
 
                        mode = STATION_POWER_SAVE_MODE;
-                       ret = wl1271_ps_set_mode(wl, mode,
-                                                wl->basic_rate,
+                       ret = wl1271_ps_set_mode(wl, wlvif, mode,
+                                                wlvif->basic_rate,
                                                 true);
                        if (ret < 0)
                                goto out;
@@ -3673,7 +3858,7 @@ sta_not_found:
                        ret = wl1271_acx_set_ht_capabilities(wl,
                                                             &sta_ht_cap,
                                                             true,
-                                                            wl->sta_hlid);
+                                                            wlvif->sta.hlid);
                        if (ret < 0) {
                                wl1271_warning("Set ht cap true failed %d",
                                               ret);
@@ -3685,7 +3870,7 @@ sta_not_found:
                        ret = wl1271_acx_set_ht_capabilities(wl,
                                                             &sta_ht_cap,
                                                             false,
-                                                            wl->sta_hlid);
+                                                            wlvif->sta.hlid);
                        if (ret < 0) {
                                wl1271_warning("Set ht cap false failed %d",
                                               ret);
@@ -3697,7 +3882,7 @@ sta_not_found:
        /* Handle HT information change. Done after join. */
        if ((changed & BSS_CHANGED_HT) &&
            (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
-               ret = wl1271_acx_set_ht_information(wl,
+               ret = wl1271_acx_set_ht_information(wl, wlvif,
                                        bss_conf->ht_operation_mode);
                if (ret < 0) {
                        wl1271_warning("Set ht information failed %d", ret);
@@ -3715,7 +3900,8 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
                                       u32 changed)
 {
        struct wl1271 *wl = hw->priv;
-       bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+       bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
        int ret;
 
        wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
@@ -3726,6 +3912,9 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
        if (unlikely(wl->state == WL1271_STATE_OFF))
                goto out;
 
+       if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
+               goto out;
+
        ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
@@ -3746,6 +3935,7 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
                             const struct ieee80211_tx_queue_params *params)
 {
        struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        u8 ps_scheme;
        int ret = 0;
 
@@ -3792,13 +3982,13 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
         * the txop is configured by mac80211 in units of 32us,
         * but we need it in microseconds
         */
-       ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
+       ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
                                params->cw_min, params->cw_max,
                                params->aifs, params->txop << 5);
        if (ret < 0)
                goto out_sleep;
 
-       ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
+       ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
                                 CONF_CHANNEL_TYPE_EDCF,
                                 wl1271_tx_get_queue(queue),
                                 ps_scheme, CONF_ACK_POLICY_LEGACY,
@@ -3861,43 +4051,43 @@ static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
 }
 
 static int wl1271_allocate_sta(struct wl1271 *wl,
-                            struct ieee80211_sta *sta,
-                            u8 *hlid)
+                            struct wl12xx_vif *wlvif,
+                            struct ieee80211_sta *sta)
 {
        struct wl1271_station *wl_sta;
-       int id;
+       int ret;
 
-       id = find_first_zero_bit(wl->ap_hlid_map, AP_MAX_STATIONS);
-       if (id >= AP_MAX_STATIONS) {
+
+       if (wl->active_sta_count >= AP_MAX_STATIONS) {
                wl1271_warning("could not allocate HLID - too much stations");
                return -EBUSY;
        }
 
        wl_sta = (struct wl1271_station *)sta->drv_priv;
-       set_bit(id, wl->ap_hlid_map);
-       wl_sta->hlid = WL1271_AP_STA_HLID_START + id;
-       *hlid = wl_sta->hlid;
+       ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
+       if (ret < 0) {
+               wl1271_warning("could not allocate HLID - too many links");
+               return -EBUSY;
+       }
+
+       set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
        memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
        wl->active_sta_count++;
        return 0;
 }
 
-void wl1271_free_sta(struct wl1271 *wl, u8 hlid)
+void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
 {
-       int id = hlid - WL1271_AP_STA_HLID_START;
-
-       if (hlid < WL1271_AP_STA_HLID_START)
-               return;
-
-       if (!test_bit(id, wl->ap_hlid_map))
+       if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
                return;
 
-       clear_bit(id, wl->ap_hlid_map);
+       clear_bit(hlid, wlvif->ap.sta_hlid_map);
        memset(wl->links[hlid].addr, 0, ETH_ALEN);
        wl->links[hlid].ba_bitmap = 0;
        wl1271_tx_reset_link_queues(wl, hlid);
        __clear_bit(hlid, &wl->ap_ps_map);
        __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
+       wl12xx_free_link(wl, wlvif, &hlid);
        wl->active_sta_count--;
 }
 
@@ -3906,6 +4096,8 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw,
                             struct ieee80211_sta *sta)
 {
        struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+       struct wl1271_station *wl_sta;
        int ret = 0;
        u8 hlid;
 
@@ -3914,20 +4106,23 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw,
        if (unlikely(wl->state == WL1271_STATE_OFF))
                goto out;
 
-       if (wl->bss_type != BSS_TYPE_AP_BSS)
+       if (wlvif->bss_type != BSS_TYPE_AP_BSS)
                goto out;
 
        wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
 
-       ret = wl1271_allocate_sta(wl, sta, &hlid);
+       ret = wl1271_allocate_sta(wl, wlvif, sta);
        if (ret < 0)
                goto out;
 
+       wl_sta = (struct wl1271_station *)sta->drv_priv;
+       hlid = wl_sta->hlid;
+
        ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out_free_sta;
 
-       ret = wl12xx_cmd_add_peer(wl, sta, hlid);
+       ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
        if (ret < 0)
                goto out_sleep;
 
@@ -3944,7 +4139,7 @@ out_sleep:
 
 out_free_sta:
        if (ret < 0)
-               wl1271_free_sta(wl, hlid);
+               wl1271_free_sta(wl, wlvif, hlid);
 
 out:
        mutex_unlock(&wl->mutex);
@@ -3956,6 +4151,7 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
                                struct ieee80211_sta *sta)
 {
        struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        struct wl1271_station *wl_sta;
        int ret = 0, id;
 
@@ -3964,14 +4160,14 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
        if (unlikely(wl->state == WL1271_STATE_OFF))
                goto out;
 
-       if (wl->bss_type != BSS_TYPE_AP_BSS)
+       if (wlvif->bss_type != BSS_TYPE_AP_BSS)
                goto out;
 
        wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
 
        wl_sta = (struct wl1271_station *)sta->drv_priv;
-       id = wl_sta->hlid - WL1271_AP_STA_HLID_START;
-       if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
+       id = wl_sta->hlid;
+       if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
                goto out;
 
        ret = wl1271_ps_elp_wakeup(wl);
@@ -3982,7 +4178,7 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
        if (ret < 0)
                goto out_sleep;
 
-       wl1271_free_sta(wl, wl_sta->hlid);
+       wl1271_free_sta(wl, wlvif, wl_sta->hlid);
 
 out_sleep:
        wl1271_ps_elp_sleep(wl);
@@ -3999,6 +4195,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
                                  u8 buf_size)
 {
        struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        int ret;
        u8 hlid, *ba_bitmap;
 
@@ -4016,10 +4213,10 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
                goto out;
        }
 
-       if (wl->bss_type == BSS_TYPE_STA_BSS) {
-               hlid = wl->sta_hlid;
-               ba_bitmap = &wl->ba_rx_bitmap;
-       } else if (wl->bss_type == BSS_TYPE_AP_BSS) {
+       if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
+               hlid = wlvif->sta.hlid;
+               ba_bitmap = &wlvif->sta.ba_rx_bitmap;
+       } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
                struct wl1271_station *wl_sta;
 
                wl_sta = (struct wl1271_station *)sta->drv_priv;
@@ -4039,7 +4236,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
 
        switch (action) {
        case IEEE80211_AMPDU_RX_START:
-               if (!wl->ba_support || !wl->ba_allowed) {
+               if (!wlvif->ba_support || !wlvif->ba_allowed) {
                        ret = -ENOTSUPP;
                        break;
                }
@@ -4108,8 +4305,9 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
                                   struct ieee80211_vif *vif,
                                   const struct cfg80211_bitrate_mask *mask)
 {
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        struct wl1271 *wl = hw->priv;
-       int i;
+       int i, ret = 0;
 
        wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
                mask->control[NL80211_BAND_2GHZ].legacy,
@@ -4118,19 +4316,39 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
        mutex_lock(&wl->mutex);
 
        for (i = 0; i < IEEE80211_NUM_BANDS; i++)
-               wl->bitrate_masks[i] =
+               wlvif->bitrate_masks[i] =
                        wl1271_tx_enabled_rates_get(wl,
                                                    mask->control[i].legacy,
                                                    i);
+
+       if (unlikely(wl->state == WL1271_STATE_OFF))
+               goto out;
+
+       if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
+           !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
+
+               ret = wl1271_ps_elp_wakeup(wl);
+               if (ret < 0)
+                       goto out;
+
+               wl1271_set_band_rate(wl, wlvif);
+               wlvif->basic_rate =
+                       wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+               ret = wl1271_acx_sta_rate_policies(wl, wlvif);
+
+               wl1271_ps_elp_sleep(wl);
+       }
+out:
        mutex_unlock(&wl->mutex);
 
-       return 0;
+       return ret;
 }
 
 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
                                     struct ieee80211_channel_switch *ch_switch)
 {
        struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif;
        int ret;
 
        wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
@@ -4138,19 +4356,24 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
        mutex_lock(&wl->mutex);
 
        if (unlikely(wl->state == WL1271_STATE_OFF)) {
-               mutex_unlock(&wl->mutex);
-               ieee80211_chswitch_done(wl->vif, false);
-               return;
+               wl12xx_for_each_wlvif_sta(wl, wlvif) {
+                       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+                       ieee80211_chswitch_done(vif, false);
+               }
+               goto out;
        }
 
        ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
-       ret = wl12xx_cmd_channel_switch(wl, ch_switch);
+       /* TODO: change mac80211 to pass vif as param */
+       wl12xx_for_each_wlvif_sta(wl, wlvif) {
+               ret = wl12xx_cmd_channel_switch(wl, ch_switch);
 
-       if (!ret)
-               set_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags);
+               if (!ret)
+                       set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
+       }
 
        wl1271_ps_elp_sleep(wl);
 
@@ -4170,10 +4393,6 @@ static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
 
        /* packets are considered pending if in the TX queue or the FW */
        ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
-
-       /* the above is appropriate for STA mode for PS purposes */
-       WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
-
 out:
        mutex_unlock(&wl->mutex);
 
@@ -4604,7 +4823,7 @@ static struct bin_attribute fwlog_attr = {
        .read = wl1271_sysfs_read_fwlog,
 };
 
-int wl1271_register_hw(struct wl1271 *wl)
+static int wl1271_register_hw(struct wl1271 *wl)
 {
        int ret;
 
@@ -4645,9 +4864,8 @@ int wl1271_register_hw(struct wl1271 *wl)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(wl1271_register_hw);
 
-void wl1271_unregister_hw(struct wl1271 *wl)
+static void wl1271_unregister_hw(struct wl1271 *wl)
 {
        if (wl->state == WL1271_STATE_PLT)
                __wl1271_plt_stop(wl);
@@ -4657,9 +4875,8 @@ void wl1271_unregister_hw(struct wl1271 *wl)
        wl->mac80211_registered = false;
 
 }
-EXPORT_SYMBOL_GPL(wl1271_unregister_hw);
 
-int wl1271_init_ieee80211(struct wl1271 *wl)
+static int wl1271_init_ieee80211(struct wl1271 *wl)
 {
        static const u32 cipher_suites[] = {
                WLAN_CIPHER_SUITE_WEP40,
@@ -4736,27 +4953,33 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
 
        wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
 
-       SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl));
+       /* the FW answers probe-requests in AP-mode */
+       wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+       wl->hw->wiphy->probe_resp_offload =
+               NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+               NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+               NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
+
+       SET_IEEE80211_DEV(wl->hw, wl->dev);
 
        wl->hw->sta_data_size = sizeof(struct wl1271_station);
+       wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
 
        wl->hw->max_rx_aggregation_subframes = 8;
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(wl1271_init_ieee80211);
 
 #define WL1271_DEFAULT_CHANNEL 0
 
-struct ieee80211_hw *wl1271_alloc_hw(void)
+static struct ieee80211_hw *wl1271_alloc_hw(void)
 {
        struct ieee80211_hw *hw;
-       struct platform_device *plat_dev = NULL;
        struct wl1271 *wl;
        int i, j, ret;
        unsigned int order;
 
-       BUILD_BUG_ON(AP_MAX_LINKS > WL12XX_MAX_LINKS);
+       BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
 
        hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
        if (!hw) {
@@ -4765,41 +4988,26 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
                goto err_hw_alloc;
        }
 
-       plat_dev = kmemdup(&wl1271_device, sizeof(wl1271_device), GFP_KERNEL);
-       if (!plat_dev) {
-               wl1271_error("could not allocate platform_device");
-               ret = -ENOMEM;
-               goto err_plat_alloc;
-       }
-
        wl = hw->priv;
        memset(wl, 0, sizeof(*wl));
 
        INIT_LIST_HEAD(&wl->list);
+       INIT_LIST_HEAD(&wl->wlvif_list);
 
        wl->hw = hw;
-       wl->plat_dev = plat_dev;
 
        for (i = 0; i < NUM_TX_QUEUES; i++)
-               skb_queue_head_init(&wl->tx_queue[i]);
-
-       for (i = 0; i < NUM_TX_QUEUES; i++)
-               for (j = 0; j < AP_MAX_LINKS; j++)
+               for (j = 0; j < WL12XX_MAX_LINKS; j++)
                        skb_queue_head_init(&wl->links[j].tx_queue[i]);
 
        skb_queue_head_init(&wl->deferred_rx_queue);
        skb_queue_head_init(&wl->deferred_tx_queue);
 
        INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
-       INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work);
        INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
        INIT_WORK(&wl->tx_work, wl1271_tx_work);
        INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
        INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
-       INIT_WORK(&wl->rx_streaming_enable_work,
-                 wl1271_rx_streaming_enable_work);
-       INIT_WORK(&wl->rx_streaming_disable_work,
-                 wl1271_rx_streaming_disable_work);
 
        wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
        if (!wl->freezable_wq) {
@@ -4808,41 +5016,21 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
        }
 
        wl->channel = WL1271_DEFAULT_CHANNEL;
-       wl->beacon_int = WL1271_DEFAULT_BEACON_INT;
-       wl->default_key = 0;
        wl->rx_counter = 0;
-       wl->psm_entry_retry = 0;
        wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
-       wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
-       wl->basic_rate = CONF_TX_RATE_MASK_BASIC;
-       wl->rate_set = CONF_TX_RATE_MASK_BASIC;
        wl->band = IEEE80211_BAND_2GHZ;
        wl->vif = NULL;
        wl->flags = 0;
        wl->sg_enabled = true;
        wl->hw_pg_ver = -1;
-       wl->bss_type = MAX_BSS_TYPE;
-       wl->set_bss_type = MAX_BSS_TYPE;
-       wl->last_tx_hlid = 0;
        wl->ap_ps_map = 0;
        wl->ap_fw_ps_map = 0;
        wl->quirks = 0;
        wl->platform_quirks = 0;
        wl->sched_scanning = false;
-       wl->tx_security_seq = 0;
-       wl->tx_security_last_seq_lsb = 0;
        wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
-       wl->role_id = WL12XX_INVALID_ROLE_ID;
        wl->system_hlid = WL12XX_SYSTEM_HLID;
-       wl->sta_hlid = WL12XX_INVALID_LINK_ID;
-       wl->dev_role_id = WL12XX_INVALID_ROLE_ID;
-       wl->dev_hlid = WL12XX_INVALID_LINK_ID;
-       wl->session_counter = 0;
-       wl->ap_bcast_hlid = WL12XX_INVALID_LINK_ID;
-       wl->ap_global_hlid = WL12XX_INVALID_LINK_ID;
        wl->active_sta_count = 0;
-       setup_timer(&wl->rx_streaming_timer, wl1271_rx_streaming_timer,
-                   (unsigned long) wl);
        wl->fwlog_size = 0;
        init_waitqueue_head(&wl->fwlog_waitq);
 
@@ -4860,8 +5048,6 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
 
        /* Apply default driver configuration. */
        wl1271_conf_init(wl);
-       wl->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
-       wl->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
 
        order = get_order(WL1271_AGGR_BUFFER_SIZE);
        wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
@@ -4883,49 +5069,8 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
                goto err_dummy_packet;
        }
 
-       /* Register platform device */
-       ret = platform_device_register(wl->plat_dev);
-       if (ret) {
-               wl1271_error("couldn't register platform device");
-               goto err_fwlog;
-       }
-       dev_set_drvdata(&wl->plat_dev->dev, wl);
-
-       /* Create sysfs file to control bt coex state */
-       ret = device_create_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
-       if (ret < 0) {
-               wl1271_error("failed to create sysfs file bt_coex_state");
-               goto err_platform;
-       }
-
-       /* Create sysfs file to get HW PG version */
-       ret = device_create_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
-       if (ret < 0) {
-               wl1271_error("failed to create sysfs file hw_pg_ver");
-               goto err_bt_coex_state;
-       }
-
-       /* Create sysfs file for the FW log */
-       ret = device_create_bin_file(&wl->plat_dev->dev, &fwlog_attr);
-       if (ret < 0) {
-               wl1271_error("failed to create sysfs file fwlog");
-               goto err_hw_pg_ver;
-       }
-
        return hw;
 
-err_hw_pg_ver:
-       device_remove_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
-
-err_bt_coex_state:
-       device_remove_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
-
-err_platform:
-       platform_device_unregister(wl->plat_dev);
-
-err_fwlog:
-       free_page((unsigned long)wl->fwlog);
-
 err_dummy_packet:
        dev_kfree_skb(wl->dummy_packet);
 
@@ -4937,18 +5082,14 @@ err_wq:
 
 err_hw:
        wl1271_debugfs_exit(wl);
-       kfree(plat_dev);
-
-err_plat_alloc:
        ieee80211_free_hw(hw);
 
 err_hw_alloc:
 
        return ERR_PTR(ret);
 }
-EXPORT_SYMBOL_GPL(wl1271_alloc_hw);
 
-int wl1271_free_hw(struct wl1271 *wl)
+static int wl1271_free_hw(struct wl1271 *wl)
 {
        /* Unblock any fwlog readers */
        mutex_lock(&wl->mutex);
@@ -4956,17 +5097,15 @@ int wl1271_free_hw(struct wl1271 *wl)
        wake_up_interruptible_all(&wl->fwlog_waitq);
        mutex_unlock(&wl->mutex);
 
-       device_remove_bin_file(&wl->plat_dev->dev, &fwlog_attr);
+       device_remove_bin_file(wl->dev, &fwlog_attr);
 
-       device_remove_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
+       device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
 
-       device_remove_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
-       platform_device_unregister(wl->plat_dev);
+       device_remove_file(wl->dev, &dev_attr_bt_coex_state);
        free_page((unsigned long)wl->fwlog);
        dev_kfree_skb(wl->dummy_packet);
        free_pages((unsigned long)wl->aggr_buf,
                        get_order(WL1271_AGGR_BUFFER_SIZE));
-       kfree(wl->plat_dev);
 
        wl1271_debugfs_exit(wl);
 
@@ -4983,7 +5122,174 @@ int wl1271_free_hw(struct wl1271 *wl)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(wl1271_free_hw);
+
+static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
+{
+       struct wl1271 *wl = cookie;
+       unsigned long flags;
+
+       wl1271_debug(DEBUG_IRQ, "IRQ");
+
+       /* complete the ELP completion */
+       spin_lock_irqsave(&wl->wl_lock, flags);
+       set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
+       if (wl->elp_compl) {
+               complete(wl->elp_compl);
+               wl->elp_compl = NULL;
+       }
+
+       if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
+               /* don't enqueue a work right now. mark it as pending */
+               set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
+               wl1271_debug(DEBUG_IRQ, "should not enqueue work");
+               disable_irq_nosync(wl->irq);
+               pm_wakeup_event(wl->dev, 0);
+               spin_unlock_irqrestore(&wl->wl_lock, flags);
+               return IRQ_HANDLED;
+       }
+       spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+       return IRQ_WAKE_THREAD;
+}
+
+static int __devinit wl12xx_probe(struct platform_device *pdev)
+{
+       struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
+       struct ieee80211_hw *hw;
+       struct wl1271 *wl;
+       unsigned long irqflags;
+       int ret = -ENODEV;
+
+       hw = wl1271_alloc_hw();
+       if (IS_ERR(hw)) {
+               wl1271_error("can't allocate hw");
+               ret = PTR_ERR(hw);
+               goto out;
+       }
+
+       wl = hw->priv;
+       wl->irq = platform_get_irq(pdev, 0);
+       wl->ref_clock = pdata->board_ref_clock;
+       wl->tcxo_clock = pdata->board_tcxo_clock;
+       wl->platform_quirks = pdata->platform_quirks;
+       wl->set_power = pdata->set_power;
+       wl->dev = &pdev->dev;
+       wl->if_ops = pdata->ops;
+
+       platform_set_drvdata(pdev, wl);
+
+       if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
+               irqflags = IRQF_TRIGGER_RISING;
+       else
+               irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
+
+       ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wl1271_irq,
+                                  irqflags,
+                                  pdev->name, wl);
+       if (ret < 0) {
+               wl1271_error("request_irq() failed: %d", ret);
+               goto out_free_hw;
+       }
+
+       ret = enable_irq_wake(wl->irq);
+       if (!ret) {
+               wl->irq_wake_enabled = true;
+               device_init_wakeup(wl->dev, 1);
+               if (pdata->pwr_in_suspend)
+                       hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
+
+       }
+       disable_irq(wl->irq);
+
+       ret = wl1271_init_ieee80211(wl);
+       if (ret)
+               goto out_irq;
+
+       ret = wl1271_register_hw(wl);
+       if (ret)
+               goto out_irq;
+
+       /* Create sysfs file to control bt coex state */
+       ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
+       if (ret < 0) {
+               wl1271_error("failed to create sysfs file bt_coex_state");
+               goto out_irq;
+       }
+
+       /* Create sysfs file to get HW PG version */
+       ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
+       if (ret < 0) {
+               wl1271_error("failed to create sysfs file hw_pg_ver");
+               goto out_bt_coex_state;
+       }
+
+       /* Create sysfs file for the FW log */
+       ret = device_create_bin_file(wl->dev, &fwlog_attr);
+       if (ret < 0) {
+               wl1271_error("failed to create sysfs file fwlog");
+               goto out_hw_pg_ver;
+       }
+
+       return 0;
+
+out_hw_pg_ver:
+       device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
+
+out_bt_coex_state:
+       device_remove_file(wl->dev, &dev_attr_bt_coex_state);
+
+out_irq:
+       free_irq(wl->irq, wl);
+
+out_free_hw:
+       wl1271_free_hw(wl);
+
+out:
+       return ret;
+}
+
+static int __devexit wl12xx_remove(struct platform_device *pdev)
+{
+       struct wl1271 *wl = platform_get_drvdata(pdev);
+
+       if (wl->irq_wake_enabled) {
+               device_init_wakeup(wl->dev, 0);
+               disable_irq_wake(wl->irq);
+       }
+       wl1271_unregister_hw(wl);
+       free_irq(wl->irq, wl);
+       wl1271_free_hw(wl);
+
+       return 0;
+}
+
+static const struct platform_device_id wl12xx_id_table[] __devinitconst = {
+       { "wl12xx", 0 },
+       {  } /* Terminating Entry */
+};
+MODULE_DEVICE_TABLE(platform, wl12xx_id_table);
+
+static struct platform_driver wl12xx_driver = {
+       .probe          = wl12xx_probe,
+       .remove         = __devexit_p(wl12xx_remove),
+       .id_table       = wl12xx_id_table,
+       .driver = {
+               .name   = "wl12xx_driver",
+               .owner  = THIS_MODULE,
+       }
+};
+
+static int __init wl12xx_init(void)
+{
+       return platform_driver_register(&wl12xx_driver);
+}
+module_init(wl12xx_init);
+
+static void __exit wl12xx_exit(void)
+{
+       platform_driver_unregister(&wl12xx_driver);
+}
+module_exit(wl12xx_exit);
 
 u32 wl12xx_debug_level = DEBUG_NONE;
 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
index c15ebf2..a7a1108 100644
@@ -25,6 +25,7 @@
 #include "ps.h"
 #include "io.h"
 #include "tx.h"
+#include "debug.h"
 
 #define WL1271_WAKEUP_TIMEOUT 500
 
@@ -32,6 +33,7 @@ void wl1271_elp_work(struct work_struct *work)
 {
        struct delayed_work *dwork;
        struct wl1271 *wl;
+       struct wl12xx_vif *wlvif;
 
        dwork = container_of(work, struct delayed_work, work);
        wl = container_of(dwork, struct wl1271, elp_work);
@@ -47,11 +49,15 @@ void wl1271_elp_work(struct work_struct *work)
        if (unlikely(!test_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
                goto out;
 
-       if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) ||
-           (!test_bit(WL1271_FLAG_PSM, &wl->flags) &&
-            !test_bit(WL1271_FLAG_IDLE, &wl->flags)))
+       if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
                goto out;
 
+       wl12xx_for_each_wlvif(wl, wlvif) {
+               if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags) &&
+                   !test_bit(WL1271_FLAG_IDLE, &wl->flags))
+                       goto out;
+       }
+
        wl1271_debug(DEBUG_PSM, "chip to elp");
        wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
        set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
@@ -65,13 +71,17 @@ out:
 /* Routines to toggle sleep mode while in ELP */
 void wl1271_ps_elp_sleep(struct wl1271 *wl)
 {
+       struct wl12xx_vif *wlvif;
+
        /* we shouldn't get consecutive sleep requests */
        if (WARN_ON(test_and_set_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
                return;
 
-       if (!test_bit(WL1271_FLAG_PSM, &wl->flags) &&
-           !test_bit(WL1271_FLAG_IDLE, &wl->flags))
-               return;
+       wl12xx_for_each_wlvif(wl, wlvif) {
+               if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags) &&
+                   !test_bit(WL1271_FLAG_IDLE, &wl->flags))
+                       return;
+       }
 
        ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
                                     msecs_to_jiffies(ELP_ENTRY_DELAY));
@@ -143,8 +153,8 @@ out:
        return 0;
 }
 
-int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
-                      u32 rates, bool send)
+int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                      enum wl1271_cmd_ps_mode mode, u32 rates, bool send)
 {
        int ret;
 
@@ -152,39 +162,34 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
        case STATION_POWER_SAVE_MODE:
                wl1271_debug(DEBUG_PSM, "entering psm");
 
-               ret = wl1271_acx_wake_up_conditions(wl);
+               ret = wl1271_acx_wake_up_conditions(wl, wlvif);
                if (ret < 0) {
                        wl1271_error("couldn't set wake up conditions");
                        return ret;
                }
 
-               ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
+               ret = wl1271_cmd_ps_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
                if (ret < 0)
                        return ret;
 
-               set_bit(WL1271_FLAG_PSM, &wl->flags);
+               set_bit(WLVIF_FLAG_PSM, &wlvif->flags);
                break;
        case STATION_ACTIVE_MODE:
        default:
                wl1271_debug(DEBUG_PSM, "leaving psm");
 
                /* disable beacon early termination */
-               if (wl->band == IEEE80211_BAND_2GHZ) {
-                       ret = wl1271_acx_bet_enable(wl, false);
+               if (wlvif->band == IEEE80211_BAND_2GHZ) {
+                       ret = wl1271_acx_bet_enable(wl, wlvif, false);
                        if (ret < 0)
                                return ret;
                }
 
-               /* disable beacon filtering */
-               ret = wl1271_acx_beacon_filter_opt(wl, false);
-               if (ret < 0)
-                       return ret;
-
-               ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE);
+               ret = wl1271_cmd_ps_mode(wl, wlvif, STATION_ACTIVE_MODE);
                if (ret < 0)
                        return ret;
 
-               clear_bit(WL1271_FLAG_PSM, &wl->flags);
+               clear_bit(WLVIF_FLAG_PSM, &wlvif->flags);
                break;
        }
 
@@ -223,9 +228,11 @@ static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid)
        wl1271_handle_tx_low_watermark(wl);
 }
 
-void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
+void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                         u8 hlid, bool clean_queues)
 {
        struct ieee80211_sta *sta;
+       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
 
        if (test_bit(hlid, &wl->ap_ps_map))
                return;
@@ -235,7 +242,7 @@ void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
                     clean_queues);
 
        rcu_read_lock();
-       sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr);
+       sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
        if (!sta) {
                wl1271_error("could not find sta %pM for starting ps",
                             wl->links[hlid].addr);
@@ -253,9 +260,10 @@ void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
        __set_bit(hlid, &wl->ap_ps_map);
 }
 
-void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid)
+void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
 {
        struct ieee80211_sta *sta;
+       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
 
        if (!test_bit(hlid, &wl->ap_ps_map))
                return;
@@ -265,7 +273,7 @@ void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid)
        __clear_bit(hlid, &wl->ap_ps_map);
 
        rcu_read_lock();
-       sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr);
+       sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
        if (!sta) {
                wl1271_error("could not find sta %pM for ending ps",
                             wl->links[hlid].addr);
index 25eb9bc..a12052f 100644
 #include "wl12xx.h"
 #include "acx.h"
 
-int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
-                      u32 rates, bool send);
+int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                      enum wl1271_cmd_ps_mode mode, u32 rates, bool send);
 void wl1271_ps_elp_sleep(struct wl1271 *wl);
 int wl1271_ps_elp_wakeup(struct wl1271 *wl);
 void wl1271_elp_work(struct work_struct *work);
-void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues);
-void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid);
+void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                         u8 hlid, bool clean_queues);
+void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
 
 #define WL1271_PS_COMPLETE_TIMEOUT 500
 
index 3f570f3..df34d59 100644
 
 
 /* Firmware image load chunk size */
-#define CHUNK_SIZE          512
+#define CHUNK_SIZE     16384
 
 /* Firmware image header size */
 #define FW_HDR_SIZE 8
index dee4cfe..4fbd2a7 100644
 #include <linux/sched.h>
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "acx.h"
 #include "reg.h"
 #include "rx.h"
+#include "tx.h"
 #include "io.h"
 
 static u8 wl12xx_rx_get_mem_block(struct wl12xx_fw_status *status,
@@ -96,7 +98,7 @@ static void wl1271_rx_status(struct wl1271 *wl,
 }
 
 static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
-                                bool unaligned)
+                                bool unaligned, u8 *hlid)
 {
        struct wl1271_rx_descriptor *desc;
        struct sk_buff *skb;
@@ -159,6 +161,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
         * payload aligned to 4 bytes.
         */
        memcpy(buf, data + sizeof(*desc), length - sizeof(*desc));
+       *hlid = desc->hlid;
 
        hdr = (struct ieee80211_hdr *)skb->data;
        if (ieee80211_is_beacon(hdr->frame_control))
@@ -169,10 +172,10 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
        wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
 
        seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
-       wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d", skb,
+       wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d hlid %d", skb,
                     skb->len - desc->pad_len,
                     beacon ? "beacon" : "",
-                    seq_num);
+                    seq_num, *hlid);
 
        skb_trim(skb, skb->len - desc->pad_len);
 
@@ -185,6 +188,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
 void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
 {
        struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
+       unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
        u32 buf_size;
        u32 fw_rx_counter  = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
        u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
@@ -192,8 +196,7 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
        u32 mem_block;
        u32 pkt_length;
        u32 pkt_offset;
-       bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
-       bool had_data = false;
+       u8 hlid;
        bool unaligned = false;
 
        while (drv_rx_counter != fw_rx_counter) {
@@ -253,8 +256,15 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
                         */
                        if (wl1271_rx_handle_data(wl,
                                                  wl->aggr_buf + pkt_offset,
-                                                 pkt_length, unaligned) == 1)
-                               had_data = true;
+                                                 pkt_length, unaligned,
+                                                 &hlid) == 1) {
+                               if (hlid < WL12XX_MAX_LINKS)
+                                       __set_bit(hlid, active_hlids);
+                               else
+                                       WARN(1,
+                                            "hlid exceeded WL12XX_MAX_LINKS "
+                                            "(%d)\n", hlid);
+                       }
 
                        wl->rx_counter++;
                        drv_rx_counter++;
@@ -270,17 +280,5 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
        if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
                wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
 
-       if (!is_ap && wl->conf.rx_streaming.interval && had_data &&
-           (wl->conf.rx_streaming.always ||
-            test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) {
-               u32 timeout = wl->conf.rx_streaming.duration;
-
-               /* restart rx streaming */
-               if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
-                       ieee80211_queue_work(wl->hw,
-                                            &wl->rx_streaming_enable_work);
-
-               mod_timer(&wl->rx_streaming_timer,
-                         jiffies + msecs_to_jiffies(timeout));
-       }
+       wl12xx_rearm_rx_streaming(wl, active_hlids);
 }
index fc29c67..8599dab 100644
@@ -24,6 +24,7 @@
 #include <linux/ieee80211.h>
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "cmd.h"
 #include "scan.h"
 #include "acx.h"
@@ -34,6 +35,8 @@ void wl1271_scan_complete_work(struct work_struct *work)
 {
        struct delayed_work *dwork;
        struct wl1271 *wl;
+       struct ieee80211_vif *vif;
+       struct wl12xx_vif *wlvif;
        int ret;
        bool is_sta, is_ibss;
 
@@ -50,28 +53,31 @@ void wl1271_scan_complete_work(struct work_struct *work)
        if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
                goto out;
 
+       vif = wl->scan_vif;
+       wlvif = wl12xx_vif_to_data(vif);
+
        wl->scan.state = WL1271_SCAN_STATE_IDLE;
        memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
        wl->scan.req = NULL;
+       wl->scan_vif = NULL;
 
        ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
-       if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+       if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
                /* restore hardware connection monitoring template */
-               wl1271_cmd_build_ap_probe_req(wl, wl->probereq);
+               wl1271_cmd_build_ap_probe_req(wl, wlvif, wlvif->probereq);
        }
 
        /* return to ROC if needed */
-       is_sta = (wl->bss_type == BSS_TYPE_STA_BSS);
-       is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
-       if (((is_sta && !test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) ||
-            (is_ibss && !test_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags))) &&
-           !test_bit(wl->dev_role_id, wl->roc_map)) {
+       is_sta = (wlvif->bss_type == BSS_TYPE_STA_BSS);
+       is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
+       if (((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
+            (is_ibss && !test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))) &&
+           !test_bit(wlvif->dev_role_id, wl->roc_map)) {
                /* restore remain on channel */
-               wl12xx_cmd_role_start_dev(wl);
-               wl12xx_roc(wl, wl->dev_role_id);
+               wl12xx_start_dev(wl, wlvif);
        }
        wl1271_ps_elp_sleep(wl);
 
@@ -155,9 +161,11 @@ static int wl1271_get_scan_channels(struct wl1271 *wl,
 
 #define WL1271_NOTHING_TO_SCAN 1
 
-static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
-                            bool passive, u32 basic_rate)
+static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif,
+                           enum ieee80211_band band,
+                           bool passive, u32 basic_rate)
 {
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        struct wl1271_cmd_scan *cmd;
        struct wl1271_cmd_trigger_scan_to *trigger;
        int ret;
@@ -177,11 +185,11 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
        if (passive)
                scan_options |= WL1271_SCAN_OPT_PASSIVE;
 
-       if (WARN_ON(wl->role_id == WL12XX_INVALID_ROLE_ID)) {
+       if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID)) {
                ret = -EINVAL;
                goto out;
        }
-       cmd->params.role_id = wl->role_id;
+       cmd->params.role_id = wlvif->role_id;
        cmd->params.scan_options = cpu_to_le16(scan_options);
 
        cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req,
@@ -194,7 +202,6 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
 
        cmd->params.tx_rate = cpu_to_le32(basic_rate);
        cmd->params.n_probe_reqs = wl->conf.scan.num_probe_reqs;
-       cmd->params.tx_rate = cpu_to_le32(basic_rate);
        cmd->params.tid_trigger = 0;
        cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
 
@@ -208,11 +215,11 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
                memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len);
        }
 
-       memcpy(cmd->addr, wl->mac_addr, ETH_ALEN);
+       memcpy(cmd->addr, vif->addr, ETH_ALEN);
 
-       ret = wl1271_cmd_build_probe_req(wl, wl->scan.ssid, wl->scan.ssid_len,
-                                        wl->scan.req->ie, wl->scan.req->ie_len,
-                                        band);
+       ret = wl1271_cmd_build_probe_req(wl, wlvif, wl->scan.ssid,
+                                        wl->scan.ssid_len, wl->scan.req->ie,
+                                        wl->scan.req->ie_len, band);
        if (ret < 0) {
                wl1271_error("PROBE request template failed");
                goto out;
@@ -241,11 +248,12 @@ out:
        return ret;
 }
 
-void wl1271_scan_stm(struct wl1271 *wl)
+void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif)
 {
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        int ret = 0;
        enum ieee80211_band band;
-       u32 rate;
+       u32 rate, mask;
 
        switch (wl->scan.state) {
        case WL1271_SCAN_STATE_IDLE:
@@ -253,47 +261,59 @@ void wl1271_scan_stm(struct wl1271 *wl)
 
        case WL1271_SCAN_STATE_2GHZ_ACTIVE:
                band = IEEE80211_BAND_2GHZ;
-               rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
-               ret = wl1271_scan_send(wl, band, false, rate);
+               mask = wlvif->bitrate_masks[band];
+               if (wl->scan.req->no_cck) {
+                       mask &= ~CONF_TX_CCK_RATES;
+                       if (!mask)
+                               mask = CONF_TX_RATE_MASK_BASIC_P2P;
+               }
+               rate = wl1271_tx_min_rate_get(wl, mask);
+               ret = wl1271_scan_send(wl, vif, band, false, rate);
                if (ret == WL1271_NOTHING_TO_SCAN) {
                        wl->scan.state = WL1271_SCAN_STATE_2GHZ_PASSIVE;
-                       wl1271_scan_stm(wl);
+                       wl1271_scan_stm(wl, vif);
                }
 
                break;
 
        case WL1271_SCAN_STATE_2GHZ_PASSIVE:
                band = IEEE80211_BAND_2GHZ;
-               rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
-               ret = wl1271_scan_send(wl, band, true, rate);
+               mask = wlvif->bitrate_masks[band];
+               if (wl->scan.req->no_cck) {
+                       mask &= ~CONF_TX_CCK_RATES;
+                       if (!mask)
+                               mask = CONF_TX_RATE_MASK_BASIC_P2P;
+               }
+               rate = wl1271_tx_min_rate_get(wl, mask);
+               ret = wl1271_scan_send(wl, vif, band, true, rate);
                if (ret == WL1271_NOTHING_TO_SCAN) {
                        if (wl->enable_11a)
                                wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE;
                        else
                                wl->scan.state = WL1271_SCAN_STATE_DONE;
-                       wl1271_scan_stm(wl);
+                       wl1271_scan_stm(wl, vif);
                }
 
                break;
 
        case WL1271_SCAN_STATE_5GHZ_ACTIVE:
                band = IEEE80211_BAND_5GHZ;
-               rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
-               ret = wl1271_scan_send(wl, band, false, rate);
+               rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
+               ret = wl1271_scan_send(wl, vif, band, false, rate);
                if (ret == WL1271_NOTHING_TO_SCAN) {
                        wl->scan.state = WL1271_SCAN_STATE_5GHZ_PASSIVE;
-                       wl1271_scan_stm(wl);
+                       wl1271_scan_stm(wl, vif);
                }
 
                break;
 
        case WL1271_SCAN_STATE_5GHZ_PASSIVE:
                band = IEEE80211_BAND_5GHZ;
-               rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
-               ret = wl1271_scan_send(wl, band, true, rate);
+               rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
+               ret = wl1271_scan_send(wl, vif, band, true, rate);
                if (ret == WL1271_NOTHING_TO_SCAN) {
                        wl->scan.state = WL1271_SCAN_STATE_DONE;
-                       wl1271_scan_stm(wl);
+                       wl1271_scan_stm(wl, vif);
                }
 
                break;
@@ -317,7 +337,8 @@ void wl1271_scan_stm(struct wl1271 *wl)
        }
 }
 
-int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
+int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
+               const u8 *ssid, size_t ssid_len,
                struct cfg80211_scan_request *req)
 {
        /*
@@ -338,6 +359,7 @@ int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
                wl->scan.ssid_len = 0;
        }
 
+       wl->scan_vif = vif;
        wl->scan.req = req;
        memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
 
@@ -346,7 +368,7 @@ int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
        ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
                                     msecs_to_jiffies(WL1271_SCAN_TIMEOUT));
 
-       wl1271_scan_stm(wl);
+       wl1271_scan_stm(wl, vif);
 
        return 0;
 }
@@ -550,6 +572,9 @@ wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
                         * so they're used in probe requests.
                         */
                        for (i = 0; i < req->n_ssids; i++) {
+                               if (!req->ssids[i].ssid_len)
+                                       continue;
+
                                for (j = 0; j < cmd->n_ssids; j++)
                                        if (!memcmp(req->ssids[i].ssid,
                                                   cmd->ssids[j].ssid,
@@ -585,6 +610,7 @@ out:
 }
 
 int wl1271_scan_sched_scan_config(struct wl1271 *wl,
+                                 struct wl12xx_vif *wlvif,
                                  struct cfg80211_sched_scan_request *req,
                                  struct ieee80211_sched_scan_ies *ies)
 {
@@ -631,7 +657,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
        }
 
        if (!force_passive && cfg->active[0]) {
-               ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid,
+               ret = wl1271_cmd_build_probe_req(wl, wlvif, req->ssids[0].ssid,
                                                 req->ssids[0].ssid_len,
                                                 ies->ie[IEEE80211_BAND_2GHZ],
                                                 ies->len[IEEE80211_BAND_2GHZ],
@@ -643,7 +669,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
        }
 
        if (!force_passive && cfg->active[1]) {
-               ret = wl1271_cmd_build_probe_req(wl,  req->ssids[0].ssid,
+               ret = wl1271_cmd_build_probe_req(wl, wlvif, req->ssids[0].ssid,
                                                 req->ssids[0].ssid_len,
                                                 ies->ie[IEEE80211_BAND_5GHZ],
                                                 ies->len[IEEE80211_BAND_5GHZ],
@@ -667,14 +693,14 @@ out:
        return ret;
 }
 
-int wl1271_scan_sched_scan_start(struct wl1271 *wl)
+int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        struct wl1271_cmd_sched_scan_start *start;
        int ret = 0;
 
        wl1271_debug(DEBUG_CMD, "cmd periodic scan start");
 
-       if (wl->bss_type != BSS_TYPE_STA_BSS)
+       if (wlvif->bss_type != BSS_TYPE_STA_BSS)
                return -EOPNOTSUPP;
 
        if (!test_bit(WL1271_FLAG_IDLE, &wl->flags))
index 9211515..a7ed43d 100644
 
 #include "wl12xx.h"
 
-int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
+int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
+               const u8 *ssid, size_t ssid_len,
                struct cfg80211_scan_request *req);
 int wl1271_scan_stop(struct wl1271 *wl);
 int wl1271_scan_build_probe_req(struct wl1271 *wl,
                                const u8 *ssid, size_t ssid_len,
                                const u8 *ie, size_t ie_len, u8 band);
-void wl1271_scan_stm(struct wl1271 *wl);
+void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif);
 void wl1271_scan_complete_work(struct work_struct *work);
 int wl1271_scan_sched_scan_config(struct wl1271 *wl,
+                                    struct wl12xx_vif *wlvif,
                                     struct cfg80211_sched_scan_request *req,
                                     struct ieee80211_sched_scan_ies *ies);
-int wl1271_scan_sched_scan_start(struct wl1271 *wl);
+int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 void wl1271_scan_sched_scan_stop(struct wl1271 *wl);
 void wl1271_scan_sched_scan_results(struct wl1271 *wl);
 
index 516a898..468a505 100644
@@ -24,6 +24,7 @@
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/vmalloc.h>
+#include <linux/platform_device.h>
 #include <linux/mmc/sdio_func.h>
 #include <linux/mmc/sdio_ids.h>
 #include <linux/mmc/card.h>
 #define SDIO_DEVICE_ID_TI_WL1271       0x4076
 #endif
 
+struct wl12xx_sdio_glue {
+       struct device *dev;
+       struct platform_device *core;
+};
+
 static const struct sdio_device_id wl1271_devices[] __devinitconst = {
        { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
        {}
 };
 MODULE_DEVICE_TABLE(sdio, wl1271_devices);
 
-static void wl1271_sdio_set_block_size(struct wl1271 *wl, unsigned int blksz)
-{
-       sdio_claim_host(wl->if_priv);
-       sdio_set_block_size(wl->if_priv, blksz);
-       sdio_release_host(wl->if_priv);
-}
-
-static inline struct sdio_func *wl_to_func(struct wl1271 *wl)
-{
-       return wl->if_priv;
-}
-
-static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
-{
-       return &(wl_to_func(wl)->dev);
-}
-
-static irqreturn_t wl1271_hardirq(int irq, void *cookie)
+static void wl1271_sdio_set_block_size(struct device *child,
+                                      unsigned int blksz)
 {
-       struct wl1271 *wl = cookie;
-       unsigned long flags;
+       struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
+       struct sdio_func *func = dev_to_sdio_func(glue->dev);
 
-       wl1271_debug(DEBUG_IRQ, "IRQ");
-
-       /* complete the ELP completion */
-       spin_lock_irqsave(&wl->wl_lock, flags);
-       set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
-       if (wl->elp_compl) {
-               complete(wl->elp_compl);
-               wl->elp_compl = NULL;
-       }
-
-       if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
-               /* don't enqueue a work right now. mark it as pending */
-               set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
-               wl1271_debug(DEBUG_IRQ, "should not enqueue work");
-               disable_irq_nosync(wl->irq);
-               pm_wakeup_event(wl1271_sdio_wl_to_dev(wl), 0);
-               spin_unlock_irqrestore(&wl->wl_lock, flags);
-               return IRQ_HANDLED;
-       }
-       spin_unlock_irqrestore(&wl->wl_lock, flags);
-
-       return IRQ_WAKE_THREAD;
-}
-
-static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
-{
-       disable_irq(wl->irq);
-}
-
-static void wl1271_sdio_enable_interrupts(struct wl1271 *wl)
-{
-       enable_irq(wl->irq);
+       sdio_claim_host(func);
+       sdio_set_block_size(func, blksz);
+       sdio_release_host(func);
 }
 
-static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
+static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf,
                                 size_t len, bool fixed)
 {
        int ret;
-       struct sdio_func *func = wl_to_func(wl);
+       struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
+       struct sdio_func *func = dev_to_sdio_func(glue->dev);
 
        if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
                ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
-               wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
-                            addr, ((u8 *)buf)[0]);
+               dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n",
+                       addr, ((u8 *)buf)[0]);
        } else {
                if (fixed)
                        ret = sdio_readsb(func, buf, addr, len);
                else
                        ret = sdio_memcpy_fromio(func, buf, addr, len);
 
-               wl1271_debug(DEBUG_SDIO, "sdio read 53 addr 0x%x, %zu bytes",
-                            addr, len);
-               wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
+               dev_dbg(child->parent, "sdio read 53 addr 0x%x, %zu bytes\n",
+                       addr, len);
        }
 
        if (ret)
-               wl1271_error("sdio read failed (%d)", ret);
+               dev_err(child->parent, "sdio read failed (%d)\n", ret);
 }
 
-static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
+static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf,
                                  size_t len, bool fixed)
 {
        int ret;
-       struct sdio_func *func = wl_to_func(wl);
+       struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
+       struct sdio_func *func = dev_to_sdio_func(glue->dev);
 
        if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
                sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
-               wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
-                            addr, ((u8 *)buf)[0]);
+               dev_dbg(child->parent, "sdio write 52 addr 0x%x, byte 0x%02x\n",
+                       addr, ((u8 *)buf)[0]);
        } else {
-               wl1271_debug(DEBUG_SDIO, "sdio write 53 addr 0x%x, %zu bytes",
-                            addr, len);
-               wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
+               dev_dbg(child->parent, "sdio write 53 addr 0x%x, %zu bytes\n",
+                       addr, len);
 
                if (fixed)
                        ret = sdio_writesb(func, addr, buf, len);
@@ -153,13 +114,13 @@ static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
        }
 
        if (ret)
-               wl1271_error("sdio write failed (%d)", ret);
+               dev_err(child->parent, "sdio write failed (%d)\n", ret);
 }
 
-static int wl1271_sdio_power_on(struct wl1271 *wl)
+static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
 {
-       struct sdio_func *func = wl_to_func(wl);
        int ret;
+       struct sdio_func *func = dev_to_sdio_func(glue->dev);
 
        /* If enabled, tell runtime PM not to power off the card */
        if (pm_runtime_enabled(&func->dev)) {
@@ -180,10 +141,10 @@ out:
        return ret;
 }
 
-static int wl1271_sdio_power_off(struct wl1271 *wl)
+static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
 {
-       struct sdio_func *func = wl_to_func(wl);
        int ret;
+       struct sdio_func *func = dev_to_sdio_func(glue->dev);
 
        sdio_disable_func(func);
        sdio_release_host(func);
@@ -200,46 +161,43 @@ static int wl1271_sdio_power_off(struct wl1271 *wl)
        return ret;
 }
 
-static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable)
+static int wl12xx_sdio_set_power(struct device *child, bool enable)
 {
+       struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
+
        if (enable)
-               return wl1271_sdio_power_on(wl);
+               return wl12xx_sdio_power_on(glue);
        else
-               return wl1271_sdio_power_off(wl);
+               return wl12xx_sdio_power_off(glue);
 }
 
 static struct wl1271_if_operations sdio_ops = {
-       .read           = wl1271_sdio_raw_read,
-       .write          = wl1271_sdio_raw_write,
-       .power          = wl1271_sdio_set_power,
-       .dev            = wl1271_sdio_wl_to_dev,
-       .enable_irq     = wl1271_sdio_enable_interrupts,
-       .disable_irq    = wl1271_sdio_disable_interrupts,
+       .read           = wl12xx_sdio_raw_read,
+       .write          = wl12xx_sdio_raw_write,
+       .power          = wl12xx_sdio_set_power,
        .set_block_size = wl1271_sdio_set_block_size,
 };
 
 static int __devinit wl1271_probe(struct sdio_func *func,
                                  const struct sdio_device_id *id)
 {
-       struct ieee80211_hw *hw;
-       const struct wl12xx_platform_data *wlan_data;
-       struct wl1271 *wl;
-       unsigned long irqflags;
+       struct wl12xx_platform_data *wlan_data;
+       struct wl12xx_sdio_glue *glue;
+       struct resource res[1];
        mmc_pm_flag_t mmcflags;
-       int ret;
+       int ret = -ENOMEM;
 
        /* We are only able to handle the wlan function */
        if (func->num != 0x02)
                return -ENODEV;
 
-       hw = wl1271_alloc_hw();
-       if (IS_ERR(hw))
-               return PTR_ERR(hw);
-
-       wl = hw->priv;
+       glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+       if (!glue) {
+               dev_err(&func->dev, "can't allocate glue\n");
+               goto out;
+       }
 
-       wl->if_priv = func;
-       wl->if_ops = &sdio_ops;
+       glue->dev = &func->dev;
 
        /* Grab access to FN0 for ELP reg. */
        func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
@@ -250,80 +208,79 @@ static int __devinit wl1271_probe(struct sdio_func *func,
        wlan_data = wl12xx_get_platform_data();
        if (IS_ERR(wlan_data)) {
                ret = PTR_ERR(wlan_data);
-               wl1271_error("missing wlan platform data: %d", ret);
-               goto out_free;
+               dev_err(glue->dev, "missing wlan platform data: %d\n", ret);
+               goto out_free_glue;
        }
 
-       wl->irq = wlan_data->irq;
-       wl->ref_clock = wlan_data->board_ref_clock;
-       wl->tcxo_clock = wlan_data->board_tcxo_clock;
-       wl->platform_quirks = wlan_data->platform_quirks;
+       /* if sdio can keep power while host is suspended, enable wow */
+       mmcflags = sdio_get_host_pm_caps(func);
+       dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags);
 
-       if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
-               irqflags = IRQF_TRIGGER_RISING;
-       else
-               irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
-
-       ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
-                                  irqflags,
-                                  DRIVER_NAME, wl);
-       if (ret < 0) {
-               wl1271_error("request_irq() failed: %d", ret);
-               goto out_free;
-       }
+       if (mmcflags & MMC_PM_KEEP_POWER)
+               wlan_data->pwr_in_suspend = true;
+
+       wlan_data->ops = &sdio_ops;
 
-       ret = enable_irq_wake(wl->irq);
-       if (!ret) {
-               wl->irq_wake_enabled = true;
-               device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 1);
+       sdio_set_drvdata(func, glue);
 
-               /* if sdio can keep power while host is suspended, enable wow */
-               mmcflags = sdio_get_host_pm_caps(func);
-               wl1271_debug(DEBUG_SDIO, "sdio PM caps = 0x%x", mmcflags);
+       /* Tell PM core that we don't need the card to be powered now */
+       pm_runtime_put_noidle(&func->dev);
 
-               if (mmcflags & MMC_PM_KEEP_POWER)
-                       hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
+       glue->core = platform_device_alloc("wl12xx", -1);
+       if (!glue->core) {
+               dev_err(glue->dev, "can't allocate platform_device");
+               ret = -ENOMEM;
+               goto out_free_glue;
        }
-       disable_irq(wl->irq);
 
-       ret = wl1271_init_ieee80211(wl);
-       if (ret)
-               goto out_irq;
+       glue->core->dev.parent = &func->dev;
 
-       ret = wl1271_register_hw(wl);
-       if (ret)
-               goto out_irq;
+       memset(res, 0x00, sizeof(res));
 
-       sdio_set_drvdata(func, wl);
+       res[0].start = wlan_data->irq;
+       res[0].flags = IORESOURCE_IRQ;
+       res[0].name = "irq";
 
-       /* Tell PM core that we don't need the card to be powered now */
-       pm_runtime_put_noidle(&func->dev);
+       ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
+       if (ret) {
+               dev_err(glue->dev, "can't add resources\n");
+               goto out_dev_put;
+       }
 
+       ret = platform_device_add_data(glue->core, wlan_data,
+                                      sizeof(*wlan_data));
+       if (ret) {
+               dev_err(glue->dev, "can't add platform data\n");
+               goto out_dev_put;
+       }
+
+       ret = platform_device_add(glue->core);
+       if (ret) {
+               dev_err(glue->dev, "can't add platform device\n");
+               goto out_dev_put;
+       }
        return 0;
 
- out_irq:
-       free_irq(wl->irq, wl);
+out_dev_put:
+       platform_device_put(glue->core);
 
- out_free:
-       wl1271_free_hw(wl);
+out_free_glue:
+       kfree(glue);
 
+out:
        return ret;
 }
 
 static void __devexit wl1271_remove(struct sdio_func *func)
 {
-       struct wl1271 *wl = sdio_get_drvdata(func);
+       struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
 
        /* Undo decrement done above in wl1271_probe */
        pm_runtime_get_noresume(&func->dev);
 
-       wl1271_unregister_hw(wl);
-       if (wl->irq_wake_enabled) {
-               device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 0);
-               disable_irq_wake(wl->irq);
-       }
-       free_irq(wl->irq, wl);
-       wl1271_free_hw(wl);
+       platform_device_del(glue->core);
+       platform_device_put(glue->core);
+       kfree(glue);
 }
 
 #ifdef CONFIG_PM
@@ -332,20 +289,21 @@ static int wl1271_suspend(struct device *dev)
        /* Tell MMC/SDIO core it's OK to power down the card
         * (if it isn't already), but not to remove it completely */
        struct sdio_func *func = dev_to_sdio_func(dev);
-       struct wl1271 *wl = sdio_get_drvdata(func);
+       struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
+       struct wl1271 *wl = platform_get_drvdata(glue->core);
        mmc_pm_flag_t sdio_flags;
        int ret = 0;
 
-       wl1271_debug(DEBUG_MAC80211, "wl1271 suspend. wow_enabled: %d",
-                    wl->wow_enabled);
+       dev_dbg(dev, "wl1271 suspend. wow_enabled: %d\n",
+               wl->wow_enabled);
 
        /* check whether sdio should keep power */
        if (wl->wow_enabled) {
                sdio_flags = sdio_get_host_pm_caps(func);
 
                if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
-                       wl1271_error("can't keep power while host "
-                                    "is suspended");
+                       dev_err(dev, "can't keep power while host "
+                                    "is suspended\n");
                        ret = -EINVAL;
                        goto out;
                }
@@ -353,7 +311,7 @@ static int wl1271_suspend(struct device *dev)
                /* keep power while host suspended */
                ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
                if (ret) {
-                       wl1271_error("error while trying to keep power");
+                       dev_err(dev, "error while trying to keep power\n");
                        goto out;
                }
 
@@ -367,9 +325,10 @@ out:
 static int wl1271_resume(struct device *dev)
 {
        struct sdio_func *func = dev_to_sdio_func(dev);
-       struct wl1271 *wl = sdio_get_drvdata(func);
+       struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
+       struct wl1271 *wl = platform_get_drvdata(glue->core);
 
-       wl1271_debug(DEBUG_MAC80211, "wl1271 resume");
+       dev_dbg(dev, "wl1271 resume\n");
        if (wl->wow_enabled) {
                /* claim back host */
                sdio_claim_host(func);
diff --git a/drivers/net/wireless/wl12xx/sdio_test.c b/drivers/net/wireless/wl12xx/sdio_test.c
deleted file mode 100644
index f25d5d9..0000000
+++ /dev/null
@@ -1,543 +0,0 @@
-/*
- * SDIO testing driver for wl12xx
- *
- * Copyright (C) 2010 Nokia Corporation
- *
- * Contact: Roger Quadros <roger.quadros@nokia.com>
- *
- * wl12xx read/write routines taken from the main module
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/irq.h>
-#include <linux/module.h>
-#include <linux/crc7.h>
-#include <linux/vmalloc.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/host.h>
-#include <linux/gpio.h>
-#include <linux/wl12xx.h>
-#include <linux/kthread.h>
-#include <linux/firmware.h>
-#include <linux/pm_runtime.h>
-
-#include "wl12xx.h"
-#include "io.h"
-#include "boot.h"
-
-#ifndef SDIO_VENDOR_ID_TI
-#define SDIO_VENDOR_ID_TI              0x0097
-#endif
-
-#ifndef SDIO_DEVICE_ID_TI_WL1271
-#define SDIO_DEVICE_ID_TI_WL1271       0x4076
-#endif
-
-static bool rx, tx;
-
-module_param(rx, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(rx, "Perform rx test. Default (0). "
-       "This test continuously reads data from the SDIO device.\n");
-
-module_param(tx, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(tx, "Perform tx test. Default (0). "
-       "This test continuously writes data to the SDIO device.\n");
-
-struct wl1271_test {
-       struct wl1271 wl;
-       struct task_struct *test_task;
-};
-
-static const struct sdio_device_id wl1271_devices[] = {
-       { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
-       {}
-};
-
-static inline struct sdio_func *wl_to_func(struct wl1271 *wl)
-{
-       return wl->if_priv;
-}
-
-static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
-{
-       return &(wl_to_func(wl)->dev);
-}
-
-static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
-               size_t len, bool fixed)
-{
-       int ret = 0;
-       struct sdio_func *func = wl_to_func(wl);
-
-       if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
-               ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
-               wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
-                               addr, ((u8 *)buf)[0]);
-       } else {
-               if (fixed)
-                       ret = sdio_readsb(func, buf, addr, len);
-               else
-                       ret = sdio_memcpy_fromio(func, buf, addr, len);
-
-               wl1271_debug(DEBUG_SDIO, "sdio read 53 addr 0x%x, %zu bytes",
-                               addr, len);
-               wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
-       }
-
-       if (ret)
-               wl1271_error("sdio read failed (%d)", ret);
-}
-
-static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
-               size_t len, bool fixed)
-{
-       int ret = 0;
-       struct sdio_func *func = wl_to_func(wl);
-
-       if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
-               sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
-               wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
-                               addr, ((u8 *)buf)[0]);
-       } else {
-               wl1271_debug(DEBUG_SDIO, "sdio write 53 addr 0x%x, %zu bytes",
-                               addr, len);
-               wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
-
-               if (fixed)
-                       ret = sdio_writesb(func, addr, buf, len);
-               else
-                       ret = sdio_memcpy_toio(func, addr, buf, len);
-       }
-       if (ret)
-               wl1271_error("sdio write failed (%d)", ret);
-
-}
-
-static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable)
-{
-       struct sdio_func *func = wl_to_func(wl);
-       int ret;
-
-       /* Let the SDIO stack handle wlan_enable control, so we
-        * keep host claimed while wlan is in use to keep wl1271
-        * alive.
-        */
-       if (enable) {
-               /* Power up the card */
-               ret = pm_runtime_get_sync(&func->dev);
-               if (ret < 0)
-                       goto out;
-
-               /* Runtime PM might be disabled, power up the card manually */
-               ret = mmc_power_restore_host(func->card->host);
-               if (ret < 0)
-                       goto out;
-
-               sdio_claim_host(func);
-               sdio_enable_func(func);
-       } else {
-               sdio_disable_func(func);
-               sdio_release_host(func);
-
-               /* Runtime PM might be disabled, power off the card manually */
-               ret = mmc_power_save_host(func->card->host);
-               if (ret < 0)
-                       goto out;
-
-               /* Power down the card */
-               ret = pm_runtime_put_sync(&func->dev);
-       }
-
-out:
-       return ret;
-}
-
-static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
-{
-}
-
-static void wl1271_sdio_enable_interrupts(struct wl1271 *wl)
-{
-}
-
-
-static struct wl1271_if_operations sdio_ops = {
-       .read           = wl1271_sdio_raw_read,
-       .write          = wl1271_sdio_raw_write,
-       .power          = wl1271_sdio_set_power,
-       .dev            = wl1271_sdio_wl_to_dev,
-       .enable_irq     = wl1271_sdio_enable_interrupts,
-       .disable_irq    = wl1271_sdio_disable_interrupts,
-};
-
-static void wl1271_fw_wakeup(struct wl1271 *wl)
-{
-       u32 elp_reg;
-
-       elp_reg = ELPCTRL_WAKE_UP;
-       wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
-}
-
-static int wl1271_fetch_firmware(struct wl1271 *wl)
-{
-       const struct firmware *fw;
-       int ret;
-
-       if (wl->chip.id == CHIP_ID_1283_PG20)
-               ret = request_firmware(&fw, WL128X_FW_NAME,
-                                      wl1271_wl_to_dev(wl));
-       else
-               ret = request_firmware(&fw, WL127X_FW_NAME,
-                                      wl1271_wl_to_dev(wl));
-
-       if (ret < 0) {
-               wl1271_error("could not get firmware: %d", ret);
-               return ret;
-       }
-
-       if (fw->size % 4) {
-               wl1271_error("firmware size is not multiple of 32 bits: %zu",
-                               fw->size);
-               ret = -EILSEQ;
-               goto out;
-       }
-
-       wl->fw_len = fw->size;
-       wl->fw = vmalloc(wl->fw_len);
-
-       if (!wl->fw) {
-               wl1271_error("could not allocate memory for the firmware");
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       memcpy(wl->fw, fw->data, wl->fw_len);
-
-       ret = 0;
-
-out:
-       release_firmware(fw);
-
-       return ret;
-}
-
-static int wl1271_fetch_nvs(struct wl1271 *wl)
-{
-       const struct firmware *fw;
-       int ret;
-
-       ret = request_firmware(&fw, WL12XX_NVS_NAME, wl1271_wl_to_dev(wl));
-
-       if (ret < 0) {
-               wl1271_error("could not get nvs file: %d", ret);
-               return ret;
-       }
-
-       wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
-
-       if (!wl->nvs) {
-               wl1271_error("could not allocate memory for the nvs file");
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       wl->nvs_len = fw->size;
-
-out:
-       release_firmware(fw);
-
-       return ret;
-}
-
-static int wl1271_chip_wakeup(struct wl1271 *wl)
-{
-       struct wl1271_partition_set partition;
-       int ret;
-
-       msleep(WL1271_PRE_POWER_ON_SLEEP);
-       ret = wl1271_power_on(wl);
-       if (ret)
-               return ret;
-
-       msleep(WL1271_POWER_ON_SLEEP);
-
-       /* We don't need a real memory partition here, because we only want
-        * to use the registers at this point. */
-       memset(&partition, 0, sizeof(partition));
-       partition.reg.start = REGISTERS_BASE;
-       partition.reg.size = REGISTERS_DOWN_SIZE;
-       wl1271_set_partition(wl, &partition);
-
-       /* ELP module wake up */
-       wl1271_fw_wakeup(wl);
-
-       /* whal_FwCtrl_BootSm() */
-
-       /* 0. read chip id from CHIP_ID */
-       wl->chip.id = wl1271_read32(wl, CHIP_ID_B);
-
-       /* 1. check if chip id is valid */
-
-       switch (wl->chip.id) {
-       case CHIP_ID_1271_PG10:
-               wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete",
-                               wl->chip.id);
-               break;
-       case CHIP_ID_1271_PG20:
-               wl1271_notice("chip id 0x%x (1271 PG20)",
-                               wl->chip.id);
-               break;
-       case CHIP_ID_1283_PG20:
-               wl1271_notice("chip id 0x%x (1283 PG20)",
-                               wl->chip.id);
-               break;
-       case CHIP_ID_1283_PG10:
-       default:
-               wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
-               return -ENODEV;
-       }
-
-       return ret;
-}
-
-static struct wl1271_partition_set part_down = {
-       .mem = {
-               .start = 0x00000000,
-               .size  = 0x000177c0
-       },
-       .reg = {
-               .start = REGISTERS_BASE,
-               .size  = 0x00008800
-       },
-       .mem2 = {
-               .start = 0x00000000,
-               .size  = 0x00000000
-       },
-       .mem3 = {
-               .start = 0x00000000,
-               .size  = 0x00000000
-       },
-};
-
-static int tester(void *data)
-{
-       struct wl1271 *wl = data;
-       struct sdio_func *func = wl_to_func(wl);
-       struct device *pdev = &func->dev;
-       int ret = 0;
-       bool rx_started = 0;
-       bool tx_started = 0;
-       uint8_t *tx_buf, *rx_buf;
-       int test_size = PAGE_SIZE;
-       u32 addr = 0;
-       struct wl1271_partition_set partition;
-
-       /* We assume chip is powered up and firmware fetched */
-
-       memcpy(&partition, &part_down, sizeof(partition));
-       partition.mem.start = addr;
-       wl1271_set_partition(wl, &partition);
-
-       tx_buf = kmalloc(test_size, GFP_KERNEL);
-       rx_buf = kmalloc(test_size, GFP_KERNEL);
-       if (!tx_buf || !rx_buf) {
-               dev_err(pdev,
-                       "Could not allocate memory. Test will not run.\n");
-               ret = -ENOMEM;
-               goto free;
-       }
-
-       memset(tx_buf, 0x5a, test_size);
-
-       /* write something in data area so we can read it back */
-       wl1271_write(wl, addr, tx_buf, test_size, false);
-
-       while (!kthread_should_stop()) {
-               if (rx && !rx_started) {
-                       dev_info(pdev, "starting rx test\n");
-                       rx_started = 1;
-               } else if (!rx && rx_started) {
-                       dev_info(pdev, "stopping rx test\n");
-                       rx_started = 0;
-               }
-
-               if (tx && !tx_started) {
-                       dev_info(pdev, "starting tx test\n");
-                       tx_started = 1;
-               } else if (!tx && tx_started) {
-                       dev_info(pdev, "stopping tx test\n");
-                       tx_started = 0;
-               }
-
-               if (rx_started)
-                       wl1271_read(wl, addr, rx_buf, test_size, false);
-
-               if (tx_started)
-                       wl1271_write(wl, addr, tx_buf, test_size, false);
-
-               if (!rx_started && !tx_started)
-                       msleep(100);
-       }
-
-free:
-       kfree(tx_buf);
-       kfree(rx_buf);
-       return ret;
-}
-
-static int __devinit wl1271_probe(struct sdio_func *func,
-               const struct sdio_device_id *id)
-{
-       const struct wl12xx_platform_data *wlan_data;
-       struct wl1271 *wl;
-       struct wl1271_test *wl_test;
-       int ret = 0;
-
-       /* wl1271 has 2 sdio functions we handle just the wlan part */
-       if (func->num != 0x02)
-               return -ENODEV;
-
-       wl_test = kzalloc(sizeof(struct wl1271_test), GFP_KERNEL);
-       if (!wl_test) {
-               dev_err(&func->dev, "Could not allocate memory\n");
-               return -ENOMEM;
-       }
-
-       wl = &wl_test->wl;
-
-       wl->if_priv = func;
-       wl->if_ops = &sdio_ops;
-
-       /* Grab access to FN0 for ELP reg. */
-       func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
-
-       /* Use block mode for transferring over one block size of data */
-       func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
-
-       wlan_data = wl12xx_get_platform_data();
-       if (IS_ERR(wlan_data)) {
-               ret = PTR_ERR(wlan_data);
-               dev_err(&func->dev, "missing wlan platform data: %d\n", ret);
-               goto out_free;
-       }
-
-       wl->irq = wlan_data->irq;
-       wl->ref_clock = wlan_data->board_ref_clock;
-       wl->tcxo_clock = wlan_data->board_tcxo_clock;
-
-       sdio_set_drvdata(func, wl_test);
-
-       /* power up the device */
-       ret = wl1271_chip_wakeup(wl);
-       if (ret) {
-               dev_err(&func->dev, "could not wake up chip\n");
-               goto out_free;
-       }
-
-       if (wl->fw == NULL) {
-               ret = wl1271_fetch_firmware(wl);
-               if (ret < 0) {
-                       dev_err(&func->dev, "firmware fetch error\n");
-                       goto out_off;
-               }
-       }
-
-       /* fetch NVS */
-       if (wl->nvs == NULL) {
-               ret = wl1271_fetch_nvs(wl);
-               if (ret < 0) {
-                       dev_err(&func->dev, "NVS fetch error\n");
-                       goto out_off;
-               }
-       }
-
-       ret = wl1271_load_firmware(wl);
-       if (ret < 0) {
-               dev_err(&func->dev, "firmware load error: %d\n", ret);
-               goto out_free;
-       }
-
-       dev_info(&func->dev, "initialized\n");
-
-       /* I/O testing will be done in the tester thread */
-
-       wl_test->test_task = kthread_run(tester, wl, "sdio_tester");
-       if (IS_ERR(wl_test->test_task)) {
-               dev_err(&func->dev, "unable to create kernel thread\n");
-               ret = PTR_ERR(wl_test->test_task);
-               goto out_free;
-       }
-
-       return 0;
-
-out_off:
-       /* power off the chip */
-       wl1271_power_off(wl);
-
-out_free:
-       kfree(wl_test);
-       return ret;
-}
-
-static void __devexit wl1271_remove(struct sdio_func *func)
-{
-       struct wl1271_test *wl_test = sdio_get_drvdata(func);
-
-       /* stop the I/O test thread */
-       kthread_stop(wl_test->test_task);
-
-       /* power off the chip */
-       wl1271_power_off(&wl_test->wl);
-
-       vfree(wl_test->wl.fw);
-       wl_test->wl.fw = NULL;
-       kfree(wl_test->wl.nvs);
-       wl_test->wl.nvs = NULL;
-
-       kfree(wl_test);
-}
-
-static struct sdio_driver wl1271_sdio_driver = {
-       .name           = "wl12xx_sdio_test",
-       .id_table       = wl1271_devices,
-       .probe          = wl1271_probe,
-       .remove         = __devexit_p(wl1271_remove),
-};
-
-static int __init wl1271_init(void)
-{
-       int ret;
-
-       ret = sdio_register_driver(&wl1271_sdio_driver);
-       if (ret < 0)
-               pr_err("failed to register sdio driver: %d\n", ret);
-
-       return ret;
-}
-module_init(wl1271_init);
-
-static void __exit wl1271_exit(void)
-{
-       sdio_unregister_driver(&wl1271_sdio_driver);
-}
-module_exit(wl1271_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Roger Quadros <roger.quadros@nokia.com>");
-
index 0f97186..92caa7c 100644
@@ -27,6 +27,7 @@
 #include <linux/crc7.h>
 #include <linux/spi/spi.h>
 #include <linux/wl12xx.h>
+#include <linux/platform_device.h>
 #include <linux/slab.h>
 
 #include "wl12xx.h"
 
 #define WSPI_MAX_NUM_OF_CHUNKS (WL1271_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE)
 
-static inline struct spi_device *wl_to_spi(struct wl1271 *wl)
-{
-       return wl->if_priv;
-}
-
-static struct device *wl1271_spi_wl_to_dev(struct wl1271 *wl)
-{
-       return &(wl_to_spi(wl)->dev);
-}
-
-static void wl1271_spi_disable_interrupts(struct wl1271 *wl)
-{
-       disable_irq(wl->irq);
-}
-
-static void wl1271_spi_enable_interrupts(struct wl1271 *wl)
-{
-       enable_irq(wl->irq);
-}
+struct wl12xx_spi_glue {
+       struct device *dev;
+       struct platform_device *core;
+};
 
-static void wl1271_spi_reset(struct wl1271 *wl)
+static void wl12xx_spi_reset(struct device *child)
 {
+       struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
        u8 *cmd;
        struct spi_transfer t;
        struct spi_message m;
 
        cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
        if (!cmd) {
-               wl1271_error("could not allocate cmd for spi reset");
+               dev_err(child->parent,
+                       "could not allocate cmd for spi reset\n");
                return;
        }
 
@@ -110,21 +98,22 @@ static void wl1271_spi_reset(struct wl1271 *wl)
        t.len = WSPI_INIT_CMD_LEN;
        spi_message_add_tail(&t, &m);
 
-       spi_sync(wl_to_spi(wl), &m);
+       spi_sync(to_spi_device(glue->dev), &m);
 
-       wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
        kfree(cmd);
 }
 
-static void wl1271_spi_init(struct wl1271 *wl)
+static void wl12xx_spi_init(struct device *child)
 {
+       struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
        u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd;
        struct spi_transfer t;
        struct spi_message m;
 
        cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
        if (!cmd) {
-               wl1271_error("could not allocate cmd for spi init");
+               dev_err(child->parent,
+                       "could not allocate cmd for spi init\n");
                return;
        }
 
@@ -165,15 +154,16 @@ static void wl1271_spi_init(struct wl1271 *wl)
        t.len = WSPI_INIT_CMD_LEN;
        spi_message_add_tail(&t, &m);
 
-       spi_sync(wl_to_spi(wl), &m);
-       wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
+       spi_sync(to_spi_device(glue->dev), &m);
        kfree(cmd);
 }
 
 #define WL1271_BUSY_WORD_TIMEOUT 1000
 
-static int wl1271_spi_read_busy(struct wl1271 *wl)
+static int wl12xx_spi_read_busy(struct device *child)
 {
+       struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
+       struct wl1271 *wl = dev_get_drvdata(child);
        struct spi_transfer t[1];
        struct spi_message m;
        u32 *busy_buf;
@@ -194,20 +184,22 @@ static int wl1271_spi_read_busy(struct wl1271 *wl)
                t[0].len = sizeof(u32);
                t[0].cs_change = true;
                spi_message_add_tail(&t[0], &m);
-               spi_sync(wl_to_spi(wl), &m);
+               spi_sync(to_spi_device(glue->dev), &m);
 
                if (*busy_buf & 0x1)
                        return 0;
        }
 
        /* The SPI bus is unresponsive, the read failed. */
-       wl1271_error("SPI read busy-word timeout!\n");
+       dev_err(child->parent, "SPI read busy-word timeout!\n");
        return -ETIMEDOUT;
 }
 
-static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
+static void wl12xx_spi_raw_read(struct device *child, int addr, void *buf,
                                size_t len, bool fixed)
 {
+       struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
+       struct wl1271 *wl = dev_get_drvdata(child);
        struct spi_transfer t[2];
        struct spi_message m;
        u32 *busy_buf;
@@ -243,10 +235,10 @@ static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
                t[1].cs_change = true;
                spi_message_add_tail(&t[1], &m);
 
-               spi_sync(wl_to_spi(wl), &m);
+               spi_sync(to_spi_device(glue->dev), &m);
 
                if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) &&
-                   wl1271_spi_read_busy(wl)) {
+                   wl12xx_spi_read_busy(child)) {
                        memset(buf, 0, chunk_len);
                        return;
                }
@@ -259,10 +251,7 @@ static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
                t[0].cs_change = true;
                spi_message_add_tail(&t[0], &m);
 
-               spi_sync(wl_to_spi(wl), &m);
-
-               wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd));
-               wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, chunk_len);
+               spi_sync(to_spi_device(glue->dev), &m);
 
                if (!fixed)
                        addr += chunk_len;
@@ -271,9 +260,10 @@ static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
        }
 }
 
-static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
-                         size_t len, bool fixed)
+static void wl12xx_spi_raw_write(struct device *child, int addr, void *buf,
+                                size_t len, bool fixed)
 {
+       struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
        struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS];
        struct spi_message m;
        u32 commands[WSPI_MAX_NUM_OF_CHUNKS];
@@ -308,9 +298,6 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
                t[i].len = chunk_len;
                spi_message_add_tail(&t[i++], &m);
 
-               wl1271_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd));
-               wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, chunk_len);
-
                if (!fixed)
                        addr += chunk_len;
                buf += chunk_len;
@@ -318,72 +305,41 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
                cmd++;
        }
 
-       spi_sync(wl_to_spi(wl), &m);
-}
-
-static irqreturn_t wl1271_hardirq(int irq, void *cookie)
-{
-       struct wl1271 *wl = cookie;
-       unsigned long flags;
-
-       wl1271_debug(DEBUG_IRQ, "IRQ");
-
-       /* complete the ELP completion */
-       spin_lock_irqsave(&wl->wl_lock, flags);
-       set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
-       if (wl->elp_compl) {
-               complete(wl->elp_compl);
-               wl->elp_compl = NULL;
-       }
-       spin_unlock_irqrestore(&wl->wl_lock, flags);
-
-       return IRQ_WAKE_THREAD;
-}
-
-static int wl1271_spi_set_power(struct wl1271 *wl, bool enable)
-{
-       if (wl->set_power)
-               wl->set_power(enable);
-
-       return 0;
+       spi_sync(to_spi_device(glue->dev), &m);
 }
 
 static struct wl1271_if_operations spi_ops = {
-       .read           = wl1271_spi_raw_read,
-       .write          = wl1271_spi_raw_write,
-       .reset          = wl1271_spi_reset,
-       .init           = wl1271_spi_init,
-       .power          = wl1271_spi_set_power,
-       .dev            = wl1271_spi_wl_to_dev,
-       .enable_irq     = wl1271_spi_enable_interrupts,
-       .disable_irq    = wl1271_spi_disable_interrupts,
+       .read           = wl12xx_spi_raw_read,
+       .write          = wl12xx_spi_raw_write,
+       .reset          = wl12xx_spi_reset,
+       .init           = wl12xx_spi_init,
        .set_block_size = NULL,
 };
 
 static int __devinit wl1271_probe(struct spi_device *spi)
 {
+       struct wl12xx_spi_glue *glue;
        struct wl12xx_platform_data *pdata;
-       struct ieee80211_hw *hw;
-       struct wl1271 *wl;
-       unsigned long irqflags;
-       int ret;
+       struct resource res[1];
+       int ret = -ENOMEM;
 
        pdata = spi->dev.platform_data;
        if (!pdata) {
-               wl1271_error("no platform data");
+               dev_err(&spi->dev, "no platform data\n");
                return -ENODEV;
        }
 
-       hw = wl1271_alloc_hw();
-       if (IS_ERR(hw))
-               return PTR_ERR(hw);
+       pdata->ops = &spi_ops;
 
-       wl = hw->priv;
+       glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+       if (!glue) {
+               dev_err(&spi->dev, "can't allocate glue\n");
+               goto out;
+       }
 
-       dev_set_drvdata(&spi->dev, wl);
-       wl->if_priv = spi;
+       glue->dev = &spi->dev;
 
-       wl->if_ops = &spi_ops;
+       spi_set_drvdata(spi, glue);
 
        /* This is the only SPI value that we need to set here, the rest
         * comes from the board-peripherals file */
@@ -391,69 +347,61 @@ static int __devinit wl1271_probe(struct spi_device *spi)
 
        ret = spi_setup(spi);
        if (ret < 0) {
-               wl1271_error("spi_setup failed");
-               goto out_free;
+               dev_err(glue->dev, "spi_setup failed\n");
+               goto out_free_glue;
        }
 
-       wl->set_power = pdata->set_power;
-       if (!wl->set_power) {
-               wl1271_error("set power function missing in platform data");
-               ret = -ENODEV;
-               goto out_free;
+       glue->core = platform_device_alloc("wl12xx", -1);
+       if (!glue->core) {
+               dev_err(glue->dev, "can't allocate platform_device\n");
+               ret = -ENOMEM;
+               goto out_free_glue;
        }
 
-       wl->ref_clock = pdata->board_ref_clock;
-       wl->tcxo_clock = pdata->board_tcxo_clock;
-       wl->platform_quirks = pdata->platform_quirks;
+       glue->core->dev.parent = &spi->dev;
 
-       if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
-               irqflags = IRQF_TRIGGER_RISING;
-       else
-               irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
+       memset(res, 0x00, sizeof(res));
 
-       wl->irq = spi->irq;
-       if (wl->irq < 0) {
-               wl1271_error("irq missing in platform data");
-               ret = -ENODEV;
-               goto out_free;
-       }
+       res[0].start = spi->irq;
+       res[0].flags = IORESOURCE_IRQ;
+       res[0].name = "irq";
 
-       ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
-                                  irqflags,
-                                  DRIVER_NAME, wl);
-       if (ret < 0) {
-               wl1271_error("request_irq() failed: %d", ret);
-               goto out_free;
+       ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
+       if (ret) {
+               dev_err(glue->dev, "can't add resources\n");
+               goto out_dev_put;
        }
 
-       disable_irq(wl->irq);
-
-       ret = wl1271_init_ieee80211(wl);
-       if (ret)
-               goto out_irq;
+       ret = platform_device_add_data(glue->core, pdata, sizeof(*pdata));
+       if (ret) {
+               dev_err(glue->dev, "can't add platform data\n");
+               goto out_dev_put;
+       }
 
-       ret = wl1271_register_hw(wl);
-       if (ret)
-               goto out_irq;
+       ret = platform_device_add(glue->core);
+       if (ret) {
+               dev_err(glue->dev, "can't register platform device\n");
+               goto out_dev_put;
+       }
 
        return 0;
 
- out_irq:
-       free_irq(wl->irq, wl);
-
- out_free:
-       wl1271_free_hw(wl);
+out_dev_put:
+       platform_device_put(glue->core);
 
+out_free_glue:
+       kfree(glue);
+out:
        return ret;
 }
 
 static int __devexit wl1271_remove(struct spi_device *spi)
 {
-       struct wl1271 *wl = dev_get_drvdata(&spi->dev);
+       struct wl12xx_spi_glue *glue = spi_get_drvdata(spi);
 
-       wl1271_unregister_hw(wl);
-       free_irq(wl->irq, wl);
-       wl1271_free_hw(wl);
+       platform_device_del(glue->core);
+       platform_device_put(glue->core);
+       kfree(glue);
 
        return 0;
 }
@@ -462,7 +410,6 @@ static int __devexit wl1271_remove(struct spi_device *spi)
 static struct spi_driver wl1271_spi_driver = {
        .driver = {
                .name           = "wl1271_spi",
-               .bus            = &spi_bus_type,
                .owner          = THIS_MODULE,
        },
 
index 4ae8eff..25093c0 100644
 #include <net/genetlink.h>
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "acx.h"
 #include "reg.h"
+#include "ps.h"
 
 #define WL1271_TM_MAX_DATA_LENGTH 1024
 
@@ -36,6 +38,7 @@ enum wl1271_tm_commands {
        WL1271_TM_CMD_TEST,
        WL1271_TM_CMD_INTERROGATE,
        WL1271_TM_CMD_CONFIGURE,
+       WL1271_TM_CMD_NVS_PUSH,         /* Not in use. Keep to not break ABI */
        WL1271_TM_CMD_SET_PLT_MODE,
        WL1271_TM_CMD_RECOVER,
 
@@ -87,31 +90,47 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
                return -EMSGSIZE;
 
        mutex_lock(&wl->mutex);
-       ret = wl1271_cmd_test(wl, buf, buf_len, answer);
-       mutex_unlock(&wl->mutex);
 
+       if (wl->state == WL1271_STATE_OFF) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       ret = wl1271_ps_elp_wakeup(wl);
+       if (ret < 0)
+               goto out;
+
+       ret = wl1271_cmd_test(wl, buf, buf_len, answer);
        if (ret < 0) {
                wl1271_warning("testmode cmd test failed: %d", ret);
-               return ret;
+               goto out_sleep;
        }
 
        if (answer) {
                len = nla_total_size(buf_len);
                skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len);
-               if (!skb)
-                       return -ENOMEM;
+               if (!skb) {
+                       ret = -ENOMEM;
+                       goto out_sleep;
+               }
 
                NLA_PUT(skb, WL1271_TM_ATTR_DATA, buf_len, buf);
                ret = cfg80211_testmode_reply(skb);
                if (ret < 0)
-                       return ret;
+                       goto out_sleep;
        }
 
-       return 0;
+out_sleep:
+       wl1271_ps_elp_sleep(wl);
+out:
+       mutex_unlock(&wl->mutex);
+
+       return ret;
 
 nla_put_failure:
        kfree_skb(skb);
-       return -EMSGSIZE;
+       ret = -EMSGSIZE;
+       goto out_sleep;
 }
 
 static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
@@ -128,33 +147,53 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
 
        ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]);
 
+       mutex_lock(&wl->mutex);
+
+       if (wl->state == WL1271_STATE_OFF) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       ret = wl1271_ps_elp_wakeup(wl);
+       if (ret < 0)
+               goto out;
+
        cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
-       if (!cmd)
-               return -ENOMEM;
+       if (!cmd) {
+               ret = -ENOMEM;
+               goto out_sleep;
+       }
 
-       mutex_lock(&wl->mutex);
        ret = wl1271_cmd_interrogate(wl, ie_id, cmd, sizeof(*cmd));
-       mutex_unlock(&wl->mutex);
-
        if (ret < 0) {
                wl1271_warning("testmode cmd interrogate failed: %d", ret);
-               kfree(cmd);
-               return ret;
+               goto out_free;
        }
 
        skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd));
        if (!skb) {
-               kfree(cmd);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto out_free;
        }
 
        NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd);
+       ret = cfg80211_testmode_reply(skb);
+       if (ret < 0)
+               goto out_free;
+
+out_free:
+       kfree(cmd);
+out_sleep:
+       wl1271_ps_elp_sleep(wl);
+out:
+       mutex_unlock(&wl->mutex);
 
-       return 0;
+       return ret;
 
 nla_put_failure:
        kfree_skb(skb);
-       return -EMSGSIZE;
+       ret = -EMSGSIZE;
+       goto out_free;
 }
 
 static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[])
index bad9e29..4508ccd 100644
 #include <linux/etherdevice.h>
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "io.h"
 #include "reg.h"
 #include "ps.h"
 #include "tx.h"
 #include "event.h"
 
-static int wl1271_set_default_wep_key(struct wl1271 *wl, u8 id)
+static int wl1271_set_default_wep_key(struct wl1271 *wl,
+                                     struct wl12xx_vif *wlvif, u8 id)
 {
        int ret;
-       bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+       bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
 
        if (is_ap)
                ret = wl12xx_cmd_set_default_wep_key(wl, id,
-                                                    wl->ap_bcast_hlid);
+                                                    wlvif->ap.bcast_hlid);
        else
-               ret = wl12xx_cmd_set_default_wep_key(wl, id, wl->sta_hlid);
+               ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid);
 
        if (ret < 0)
                return ret;
@@ -76,7 +78,8 @@ static void wl1271_free_tx_id(struct wl1271 *wl, int id)
 }
 
 static int wl1271_tx_update_filters(struct wl1271 *wl,
-                                                struct sk_buff *skb)
+                                   struct wl12xx_vif *wlvif,
+                                   struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr;
        int ret;
@@ -92,15 +95,11 @@ static int wl1271_tx_update_filters(struct wl1271 *wl,
        if (!ieee80211_is_auth(hdr->frame_control))
                return 0;
 
-       if (wl->dev_hlid != WL12XX_INVALID_LINK_ID)
+       if (wlvif->dev_hlid != WL12XX_INVALID_LINK_ID)
                goto out;
 
        wl1271_debug(DEBUG_CMD, "starting device role for roaming");
-       ret = wl12xx_cmd_role_start_dev(wl);
-       if (ret < 0)
-               goto out;
-
-       ret = wl12xx_roc(wl, wl->dev_role_id);
+       ret = wl12xx_start_dev(wl, wlvif);
        if (ret < 0)
                goto out;
 out:
@@ -123,18 +122,16 @@ static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
                wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
 }
 
-static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
+static void wl1271_tx_regulate_link(struct wl1271 *wl,
+                                   struct wl12xx_vif *wlvif,
+                                   u8 hlid)
 {
        bool fw_ps, single_sta;
        u8 tx_pkts;
 
-       /* only regulate station links */
-       if (hlid < WL1271_AP_STA_HLID_START)
+       if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
                return;
 
-       if (WARN_ON(!wl1271_is_active_sta(wl, hlid)))
-           return;
-
        fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
        tx_pkts = wl->links[hlid].allocated_pkts;
        single_sta = (wl->active_sta_count == 1);
@@ -146,7 +143,7 @@ static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
         * case FW-memory congestion is not a problem.
         */
        if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
-               wl1271_ps_link_start(wl, hlid, true);
+               wl12xx_ps_link_start(wl, wlvif, hlid, true);
 }
 
 bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
@@ -154,7 +151,8 @@ bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
        return wl->dummy_packet == skb;
 }
 
-u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb)
+u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                        struct sk_buff *skb)
 {
        struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);
 
@@ -167,49 +165,51 @@ u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb)
        } else {
                struct ieee80211_hdr *hdr;
 
-               if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
+               if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
                        return wl->system_hlid;
 
                hdr = (struct ieee80211_hdr *)skb->data;
                if (ieee80211_is_mgmt(hdr->frame_control))
-                       return wl->ap_global_hlid;
+                       return wlvif->ap.global_hlid;
                else
-                       return wl->ap_bcast_hlid;
+                       return wlvif->ap.bcast_hlid;
        }
 }
 
-static u8 wl1271_tx_get_hlid(struct wl1271 *wl, struct sk_buff *skb)
+u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                     struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 
-       if (wl12xx_is_dummy_packet(wl, skb))
+       if (!wlvif || wl12xx_is_dummy_packet(wl, skb))
                return wl->system_hlid;
 
-       if (wl->bss_type == BSS_TYPE_AP_BSS)
-               return wl12xx_tx_get_hlid_ap(wl, skb);
+       if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+               return wl12xx_tx_get_hlid_ap(wl, wlvif, skb);
 
-       wl1271_tx_update_filters(wl, skb);
+       wl1271_tx_update_filters(wl, wlvif, skb);
 
-       if ((test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
-            test_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags)) &&
+       if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
+            test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) &&
            !ieee80211_is_auth(hdr->frame_control) &&
            !ieee80211_is_assoc_req(hdr->frame_control))
-               return wl->sta_hlid;
+               return wlvif->sta.hlid;
        else
-               return wl->dev_hlid;
+               return wlvif->dev_hlid;
 }
 
 static unsigned int wl12xx_calc_packet_alignment(struct wl1271 *wl,
                                                unsigned int packet_length)
 {
-       if (wl->quirks & WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT)
-               return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
-       else
+       if (wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT)
                return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
+       else
+               return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
 }
 
-static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
-                               u32 buf_offset, u8 hlid)
+static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                             struct sk_buff *skb, u32 extra, u32 buf_offset,
+                             u8 hlid)
 {
        struct wl1271_tx_hw_descr *desc;
        u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
@@ -217,6 +217,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
        u32 total_blocks;
        int id, ret = -EBUSY, ac;
        u32 spare_blocks = wl->tx_spare_blocks;
+       bool is_dummy = false;
 
        if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
                return -EAGAIN;
@@ -231,8 +232,10 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
        len = wl12xx_calc_packet_alignment(wl, total_len);
 
        /* in case of a dummy packet, use default amount of spare mem blocks */
-       if (unlikely(wl12xx_is_dummy_packet(wl, skb)))
+       if (unlikely(wl12xx_is_dummy_packet(wl, skb))) {
+               is_dummy = true;
                spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
+       }
 
        total_blocks = (len + TX_HW_BLOCK_SIZE - 1) / TX_HW_BLOCK_SIZE +
                spare_blocks;
@@ -257,8 +260,9 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
                ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
                wl->tx_allocated_pkts[ac]++;
 
-               if (wl->bss_type == BSS_TYPE_AP_BSS &&
-                   hlid >= WL1271_AP_STA_HLID_START)
+               if (!is_dummy && wlvif &&
+                   wlvif->bss_type == BSS_TYPE_AP_BSS &&
+                   test_bit(hlid, wlvif->ap.sta_hlid_map))
                        wl->links[hlid].allocated_pkts++;
 
                ret = 0;
@@ -273,15 +277,16 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
        return ret;
 }
 
-static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
-                             u32 extra, struct ieee80211_tx_info *control,
-                             u8 hlid)
+static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                              struct sk_buff *skb, u32 extra,
+                              struct ieee80211_tx_info *control, u8 hlid)
 {
        struct timespec ts;
        struct wl1271_tx_hw_descr *desc;
        int aligned_len, ac, rate_idx;
        s64 hosttime;
-       u16 tx_attr;
+       u16 tx_attr = 0;
+       bool is_dummy;
 
        desc = (struct wl1271_tx_hw_descr *) skb->data;
 
@@ -298,7 +303,8 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
        hosttime = (timespec_to_ns(&ts) >> 10);
        desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
 
-       if (wl->bss_type != BSS_TYPE_AP_BSS)
+       is_dummy = wl12xx_is_dummy_packet(wl, skb);
+       if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS)
                desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
        else
                desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);
@@ -307,39 +313,42 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
        ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
        desc->tid = skb->priority;
 
-       if (wl12xx_is_dummy_packet(wl, skb)) {
+       if (is_dummy) {
                /*
                 * FW expects the dummy packet to have an invalid session id -
                 * any session id that is different than the one set in the join
                 */
-               tx_attr = ((~wl->session_counter) <<
+               tx_attr = (SESSION_COUNTER_INVALID <<
                           TX_HW_ATTR_OFST_SESSION_COUNTER) &
                           TX_HW_ATTR_SESSION_COUNTER;
 
                tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
-       } else {
+       } else if (wlvif) {
                /* configure the tx attributes */
-               tx_attr =
-                       wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
+               tx_attr = wlvif->session_counter <<
+                         TX_HW_ATTR_OFST_SESSION_COUNTER;
        }
 
        desc->hlid = hlid;
-
-       if (wl->bss_type != BSS_TYPE_AP_BSS) {
+       if (is_dummy || !wlvif)
+               rate_idx = 0;
+       else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
                /* if the packets are destined for AP (have a STA entry)
                   send them with AP rate policies, otherwise use default
                   basic rates */
-               if (control->control.sta)
-                       rate_idx = ACX_TX_AP_FULL_RATE;
+               if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
+                       rate_idx = wlvif->sta.p2p_rate_idx;
+               else if (control->control.sta)
+                       rate_idx = wlvif->sta.ap_rate_idx;
                else
-                       rate_idx = ACX_TX_BASIC_RATE;
+                       rate_idx = wlvif->sta.basic_rate_idx;
        } else {
-               if (hlid == wl->ap_global_hlid)
-                       rate_idx = ACX_TX_AP_MODE_MGMT_RATE;
-               else if (hlid == wl->ap_bcast_hlid)
-                       rate_idx = ACX_TX_AP_MODE_BCST_RATE;
+               if (hlid == wlvif->ap.global_hlid)
+                       rate_idx = wlvif->ap.mgmt_rate_idx;
+               else if (hlid == wlvif->ap.bcast_hlid)
+                       rate_idx = wlvif->ap.bcast_rate_idx;
                else
-                       rate_idx = ac;
+                       rate_idx = wlvif->ap.ucast_rate_idx[ac];
        }
 
        tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
@@ -379,20 +388,24 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
 }
 
 /* caller must hold wl->mutex */
-static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
-                                                       u32 buf_offset)
+static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                  struct sk_buff *skb, u32 buf_offset)
 {
        struct ieee80211_tx_info *info;
        u32 extra = 0;
        int ret = 0;
        u32 total_len;
        u8 hlid;
+       bool is_dummy;
 
        if (!skb)
                return -EINVAL;
 
        info = IEEE80211_SKB_CB(skb);
 
+       /* TODO: handle dummy packets on multi-vifs */
+       is_dummy = wl12xx_is_dummy_packet(wl, skb);
+
        if (info->control.hw_key &&
            info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
                extra = WL1271_TKIP_IV_SPACE;
@@ -405,29 +418,28 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
                is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
                         (cipher == WLAN_CIPHER_SUITE_WEP104);
 
-               if (unlikely(is_wep && wl->default_key != idx)) {
-                       ret = wl1271_set_default_wep_key(wl, idx);
+               if (unlikely(is_wep && wlvif->default_key != idx)) {
+                       ret = wl1271_set_default_wep_key(wl, wlvif, idx);
                        if (ret < 0)
                                return ret;
-                       wl->default_key = idx;
+                       wlvif->default_key = idx;
                }
        }
-
-       hlid = wl1271_tx_get_hlid(wl, skb);
+       hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
        if (hlid == WL12XX_INVALID_LINK_ID) {
                wl1271_error("invalid hlid. dropping skb 0x%p", skb);
                return -EINVAL;
        }
 
-       ret = wl1271_tx_allocate(wl, skb, extra, buf_offset, hlid);
+       ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid);
        if (ret < 0)
                return ret;
 
-       wl1271_tx_fill_hdr(wl, skb, extra, info, hlid);
+       wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);
 
-       if (wl->bss_type == BSS_TYPE_AP_BSS) {
+       if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
                wl1271_tx_ap_update_inconnection_sta(wl, skb);
-               wl1271_tx_regulate_link(wl, hlid);
+               wl1271_tx_regulate_link(wl, wlvif, hlid);
        }
 
        /*
@@ -444,7 +456,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
        memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
 
        /* Revert side effects in the dummy packet skb, so it can be reused */
-       if (wl12xx_is_dummy_packet(wl, skb))
+       if (is_dummy)
                skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
 
        return total_len;
@@ -522,19 +534,18 @@ static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
        return &queues[q];
 }
 
-static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
+                                             struct wl1271_link *lnk)
 {
-       struct sk_buff *skb = NULL;
+       struct sk_buff *skb;
        unsigned long flags;
        struct sk_buff_head *queue;
 
-       queue = wl1271_select_queue(wl, wl->tx_queue);
+       queue = wl1271_select_queue(wl, lnk->tx_queue);
        if (!queue)
-               goto out;
+               return NULL;
 
        skb = skb_dequeue(queue);
-
-out:
        if (skb) {
                int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
                spin_lock_irqsave(&wl->wl_lock, flags);
@@ -545,43 +556,33 @@ out:
        return skb;
 }
 
-static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
+                                             struct wl12xx_vif *wlvif)
 {
        struct sk_buff *skb = NULL;
-       unsigned long flags;
        int i, h, start_hlid;
-       struct sk_buff_head *queue;
 
        /* start from the link after the last one */
-       start_hlid = (wl->last_tx_hlid + 1) % AP_MAX_LINKS;
+       start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS;
 
        /* dequeue according to AC, round robin on each link */
-       for (i = 0; i < AP_MAX_LINKS; i++) {
-               h = (start_hlid + i) % AP_MAX_LINKS;
+       for (i = 0; i < WL12XX_MAX_LINKS; i++) {
+               h = (start_hlid + i) % WL12XX_MAX_LINKS;
 
                /* only consider connected stations */
-               if (h >= WL1271_AP_STA_HLID_START &&
-                   !test_bit(h - WL1271_AP_STA_HLID_START, wl->ap_hlid_map))
+               if (!test_bit(h, wlvif->links_map))
                        continue;
 
-               queue = wl1271_select_queue(wl, wl->links[h].tx_queue);
-               if (!queue)
+               skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[h]);
+               if (!skb)
                        continue;
 
-               skb = skb_dequeue(queue);
-               if (skb)
-                       break;
+               wlvif->last_tx_hlid = h;
+               break;
        }
 
-       if (skb) {
-               int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
-               wl->last_tx_hlid = h;
-               spin_lock_irqsave(&wl->wl_lock, flags);
-               wl->tx_queue_count[q]--;
-               spin_unlock_irqrestore(&wl->wl_lock, flags);
-       } else {
-               wl->last_tx_hlid = 0;
-       }
+       if (!skb)
+               wlvif->last_tx_hlid = 0;
 
        return skb;
 }
@@ -589,12 +590,32 @@ static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
 static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
 {
        unsigned long flags;
+       struct wl12xx_vif *wlvif = wl->last_wlvif;
        struct sk_buff *skb = NULL;
 
-       if (wl->bss_type == BSS_TYPE_AP_BSS)
-               skb = wl1271_ap_skb_dequeue(wl);
-       else
-               skb = wl1271_sta_skb_dequeue(wl);
+       if (wlvif) {
+               wl12xx_for_each_wlvif_continue(wl, wlvif) {
+                       skb = wl12xx_vif_skb_dequeue(wl, wlvif);
+                       if (skb) {
+                               wl->last_wlvif = wlvif;
+                               break;
+                       }
+               }
+       }
+
+       /* do another pass */
+       if (!skb) {
+               wl12xx_for_each_wlvif(wl, wlvif) {
+                       skb = wl12xx_vif_skb_dequeue(wl, wlvif);
+                       if (skb) {
+                               wl->last_wlvif = wlvif;
+                               break;
+                       }
+               }
+       }
+
+       if (!skb)
+               skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
 
        if (!skb &&
            test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
@@ -610,21 +631,21 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
        return skb;
 }
 
-static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
+static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                 struct sk_buff *skb)
 {
        unsigned long flags;
        int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
 
        if (wl12xx_is_dummy_packet(wl, skb)) {
                set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
-       } else if (wl->bss_type == BSS_TYPE_AP_BSS) {
-               u8 hlid = wl1271_tx_get_hlid(wl, skb);
+       } else {
+               u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
                skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
 
                /* make sure we dequeue the same packet next time */
-               wl->last_tx_hlid = (hlid + AP_MAX_LINKS - 1) % AP_MAX_LINKS;
-       } else {
-               skb_queue_head(&wl->tx_queue[q], skb);
+               wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) %
+                                     WL12XX_MAX_LINKS;
        }
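
Worked example for the requeue arithmetic above (assuming WL12XX_MAX_LINKS is 12): pushing back a frame for hlid 0 sets last_tx_hlid to (0 + 12 - 1) % 12 = 11, and for hlid 5 it becomes (5 + 12 - 1) % 12 = 4. Since the dequeue loop starts at last_tx_hlid + 1, the next pass begins again at the link whose frame was just requeued, so that packet is the first one picked up.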
 
        spin_lock_irqsave(&wl->wl_lock, flags);
@@ -639,29 +660,71 @@ static bool wl1271_tx_is_data_present(struct sk_buff *skb)
        return ieee80211_is_data_present(hdr->frame_control);
 }
 
+void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
+{
+       struct wl12xx_vif *wlvif;
+       u32 timeout;
+       u8 hlid;
+
+       if (!wl->conf.rx_streaming.interval)
+               return;
+
+       if (!wl->conf.rx_streaming.always &&
+           !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))
+               return;
+
+       timeout = wl->conf.rx_streaming.duration;
+       wl12xx_for_each_wlvif_sta(wl, wlvif) {
+               bool found = false;
+               for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) {
+                       if (test_bit(hlid, wlvif->links_map)) {
+                               found  = true;
+                               break;
+                       }
+               }
+
+               if (!found)
+                       continue;
+
+               /* enable rx streaming */
+               if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
+                       ieee80211_queue_work(wl->hw,
+                                            &wlvif->rx_streaming_enable_work);
+
+               mod_timer(&wlvif->rx_streaming_timer,
+                         jiffies + msecs_to_jiffies(timeout));
+       }
+}
+
 void wl1271_tx_work_locked(struct wl1271 *wl)
 {
+       struct wl12xx_vif *wlvif;
        struct sk_buff *skb;
+       struct wl1271_tx_hw_descr *desc;
        u32 buf_offset = 0;
        bool sent_packets = false;
-       bool had_data = false;
-       bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+       unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
        int ret;
 
        if (unlikely(wl->state == WL1271_STATE_OFF))
                return;
 
        while ((skb = wl1271_skb_dequeue(wl))) {
-               if (wl1271_tx_is_data_present(skb))
-                       had_data = true;
+               struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+               bool has_data = false;
 
-               ret = wl1271_prepare_tx_frame(wl, skb, buf_offset);
+               wlvif = NULL;
+               if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
+                       wlvif = wl12xx_vif_to_data(info->control.vif);
+
+               has_data = wlvif && wl1271_tx_is_data_present(skb);
+               ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset);
                if (ret == -EAGAIN) {
                        /*
                         * Aggregation buffer is full.
                         * Flush buffer and try again.
                         */
-                       wl1271_skb_queue_head(wl, skb);
+                       wl1271_skb_queue_head(wl, wlvif, skb);
                        wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
                                     buf_offset, true);
                        sent_packets = true;
@@ -672,16 +735,27 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
                         * Firmware buffer is full.
                         * Queue back last skb, and stop aggregating.
                         */
-                       wl1271_skb_queue_head(wl, skb);
+                       wl1271_skb_queue_head(wl, wlvif, skb);
                        /* No work left, avoid scheduling redundant tx work */
                        set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
                        goto out_ack;
                } else if (ret < 0) {
-                       dev_kfree_skb(skb);
+                       if (wl12xx_is_dummy_packet(wl, skb))
+                               /*
+                                * fw still expects dummy packet,
+                                * so re-enqueue it
+                                */
+                               wl1271_skb_queue_head(wl, wlvif, skb);
+                       else
+                               ieee80211_free_txskb(wl->hw, skb);
                        goto out_ack;
                }
                buf_offset += ret;
                wl->tx_packets_count++;
+               if (has_data) {
+                       desc = (struct wl1271_tx_hw_descr *) skb->data;
+                       __set_bit(desc->hlid, active_hlids);
+               }
        }
 
 out_ack:
@@ -701,19 +775,7 @@ out_ack:
 
                wl1271_handle_tx_low_watermark(wl);
        }
-       if (!is_ap && wl->conf.rx_streaming.interval && had_data &&
-           (wl->conf.rx_streaming.always ||
-            test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) {
-               u32 timeout = wl->conf.rx_streaming.duration;
-
-               /* enable rx streaming */
-               if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
-                       ieee80211_queue_work(wl->hw,
-                                            &wl->rx_streaming_enable_work);
-
-               mod_timer(&wl->rx_streaming_timer,
-                         jiffies + msecs_to_jiffies(timeout));
-       }
+       wl12xx_rearm_rx_streaming(wl, active_hlids);
 }
 
 void wl1271_tx_work(struct work_struct *work)
@@ -737,6 +799,8 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
                                      struct wl1271_tx_hw_res_descr *result)
 {
        struct ieee80211_tx_info *info;
+       struct ieee80211_vif *vif;
+       struct wl12xx_vif *wlvif;
        struct sk_buff *skb;
        int id = result->id;
        int rate = -1;
@@ -756,11 +820,16 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
                return;
        }
 
+       /* info->control is valid as long as we don't update info->status */
+       vif = info->control.vif;
+       wlvif = wl12xx_vif_to_data(vif);
+
        /* update the TX status info */
        if (result->status == TX_SUCCESS) {
                if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
                        info->flags |= IEEE80211_TX_STAT_ACK;
-               rate = wl1271_rate_to_idx(result->rate_class_index, wl->band);
+               rate = wl1271_rate_to_idx(result->rate_class_index,
+                                         wlvif->band);
                retries = result->ack_failures;
        } else if (result->status == TX_RETRY_EXCEEDED) {
                wl->stats.excessive_retries++;
@@ -783,14 +852,14 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
             info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP ||
             info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) {
                u8 fw_lsb = result->tx_security_sequence_number_lsb;
-               u8 cur_lsb = wl->tx_security_last_seq_lsb;
+               u8 cur_lsb = wlvif->tx_security_last_seq_lsb;
 
                /*
                 * update security sequence number, taking care of potential
                 * wrap-around
                 */
-               wl->tx_security_seq += (fw_lsb - cur_lsb + 256) % 256;
-               wl->tx_security_last_seq_lsb = fw_lsb;
+               wlvif->tx_security_seq += (fw_lsb - cur_lsb) & 0xff;
+               wlvif->tx_security_last_seq_lsb = fw_lsb;
        }
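
The masked subtraction is equivalent to the old "(fw_lsb - cur_lsb + 256) % 256" form for an 8-bit counter. Worked example (illustrative): with cur_lsb = 250 and fw_lsb = 3 the firmware counter advanced by 9, and after the usual promotion to int both (3 - 250 + 256) % 256 and (3 - 250) & 0xff evaluate to 9.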
 
        /* remove private header from packet */
@@ -886,39 +955,30 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
 }
 
 /* caller must hold wl->mutex and TX must be stopped */
-void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
+void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        int i;
-       struct sk_buff *skb;
-       struct ieee80211_tx_info *info;
 
        /* TX failure */
-       if (wl->bss_type == BSS_TYPE_AP_BSS) {
-               for (i = 0; i < AP_MAX_LINKS; i++) {
-                       wl1271_free_sta(wl, i);
-                       wl1271_tx_reset_link_queues(wl, i);
-                       wl->links[i].allocated_pkts = 0;
-                       wl->links[i].prev_freed_pkts = 0;
-               }
-
-               wl->last_tx_hlid = 0;
-       } else {
-               for (i = 0; i < NUM_TX_QUEUES; i++) {
-                       while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
-                               wl1271_debug(DEBUG_TX, "freeing skb 0x%p",
-                                            skb);
-
-                               if (!wl12xx_is_dummy_packet(wl, skb)) {
-                                       info = IEEE80211_SKB_CB(skb);
-                                       info->status.rates[0].idx = -1;
-                                       info->status.rates[0].count = 0;
-                                       ieee80211_tx_status_ni(wl->hw, skb);
-                               }
-                       }
-               }
+       for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
+               if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+                       wl1271_free_sta(wl, wlvif, i);
+               else
+                       wlvif->sta.ba_rx_bitmap = 0;
 
-               wl->ba_rx_bitmap = 0;
+               wl1271_tx_reset_link_queues(wl, i);
+               wl->links[i].allocated_pkts = 0;
+               wl->links[i].prev_freed_pkts = 0;
        }
+       wlvif->last_tx_hlid = 0;
+}
+
+/* caller must hold wl->mutex and TX must be stopped */
+void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
+{
+       int i;
+       struct sk_buff *skb;
+       struct ieee80211_tx_info *info;
 
        for (i = 0; i < NUM_TX_QUEUES; i++)
                wl->tx_queue_count[i] = 0;
index dc4f09a..2dbb24e 100644
@@ -206,18 +206,23 @@ static inline int wl1271_tx_total_queue_count(struct wl1271 *wl)
 void wl1271_tx_work(struct work_struct *work);
 void wl1271_tx_work_locked(struct wl1271 *wl);
 void wl1271_tx_complete(struct wl1271 *wl);
-void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues);
+void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues);
 void wl1271_tx_flush(struct wl1271 *wl);
 u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
 u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
                                enum ieee80211_band rate_band);
 u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);
-u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb);
+u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                        struct sk_buff *skb);
+u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                     struct sk_buff *skb);
 void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
 void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
 bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
+void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids);
 
 /* from main.c */
-void wl1271_free_sta(struct wl1271 *wl, u8 hlid);
+void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
 
 #endif
index 1ec90fc..d21f71f 100644
 #include "conf.h"
 #include "ini.h"
 
-#define DRIVER_NAME "wl1271"
-#define DRIVER_PREFIX DRIVER_NAME ": "
-
-/*
- * FW versions support BA 11n
- * versions marks x.x.x.50-60.x
- */
-#define WL12XX_BA_SUPPORT_FW_COST_VER2_START    50
-#define WL12XX_BA_SUPPORT_FW_COST_VER2_END      60
-
-enum {
-       DEBUG_NONE      = 0,
-       DEBUG_IRQ       = BIT(0),
-       DEBUG_SPI       = BIT(1),
-       DEBUG_BOOT      = BIT(2),
-       DEBUG_MAILBOX   = BIT(3),
-       DEBUG_TESTMODE  = BIT(4),
-       DEBUG_EVENT     = BIT(5),
-       DEBUG_TX        = BIT(6),
-       DEBUG_RX        = BIT(7),
-       DEBUG_SCAN      = BIT(8),
-       DEBUG_CRYPT     = BIT(9),
-       DEBUG_PSM       = BIT(10),
-       DEBUG_MAC80211  = BIT(11),
-       DEBUG_CMD       = BIT(12),
-       DEBUG_ACX       = BIT(13),
-       DEBUG_SDIO      = BIT(14),
-       DEBUG_FILTERS   = BIT(15),
-       DEBUG_ADHOC     = BIT(16),
-       DEBUG_AP        = BIT(17),
-       DEBUG_MASTER    = (DEBUG_ADHOC | DEBUG_AP),
-       DEBUG_ALL       = ~0,
-};
-
-extern u32 wl12xx_debug_level;
-
-#define DEBUG_DUMP_LIMIT 1024
-
-#define wl1271_error(fmt, arg...) \
-       pr_err(DRIVER_PREFIX "ERROR " fmt "\n", ##arg)
-
-#define wl1271_warning(fmt, arg...) \
-       pr_warning(DRIVER_PREFIX "WARNING " fmt "\n", ##arg)
-
-#define wl1271_notice(fmt, arg...) \
-       pr_info(DRIVER_PREFIX fmt "\n", ##arg)
-
-#define wl1271_info(fmt, arg...) \
-       pr_info(DRIVER_PREFIX fmt "\n", ##arg)
-
-#define wl1271_debug(level, fmt, arg...) \
-       do { \
-               if (level & wl12xx_debug_level) \
-                       pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \
-       } while (0)
-
-/* TODO: use pr_debug_hex_dump when it will be available */
-#define wl1271_dump(level, prefix, buf, len)   \
-       do { \
-               if (level & wl12xx_debug_level) \
-                       print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
-                                      DUMP_PREFIX_OFFSET, 16, 1,       \
-                                      buf,                             \
-                                      min_t(size_t, len, DEBUG_DUMP_LIMIT), \
-                                      0);                              \
-       } while (0)
-
-#define wl1271_dump_ascii(level, prefix, buf, len)     \
-       do { \
-               if (level & wl12xx_debug_level) \
-                       print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
-                                      DUMP_PREFIX_OFFSET, 16, 1,       \
-                                      buf,                             \
-                                      min_t(size_t, len, DEBUG_DUMP_LIMIT), \
-                                      true);                           \
-       } while (0)
-
 #define WL127X_FW_NAME "ti-connectivity/wl127x-fw-3.bin"
 #define WL128X_FW_NAME "ti-connectivity/wl128x-fw-3.bin"
 
@@ -142,15 +65,11 @@ extern u32 wl12xx_debug_level;
 #define WL12XX_INVALID_ROLE_ID     0xff
 #define WL12XX_INVALID_LINK_ID     0xff
 
+#define WL12XX_MAX_RATE_POLICIES 16
+
 /* Defined by FW as 0. Will not be freed or allocated. */
 #define WL12XX_SYSTEM_HLID         0
 
-/*
- * TODO: we currently don't support multirole. remove
- * this constant from the code when we do.
- */
-#define WL1271_AP_STA_HLID_START   3
-
 /*
  * When in AP-mode, we allow (at least) this number of packets
  * to be transmitted to FW for a STA in PS-mode. Only when packets are
@@ -236,13 +155,6 @@ struct wl1271_stats {
 
 #define AP_MAX_STATIONS            8
 
-/* Broadcast and Global links + system link + links to stations */
-/*
- * TODO: when WL1271_AP_STA_HLID_START is no longer constant, change all
- * the places that use this.
- */
-#define AP_MAX_LINKS               (AP_MAX_STATIONS + WL1271_AP_STA_HLID_START)
-
 /* FW status registers */
 struct wl12xx_fw_status {
        __le32 intr;
@@ -299,17 +211,14 @@ struct wl1271_scan {
 };
 
 struct wl1271_if_operations {
-       void (*read)(struct wl1271 *wl, int addr, void *buf, size_t len,
+       void (*read)(struct device *child, int addr, void *buf, size_t len,
                     bool fixed);
-       void (*write)(struct wl1271 *wl, int addr, void *buf, size_t len,
+       void (*write)(struct device *child, int addr, void *buf, size_t len,
                     bool fixed);
-       void (*reset)(struct wl1271 *wl);
-       void (*init)(struct wl1271 *wl);
-       int (*power)(struct wl1271 *wl, bool enable);
-       struct device* (*dev)(struct wl1271 *wl);
-       void (*enable_irq)(struct wl1271 *wl);
-       void (*disable_irq)(struct wl1271 *wl);
-       void (*set_block_size) (struct wl1271 *wl, unsigned int blksz);
+       void (*reset)(struct device *child);
+       void (*init)(struct device *child);
+       int (*power)(struct device *child, bool enable);
+       void (*set_block_size) (struct device *child, unsigned int blksz);
 };
 
 #define MAX_NUM_KEYS 14
@@ -326,29 +235,33 @@ struct wl1271_ap_key {
 };
 
 enum wl12xx_flags {
-       WL1271_FLAG_STA_ASSOCIATED,
-       WL1271_FLAG_IBSS_JOINED,
        WL1271_FLAG_GPIO_POWER,
        WL1271_FLAG_TX_QUEUE_STOPPED,
        WL1271_FLAG_TX_PENDING,
        WL1271_FLAG_IN_ELP,
        WL1271_FLAG_ELP_REQUESTED,
-       WL1271_FLAG_PSM,
-       WL1271_FLAG_PSM_REQUESTED,
        WL1271_FLAG_IRQ_RUNNING,
        WL1271_FLAG_IDLE,
-       WL1271_FLAG_PSPOLL_FAILURE,
-       WL1271_FLAG_STA_STATE_SENT,
        WL1271_FLAG_FW_TX_BUSY,
-       WL1271_FLAG_AP_STARTED,
-       WL1271_FLAG_IF_INITIALIZED,
        WL1271_FLAG_DUMMY_PACKET_PENDING,
        WL1271_FLAG_SUSPENDED,
        WL1271_FLAG_PENDING_WORK,
        WL1271_FLAG_SOFT_GEMINI,
-       WL1271_FLAG_RX_STREAMING_STARTED,
        WL1271_FLAG_RECOVERY_IN_PROGRESS,
-       WL1271_FLAG_CS_PROGRESS,
+};
+
+enum wl12xx_vif_flags {
+       WLVIF_FLAG_INITIALIZED,
+       WLVIF_FLAG_STA_ASSOCIATED,
+       WLVIF_FLAG_IBSS_JOINED,
+       WLVIF_FLAG_AP_STARTED,
+       WLVIF_FLAG_PSM,
+       WLVIF_FLAG_PSM_REQUESTED,
+       WLVIF_FLAG_STA_STATE_SENT,
+       WLVIF_FLAG_RX_STREAMING_STARTED,
+       WLVIF_FLAG_PSPOLL_FAILURE,
+       WLVIF_FLAG_CS_PROGRESS,
+       WLVIF_FLAG_AP_PROBE_RESP_SET,
 };
 
 struct wl1271_link {
@@ -366,10 +279,11 @@ struct wl1271_link {
 };
 
 struct wl1271 {
-       struct platform_device *plat_dev;
        struct ieee80211_hw *hw;
        bool mac80211_registered;
 
+       struct device *dev;
+
        void *if_priv;
 
        struct wl1271_if_operations *if_ops;
@@ -399,25 +313,20 @@ struct wl1271 {
 
        s8 hw_pg_ver;
 
-       u8 bssid[ETH_ALEN];
        u8 mac_addr[ETH_ALEN];
-       u8 bss_type;
-       u8 set_bss_type;
-       u8 p2p; /* we are using p2p role */
-       u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
-       u8 ssid_len;
        int channel;
-       u8 role_id;
-       u8 dev_role_id;
        u8 system_hlid;
-       u8 sta_hlid;
-       u8 dev_hlid;
-       u8 ap_global_hlid;
-       u8 ap_bcast_hlid;
 
        unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
        unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
        unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
+       unsigned long rate_policies_map[
+                       BITS_TO_LONGS(WL12XX_MAX_RATE_POLICIES)];
+
+       struct list_head wlvif_list;
+
+       u8 sta_count;
+       u8 ap_count;
 
        struct wl1271_acx_mem_map *target_mem_map;
 
@@ -440,11 +349,7 @@ struct wl1271 {
        /* Time-offset between host and chipset clocks */
        s64 time_offset;
 
-       /* Session counter for the chipset */
-       int session_counter;
-
        /* Frames scheduled for transmission, not handled yet */
-       struct sk_buff_head tx_queue[NUM_TX_QUEUES];
        int tx_queue_count[NUM_TX_QUEUES];
        long stopped_queues_map;
 
@@ -462,17 +367,6 @@ struct wl1271 {
        struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS];
        int tx_frames_cnt;
 
-       /*
-        * Security sequence number
-        *     bits 0-15: lower 16 bits part of sequence number
-        *     bits 16-47: higher 32 bits part of sequence number
-        *     bits 48-63: not in use
-        */
-       u64 tx_security_seq;
-
-       /* 8 bits of the last sequence number in use */
-       u8 tx_security_last_seq_lsb;
-
        /* FW Rx counter */
        u32 rx_counter;
 
@@ -507,59 +401,21 @@ struct wl1271 {
        u32 mbox_ptr[2];
 
        /* Are we currently scanning */
+       struct ieee80211_vif *scan_vif;
        struct wl1271_scan scan;
        struct delayed_work scan_complete_work;
 
        bool sched_scanning;
 
-       /* probe-req template for the current AP */
-       struct sk_buff *probereq;
-
-       /* Our association ID */
-       u16 aid;
-
-       /*
-        * currently configured rate set:
-        *      bits  0-15 - 802.11abg rates
-        *      bits 16-23 - 802.11n   MCS index mask
-        * support only 1 stream, thus only 8 bits for the MCS rates (0-7).
-        */
-       u32 basic_rate_set;
-       u32 basic_rate;
-       u32 rate_set;
-       u32 bitrate_masks[IEEE80211_NUM_BANDS];
-
        /* The current band */
        enum ieee80211_band band;
 
-       /* Beaconing interval (needed for ad-hoc) */
-       u32 beacon_int;
-
-       /* Default key (for WEP) */
-       u32 default_key;
-
-       /* Rx Streaming */
-       struct work_struct rx_streaming_enable_work;
-       struct work_struct rx_streaming_disable_work;
-       struct timer_list rx_streaming_timer;
-
        struct completion *elp_compl;
-       struct completion *ps_compl;
        struct delayed_work elp_work;
-       struct delayed_work pspoll_work;
-
-       /* counter for ps-poll delivery failures */
-       int ps_poll_failures;
-
-       /* retry counter for PSM entries */
-       u8 psm_entry_retry;
 
        /* in dBm */
        int power_level;
 
-       int rssi_thold;
-       int last_rssi_event;
-
        struct wl1271_stats stats;
 
        __le32 buffer_32;
@@ -583,20 +439,9 @@ struct wl1271 {
        /* Most recently reported noise in dBm */
        s8 noise;
 
-       /* map for HLIDs of associated stations - when operating in AP mode */
-       unsigned long ap_hlid_map[BITS_TO_LONGS(AP_MAX_STATIONS)];
-
-       /* recoreded keys for AP-mode - set here before AP startup */
-       struct wl1271_ap_key *recorded_ap_keys[MAX_NUM_KEYS];
-
        /* bands supported by this instance of wl12xx */
        struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
 
-       /* RX BA constraint value */
-       bool ba_support;
-       u8 ba_rx_bitmap;
-       bool ba_allowed;
-
        int tcxo_clock;
 
        /*
@@ -610,10 +455,7 @@ struct wl1271 {
         * AP-mode - links indexed by HLID. The global and broadcast links
         * are always active.
         */
-       struct wl1271_link links[AP_MAX_LINKS];
-
-       /* the hlid of the link where the last transmitted skb came from */
-       int last_tx_hlid;
+       struct wl1271_link links[WL12XX_MAX_LINKS];
 
        /* AP-mode - a bitmap of links currently in PS mode according to FW */
        u32 ap_fw_ps_map;
@@ -632,21 +474,173 @@ struct wl1271 {
 
        /* AP-mode - number of currently connected stations */
        int active_sta_count;
+
+       /* last wlvif we transmitted from */
+       struct wl12xx_vif *last_wlvif;
 };
 
 struct wl1271_station {
        u8 hlid;
 };
 
+struct wl12xx_vif {
+       struct wl1271 *wl;
+       struct list_head list;
+       unsigned long flags;
+       u8 bss_type;
+       u8 p2p; /* we are using p2p role */
+       u8 role_id;
+
+       /* sta/ibss specific */
+       u8 dev_role_id;
+       u8 dev_hlid;
+
+       union {
+               struct {
+                       u8 hlid;
+                       u8 ba_rx_bitmap;
+
+                       u8 basic_rate_idx;
+                       u8 ap_rate_idx;
+                       u8 p2p_rate_idx;
+               } sta;
+               struct {
+                       u8 global_hlid;
+                       u8 bcast_hlid;
+
+                       /* HLIDs bitmap of associated stations */
+                       unsigned long sta_hlid_map[BITS_TO_LONGS(
+                                                       WL12XX_MAX_LINKS)];
+
+                       /* recorded keys - set here before AP startup */
+                       struct wl1271_ap_key *recorded_keys[MAX_NUM_KEYS];
+
+                       u8 mgmt_rate_idx;
+                       u8 bcast_rate_idx;
+                       u8 ucast_rate_idx[CONF_TX_MAX_AC_COUNT];
+               } ap;
+       };
+
+       /* the hlid of the last transmitted skb */
+       int last_tx_hlid;
+
+       unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
+
+       u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
+       u8 ssid_len;
+
+       /* The current band */
+       enum ieee80211_band band;
+       int channel;
+
+       u32 bitrate_masks[IEEE80211_NUM_BANDS];
+       u32 basic_rate_set;
+
+       /*
+        * currently configured rate set:
+        *      bits  0-15 - 802.11abg rates
+        *      bits 16-23 - 802.11n   MCS index mask
+        * support only 1 stream, thus only 8 bits for the MCS rates (0-7).
+        */
+       u32 basic_rate;
+       u32 rate_set;
+
+       /* probe-req template for the current AP */
+       struct sk_buff *probereq;
+
+       /* Beaconing interval (needed for ad-hoc) */
+       u32 beacon_int;
+
+       /* Default key (for WEP) */
+       u32 default_key;
+
+       /* Our association ID */
+       u16 aid;
+
+       /* Session counter for the chipset */
+       int session_counter;
+
+       struct completion *ps_compl;
+       struct delayed_work pspoll_work;
+
+       /* counter for ps-poll delivery failures */
+       int ps_poll_failures;
+
+       /* retry counter for PSM entries */
+       u8 psm_entry_retry;
+
+       /* in dBm */
+       int power_level;
+
+       int rssi_thold;
+       int last_rssi_event;
+
+       /* RX BA constraint value */
+       bool ba_support;
+       bool ba_allowed;
+
+       /* Rx Streaming */
+       struct work_struct rx_streaming_enable_work;
+       struct work_struct rx_streaming_disable_work;
+       struct timer_list rx_streaming_timer;
+
+       /*
+        * This struct must be last!
+        * data that has to be saved across reconfigs (e.g. recovery)
+        * should be declared in this struct.
+        */
+       struct {
+               u8 persistent[0];
+               /*
+                * Security sequence number
+                *     bits 0-15: lower 16 bits part of sequence number
+                *     bits 16-47: higher 32 bits part of sequence number
+                *     bits 48-63: not in use
+                */
+               u64 tx_security_seq;
+
+               /* 8 bits of the last sequence number in use */
+               u8 tx_security_last_seq_lsb;
+       };
+};
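
A minimal sketch of how the zero-length persistent[] marker can be used to keep the tail of the struct across a re-initialization (the helper name is hypothetical and not part of this patch; it only relies on the layout declared above):

	/* hypothetical helper: preserve everything declared after 'persistent' */
	static void wlvif_reinit_keep_persistent(struct wl12xx_vif *wlvif)
	{
		size_t off = offsetof(struct wl12xx_vif, persistent);
		size_t len = sizeof(*wlvif) - off;
		u8 saved[sizeof(struct wl12xx_vif)];

		memcpy(saved, (u8 *)wlvif + off, len);	/* stash the persistent tail */
		memset(wlvif, 0, sizeof(*wlvif));	/* wipe for re-configuration */
		memcpy((u8 *)wlvif + off, saved, len);	/* restore the tail */
	}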
+
+static inline struct wl12xx_vif *wl12xx_vif_to_data(struct ieee80211_vif *vif)
+{
+       return (struct wl12xx_vif *)vif->drv_priv;
+}
+
+static inline
+struct ieee80211_vif *wl12xx_wlvif_to_vif(struct wl12xx_vif *wlvif)
+{
+       return container_of((void *)wlvif, struct ieee80211_vif, drv_priv);
+}
+
+#define wl12xx_for_each_wlvif(wl, wlvif) \
+               list_for_each_entry(wlvif, &wl->wlvif_list, list)
+
+#define wl12xx_for_each_wlvif_continue(wl, wlvif) \
+               list_for_each_entry_continue(wlvif, &wl->wlvif_list, list)
+
+#define wl12xx_for_each_wlvif_bss_type(wl, wlvif, _bss_type)   \
+               wl12xx_for_each_wlvif(wl, wlvif)                \
+                       if (wlvif->bss_type == _bss_type)
+
+#define wl12xx_for_each_wlvif_sta(wl, wlvif)   \
+               wl12xx_for_each_wlvif_bss_type(wl, wlvif, BSS_TYPE_STA_BSS)
+
+#define wl12xx_for_each_wlvif_ap(wl, wlvif)    \
+               wl12xx_for_each_wlvif_bss_type(wl, wlvif, BSS_TYPE_AP_BSS)
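
The helpers compose in the obvious way; a short usage sketch (illustrative, assuming only the declarations above):

	struct wl12xx_vif *wlvif;

	/* walk only station-mode interfaces and recover their mac80211 vif */
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

		/* per-interface work goes here, e.g. reading vif->addr */
	}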
+
 int wl1271_plt_start(struct wl1271 *wl);
 int wl1271_plt_stop(struct wl1271 *wl);
-int wl1271_recalc_rx_streaming(struct wl1271 *wl);
+int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 void wl12xx_queue_recovery_work(struct wl1271 *wl);
 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
 
 #define JOIN_TIMEOUT 5000 /* 5000 milliseconds to join */
 
-#define SESSION_COUNTER_MAX 7 /* maximum value for the session counter */
+#define SESSION_COUNTER_MAX 6 /* maximum value for the session counter */
+#define SESSION_COUNTER_INVALID 7 /* used with dummy_packet */
 
 #define WL1271_DEFAULT_POWER_LEVEL 0
 
@@ -669,8 +663,8 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
 /* Each RX/TX transaction requires an end-of-transaction transfer */
 #define WL12XX_QUIRK_END_OF_TRANSACTION                BIT(0)
 
-/* WL128X requires aggregated packets to be aligned to the SDIO block size */
-#define WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT       BIT(2)
+/* wl127x and SPI don't support SDIO block size alignment */
+#define WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT    BIT(2)
 
 /* Older firmwares did not implement the FW logger over bus feature */
 #define WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED     BIT(4)
index f7971d3..8f0ffaf 100644
@@ -116,11 +116,6 @@ struct wl12xx_ps_poll_template {
        u8 ta[ETH_ALEN];
 } __packed;
 
-struct wl12xx_qos_null_data_template {
-       struct ieee80211_header header;
-       __le16 qos_ctl;
-} __packed;
-
 struct wl12xx_arp_rsp_template {
        struct ieee80211_hdr_3addr hdr;
 
index 973b110..3c96b33 100644
@@ -2,7 +2,7 @@
 #include <linux/err.h>
 #include <linux/wl12xx.h>
 
-static const struct wl12xx_platform_data *platform_data;
+static struct wl12xx_platform_data *platform_data;
 
 int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
 {
@@ -18,7 +18,7 @@ int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
        return 0;
 }
 
-const struct wl12xx_platform_data *wl12xx_get_platform_data(void)
+struct wl12xx_platform_data *wl12xx_get_platform_data(void)
 {
        if (!platform_data)
                return ERR_PTR(-ENODEV);
index 1ae270e..639cf8a 100644
@@ -395,7 +395,7 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
        struct gnttab_copy *copy_gop;
        struct netbk_rx_meta *meta;
        /*
-        * These variables a used iff get_page_ext returns true,
+        * These variables are used iff get_page_ext returns true,
         * in which case they are guaranteed to be initialized.
         */
        unsigned int uninitialized_var(group), uninitialized_var(idx);
@@ -940,8 +940,6 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
                if (!page)
                        return NULL;
 
-               netbk->mmap_pages[pending_idx] = page;
-
                gop->source.u.ref = txp->gref;
                gop->source.domid = vif->domid;
                gop->source.offset = txp->offset;
@@ -1336,8 +1334,6 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
                        continue;
                }
 
-               netbk->mmap_pages[pending_idx] = page;
-
                gop->source.u.ref = txreq.gref;
                gop->source.domid = vif->domid;
                gop->source.offset = txreq.offset;
@@ -1668,7 +1664,7 @@ static int __init netback_init(void)
                                             "netback/%u", group);
 
                if (IS_ERR(netbk->task)) {
-                       printk(KERN_ALERT "kthread_run() fails at netback\n");
+                       printk(KERN_ALERT "kthread_create() fails at netback\n");
                        del_timer(&netbk->net_timer);
                        rc = PTR_ERR(netbk->task);
                        goto failed_init;
index 4312db8..0a59c57 100644
@@ -1709,7 +1709,6 @@ static void netback_changed(struct xenbus_device *dev,
        case XenbusStateInitialised:
        case XenbusStateReconfiguring:
        case XenbusStateReconfigured:
-       case XenbusStateConnected:
        case XenbusStateUnknown:
        case XenbusStateClosed:
                break;
@@ -1720,6 +1719,9 @@ static void netback_changed(struct xenbus_device *dev,
                if (xennet_connect(netdev) != 0)
                        break;
                xenbus_switch_state(dev, XenbusStateConnected);
+               break;
+
+       case XenbusStateConnected:
                netif_notify_peers(netdev);
                break;
 
index 7bcb1fe..b8b6c2a 100644
@@ -72,6 +72,7 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
 #define PN533_CMD_IN_LIST_PASSIVE_TARGET 0x4A
 #define PN533_CMD_IN_ATR 0x50
 #define PN533_CMD_IN_RELEASE 0x52
+#define PN533_CMD_IN_JUMP_FOR_DEP 0x56
 
 #define PN533_CMD_RESPONSE(cmd) (cmd + 1)
 
@@ -231,6 +232,26 @@ struct pn533_cmd_activate_response {
        u8 gt[];
 } __packed;
 
+/* PN533_CMD_IN_JUMP_FOR_DEP */
+struct pn533_cmd_jump_dep {
+       u8 active;
+       u8 baud;
+       u8 next;
+       u8 gt[];
+} __packed;
+
+struct pn533_cmd_jump_dep_response {
+       u8 status;
+       u8 tg;
+       u8 nfcid3t[10];
+       u8 didt;
+       u8 bst;
+       u8 brt;
+       u8 to;
+       u8 ppt;
+       /* optional */
+       u8 gt[];
+} __packed;
 
 struct pn533 {
        struct usb_device *udev;
@@ -1121,6 +1142,7 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev)
 {
        struct pn533_cmd_activate_param param;
        struct pn533_cmd_activate_response *resp;
+       u16 gt_len;
        int rc;
 
        nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
@@ -1146,7 +1168,11 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev)
        if (rc != PN533_CMD_RET_SUCCESS)
                return -EIO;
 
-       return 0;
+       /* ATR_RES general bytes are located at offset 16 */
+       gt_len = PN533_FRAME_CMD_PARAMS_LEN(dev->in_frame) - 16;
+       rc = nfc_set_remote_general_bytes(dev->nfc_dev, resp->gt, gt_len);
+
+       return rc;
 }
 
 static int pn533_activate_target(struct nfc_dev *nfc_dev, u32 target_idx,
@@ -1239,6 +1265,142 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev, u32 target_idx)
        return;
 }
 
+
+static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
+                                               u8 *params, int params_len)
+{
+       struct pn533_cmd_jump_dep *cmd;
+       struct pn533_cmd_jump_dep_response *resp;
+       struct nfc_target nfc_target;
+       u8 target_gt_len;
+       int rc;
+
+       if (params_len == -ENOENT) {
+               nfc_dev_dbg(&dev->interface->dev, "");
+               return 0;
+       }
+
+       if (params_len < 0) {
+               nfc_dev_err(&dev->interface->dev,
+                               "Error %d when bringing DEP link up",
+                                                               params_len);
+               return 0;
+       }
+
+       if (dev->tgt_available_prots &&
+           !(dev->tgt_available_prots & (1 << NFC_PROTO_NFC_DEP))) {
+               nfc_dev_err(&dev->interface->dev,
+                       "The target does not support DEP");
+               return -EINVAL;
+       }
+
+       resp = (struct pn533_cmd_jump_dep_response *) params;
+       cmd = (struct pn533_cmd_jump_dep *) arg;
+       rc = resp->status & PN533_CMD_RET_MASK;
+       if (rc != PN533_CMD_RET_SUCCESS) {
+               nfc_dev_err(&dev->interface->dev,
+                               "Bringing DEP link up failed %d", rc);
+               return 0;
+       }
+
+       if (!dev->tgt_available_prots) {
+               nfc_dev_dbg(&dev->interface->dev, "Creating new target");
+
+               nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+               rc = nfc_targets_found(dev->nfc_dev, &nfc_target, 1);
+               if (rc)
+                       return 0;
+
+               dev->tgt_available_prots = 0;
+       }
+
+       dev->tgt_active_prot = NFC_PROTO_NFC_DEP;
+
+       /* ATR_RES general bytes are located at offset 17 */
+       target_gt_len = PN533_FRAME_CMD_PARAMS_LEN(dev->in_frame) - 17;
+       rc = nfc_set_remote_general_bytes(dev->nfc_dev,
+                                               resp->gt, target_gt_len);
+       if (rc == 0)
+               rc = nfc_dep_link_is_up(dev->nfc_dev,
+                                               dev->nfc_dev->targets[0].idx,
+                                               !cmd->active, NFC_RF_INITIATOR);
+
+       return 0;
+}
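
The magic offsets for the ATR_RES general bytes follow from the response layouts (our reading of the structures in this patch): the fixed part of struct pn533_cmd_jump_dep_response is status + tg + nfcid3t[10] + didt + bst + brt + to + ppt = 1 + 1 + 10 + 1 + 1 + 1 + 1 + 1 = 17 bytes, so gt[] starts at offset 17 here, while the InATR response used by pn533_activate_target_nfcdep() carries the same fields without the tg byte, giving the offset of 16 used there.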
+
+static int pn533_dep_link_up(struct nfc_dev *nfc_dev, int target_idx,
+                                               u8 comm_mode, u8 rf_mode)
+{
+       struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+       struct pn533_cmd_jump_dep *cmd;
+       u8 cmd_len, local_gt_len, *local_gt;
+       int rc;
+
+       nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+       if (rf_mode == NFC_RF_TARGET) {
+               nfc_dev_err(&dev->interface->dev, "Target mode not supported");
+               return -EOPNOTSUPP;
+       }
+
+
+       if (dev->poll_mod_count) {
+               nfc_dev_err(&dev->interface->dev,
+                               "Cannot bring the DEP link up while polling");
+               return -EBUSY;
+       }
+
+       if (dev->tgt_active_prot) {
+               nfc_dev_err(&dev->interface->dev,
+                               "There is already an active target");
+               return -EBUSY;
+       }
+
+       local_gt = nfc_get_local_general_bytes(dev->nfc_dev, &local_gt_len);
+       if (local_gt_len > NFC_MAX_GT_LEN)
+               return -EINVAL;
+
+       cmd_len = sizeof(struct pn533_cmd_jump_dep) + local_gt_len;
+       cmd = kzalloc(cmd_len, GFP_KERNEL);
+       if (cmd == NULL)
+               return -ENOMEM;
+
+       pn533_tx_frame_init(dev->out_frame, PN533_CMD_IN_JUMP_FOR_DEP);
+
+       cmd->active = !comm_mode;
+       cmd->baud = 0;
+       if (local_gt != NULL) {
+               cmd->next = 4; /* We have some Gi */
+               memcpy(cmd->gt, local_gt, local_gt_len);
+       } else {
+               cmd->next = 0;
+       }
+
+       memcpy(PN533_FRAME_CMD_PARAMS_PTR(dev->out_frame), cmd, cmd_len);
+       dev->out_frame->datalen += cmd_len;
+
+       pn533_tx_frame_finish(dev->out_frame);
+
+       rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame,
+                               dev->in_maxlen, pn533_in_dep_link_up_complete,
+                               cmd, GFP_KERNEL);
+       if (rc)
+               goto out;
+
+
+out:
+       kfree(cmd);
+
+       return rc;
+}
+
+static int pn533_dep_link_down(struct nfc_dev *nfc_dev)
+{
+       pn533_deactivate_target(nfc_dev, 0);
+
+       return 0;
+}
+
 #define PN533_CMD_DATAEXCH_HEAD_LEN (sizeof(struct pn533_frame) + 3)
 #define PN533_CMD_DATAEXCH_DATA_MAXLEN 262
 
@@ -1339,7 +1501,7 @@ error:
        return 0;
 }
 
-int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
+static int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
                                                struct sk_buff *skb,
                                                data_exchange_cb_t cb,
                                                void *cb_context)
@@ -1368,7 +1530,7 @@ int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
                        PN533_CMD_DATAEXCH_DATA_MAXLEN +
                        PN533_FRAME_TAIL_SIZE;
 
-       skb_resp = nfc_alloc_skb(skb_resp_len, GFP_KERNEL);
+       skb_resp = nfc_alloc_recv_skb(skb_resp_len, GFP_KERNEL);
        if (!skb_resp) {
                rc = -ENOMEM;
                goto error;
@@ -1434,6 +1596,8 @@ static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
 struct nfc_ops pn533_nfc_ops = {
        .dev_up = NULL,
        .dev_down = NULL,
+       .dep_link_up = pn533_dep_link_up,
+       .dep_link_down = pn533_dep_link_down,
        .start_poll = pn533_start_poll,
        .stop_poll = pn533_stop_poll,
        .activate_target = pn533_activate_target,
index 19c0115..0f0cfa3 100644
 #include <linux/string.h>
 #include <linux/slab.h>
 
-/* For archs that don't support NO_IRQ (such as x86), provide a dummy value */
-#ifndef NO_IRQ
-#define NO_IRQ 0
-#endif
-
 /**
  * irq_of_parse_and_map - Parse and map an interrupt into linux virq space
  * @device: Device node of the device whose interrupt is to be mapped
@@ -44,7 +39,7 @@ unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
        struct of_irq oirq;
 
        if (of_irq_map_one(dev, index, &oirq))
-               return NO_IRQ;
+               return 0;
 
        return irq_create_of_mapping(oirq.controller, oirq.specifier,
                                     oirq.size);
@@ -345,7 +340,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
 
        /* Only dereference the resource if both the
         * resource and the irq are valid. */
-       if (r && irq != NO_IRQ) {
+       if (r && irq) {
                r->start = r->end = irq;
                r->flags = IORESOURCE_IRQ;
                r->name = dev->full_name;
@@ -363,7 +358,7 @@ int of_irq_count(struct device_node *dev)
 {
        int nr = 0;
 
-       while (of_irq_to_resource(dev, nr, NULL) != NO_IRQ)
+       while (of_irq_to_resource(dev, nr, NULL))
                nr++;
 
        return nr;
@@ -383,7 +378,7 @@ int of_irq_to_resource_table(struct device_node *dev, struct resource *res,
        int i;
 
        for (i = 0; i < nr_irqs; i++, res++)
-               if (of_irq_to_resource(dev, i, res) == NO_IRQ)
+               if (!of_irq_to_resource(dev, i, res))
                        break;
 
        return i;
index dccd863..f8c752e 100644
@@ -239,26 +239,45 @@ int oprofile_set_ulong(unsigned long *addr, unsigned long val)
        return err;
 }
 
+static int timer_mode;
+
 static int __init oprofile_init(void)
 {
        int err;
 
+       /* always init architecture to setup backtrace support */
        err = oprofile_arch_init(&oprofile_ops);
-       if (err < 0 || timer) {
-               printk(KERN_INFO "oprofile: using timer interrupt.\n");
+
+       timer_mode = err || timer;      /* fall back to timer mode on errors */
+       if (timer_mode) {
+               if (!err)
+                       oprofile_arch_exit();
                err = oprofile_timer_init(&oprofile_ops);
                if (err)
                        return err;
        }
-       return oprofilefs_register();
+
+       err = oprofilefs_register();
+       if (!err)
+               return 0;
+
+       /* failed */
+       if (timer_mode)
+               oprofile_timer_exit();
+       else
+               oprofile_arch_exit();
+
+       return err;
 }
 
 
 static void __exit oprofile_exit(void)
 {
-       oprofile_timer_exit();
        oprofilefs_unregister();
-       oprofile_arch_exit();
+       if (timer_mode)
+               oprofile_timer_exit();
+       else
+               oprofile_arch_exit();
 }
 
 
index 89f6345..84a208d 100644
@@ -45,7 +45,7 @@ static ssize_t timeout_write(struct file *file, char const __user *buf,
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&val, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
 
        retval = oprofile_set_timeout(val);
@@ -84,7 +84,7 @@ static ssize_t depth_write(struct file *file, char const __user *buf, size_t cou
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&val, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
 
        retval = oprofile_set_ulong(&oprofile_backtrace_depth, val);
@@ -141,9 +141,10 @@ static ssize_t enable_write(struct file *file, char const __user *buf, size_t co
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&val, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
 
+       retval = 0;
        if (val)
                retval = oprofile_start();
        else
index d0de6cc..2f0aa0f 100644
@@ -60,6 +60,13 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user *buf, size_t cou
 }
 
 
+/*
+ * Note: If oprofilefs_ulong_from_user() returns 0, then *val remains
+ * unchanged and might be uninitialized. This follows write syscall
+ * implementation when count is zero: "If count is zero ... [and if]
+ * no errors are detected, 0 will be returned without causing any
+ * other effect." (man 2 write)
+ */
 int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
 {
        char tmpbuf[TMPBUFSIZE];
@@ -79,7 +86,7 @@ int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_
        raw_spin_lock_irqsave(&oprofilefs_lock, flags);
        *val = simple_strtoul(tmpbuf, NULL, 0);
        raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
-       return 0;
+       return count;
 }
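
With the new contract — a negative errno on failure, 0 when count is zero (leaving *val untouched) and count on success — callers are expected to use the "<= 0" pattern the hunks above switch to. Illustrative sketch (some_setting is a hypothetical consumer):

	retval = oprofilefs_ulong_from_user(&val, buf, count);
	if (retval <= 0)	/* error, or count == 0: val was not written */
		return retval;

	/* here retval == count and val holds the parsed number */
	retval = oprofile_set_ulong(&some_setting, val);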
 
 
@@ -99,7 +106,7 @@ static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&value, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
 
        retval = oprofile_set_ulong(file->private_data, value);
index 3ef4462..878fba1 100644
@@ -110,6 +110,7 @@ int oprofile_timer_init(struct oprofile_operations *ops)
        ops->start = oprofile_hrtimer_start;
        ops->stop = oprofile_hrtimer_stop;
        ops->cpu_type = "timer";
+       printk(KERN_INFO "oprofile: using timer interrupt.\n");
        return 0;
 }
 
index 7ec56fb..b0dd08e 100644
@@ -13,6 +13,7 @@
 #include <linux/export.h>
 #include <linux/pci-ats.h>
 #include <linux/pci.h>
+#include <linux/slab.h>
 
 #include "pci.h"
 
index fce1c54..9ddf69e 100644
@@ -132,6 +132,18 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
        if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
                return AE_OK;
 
+       pdev = pbus->self;
+       if (pdev && pci_is_pcie(pdev)) {
+               tmp = acpi_find_root_bridge_handle(pdev);
+               if (tmp) {
+                       struct acpi_pci_root *root = acpi_pci_find_root(tmp);
+
+                       if (root && (root->osc_control_set &
+                                       OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
+                               return AE_OK;
+               }
+       }
+
        acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
        device = (adr >> 16) & 0xffff;
        function = adr & 0xffff;
@@ -213,7 +225,6 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
 
        pdev = pci_get_slot(pbus, PCI_DEVFN(device, function));
        if (pdev) {
-               pdev->current_state = PCI_D0;
                slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
                pci_dev_put(pdev);
        }
@@ -459,17 +470,8 @@ static int add_bridge(acpi_handle handle)
 {
        acpi_status status;
        unsigned long long tmp;
-       struct acpi_pci_root *root;
        acpi_handle dummy_handle;
 
-       /*
-        * We shouldn't use this bridge if PCIe native hotplug control has been
-        * granted by the BIOS for it.
-        */
-       root = acpi_pci_find_root(handle);
-       if (root && (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
-               return -ENODEV;
-
        /* if the bridge doesn't have _STA, we assume it is always there */
        status = acpi_get_handle(handle, "_STA", &dummy_handle);
        if (ACPI_SUCCESS(status)) {
@@ -1385,19 +1387,11 @@ static void handle_hotplug_event_func(acpi_handle handle, u32 type,
 static acpi_status
 find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
 {
-       struct acpi_pci_root *root;
        int *count = (int *)context;
 
        if (!acpi_is_root_bridge(handle))
                return AE_OK;
 
-       root = acpi_pci_find_root(handle);
-       if (!root)
-               return AE_OK;
-
-       if (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
-               return AE_OK;
-
        (*count)++;
        acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
                                    handle_hotplug_event_bridge, NULL);
index b82c155..1969a3e 100644
@@ -283,6 +283,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
        struct resource *res;
        struct pci_dev *pdev;
        struct pci_sriov *iov = dev->sriov;
+       int bars = 0;
 
        if (!nr_virtfn)
                return 0;
@@ -307,6 +308,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
 
        nres = 0;
        for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+               bars |= (1 << (i + PCI_IOV_RESOURCES));
                res = dev->resource + PCI_IOV_RESOURCES + i;
                if (res->parent)
                        nres++;
@@ -324,6 +326,11 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
                return -ENOMEM;
        }
 
+       if (pci_enable_resources(dev, bars)) {
+               dev_err(&dev->dev, "SR-IOV: IOV BARS not allocated\n");
+               return -ENOMEM;
+       }
+
        if (iov->link != dev->devfn) {
                pdev = pci_get_slot(dev->bus, iov->link);
                if (!pdev)
index 6f45a73..6d4a531 100644
@@ -664,6 +664,9 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
                error = platform_pci_set_power_state(dev, state);
                if (!error)
                        pci_update_current_state(dev, state);
+               /* Fall back to PCI_D0 if native PM is not supported */
+               if (!dev->pm_cap)
+                       dev->current_state = PCI_D0;
        } else {
                error = -ENODEV;
                /* Fall back to PCI_D0 if native PM is not supported */
@@ -1126,7 +1129,11 @@ static int __pci_enable_device_flags(struct pci_dev *dev,
        if (atomic_add_return(1, &dev->enable_cnt) > 1)
                return 0;               /* already enabled */
 
-       for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+       /* only skip sriov related */
+       for (i = 0; i <= PCI_ROM_RESOURCE; i++)
+               if (dev->resource[i].flags & flags)
+                       bars |= (1 << i);
+       for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
                if (dev->resource[i].flags & flags)
                        bars |= (1 << i);
 
index 13ef8c3..dcdc1f4 100644
@@ -121,6 +121,7 @@ struct toshiba_acpi_dev {
        int illumination_supported:1;
        int video_supported:1;
        int fan_supported:1;
+       int system_event_supported:1;
 
        struct mutex mutex;
 };
@@ -724,7 +725,7 @@ static int keys_proc_show(struct seq_file *m, void *v)
        u32 hci_result;
        u32 value;
 
-       if (!dev->key_event_valid) {
+       if (!dev->key_event_valid && dev->system_event_supported) {
                hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
                if (hci_result == HCI_SUCCESS) {
                        dev->key_event_valid = 1;
@@ -964,6 +965,8 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
 
        /* enable event fifo */
        hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
+       if (hci_result == HCI_SUCCESS)
+               dev->system_event_supported = 1;
 
        props.type = BACKLIGHT_PLATFORM;
        props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
@@ -1032,12 +1035,15 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
 {
        struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
        u32 hci_result, value;
+       int retries = 3;
 
-       if (event != 0x80)
+       if (!dev->system_event_supported || event != 0x80)
                return;
+
        do {
                hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
-               if (hci_result == HCI_SUCCESS) {
+               switch (hci_result) {
+               case HCI_SUCCESS:
                        if (value == 0x100)
                                continue;
                        /* act on key press; ignore key release */
@@ -1049,14 +1055,19 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
                                pr_info("Unknown key %x\n",
                                       value);
                        }
-               } else if (hci_result == HCI_NOT_SUPPORTED) {
+                       break;
+               case HCI_NOT_SUPPORTED:
                        /* This is a workaround for an unresolved issue on
                         * some machines where system events sporadically
                         * become disabled. */
                        hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
                        pr_notice("Re-enabled hotkeys\n");
+                       /* fall through */
+               default:
+                       retries--;
+                       break;
                }
-       } while (hci_result != HCI_EMPTY);
+       } while (retries && hci_result != HCI_EMPTY);
 }
 
 
index cffcb7c..01fa671 100644
@@ -61,7 +61,8 @@ MODULE_PARM_DESC(debug, "Flag to enable PMIC Battery debug messages.");
 #define PMIC_BATT_CHR_SBATDET_MASK     (1 << 5)
 #define PMIC_BATT_CHR_SDCLMT_MASK      (1 << 6)
 #define PMIC_BATT_CHR_SUSBOVP_MASK     (1 << 7)
-#define PMIC_BATT_CHR_EXCPT_MASK       0xC6
+#define PMIC_BATT_CHR_EXCPT_MASK       0x86
+
 #define PMIC_BATT_ADC_ACCCHRG_MASK     (1 << 31)
 #define PMIC_BATT_ADC_ACCCHRGVAL_MASK  0x7FFFFFFF
 
@@ -304,11 +305,6 @@ static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
                        pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
                        pmic_battery_log_event(BATT_EVENT_BATOVP_EXCPT);
                        batt_exception = 1;
-               } else if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
-                       pbi->batt_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
-                       pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
-                       pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
-                       batt_exception = 1;
                } else if (r8 & PMIC_BATT_CHR_STEMP_MASK) {
                        pbi->batt_health = POWER_SUPPLY_HEALTH_OVERHEAT;
                        pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
@@ -316,6 +312,10 @@ static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
                        batt_exception = 1;
                } else {
                        pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD;
+                       if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
+                               /* PMIC will change charging current automatically */
+                               pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
+                       }
                }
        }
 
index cf3f999..10451a1 100644
@@ -101,7 +101,9 @@ static s32 scaled_ppm_to_ppb(long ppm)
 
 static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp)
 {
-       return 1; /* always round timer functions to one nanosecond */
+       tp->tv_sec = 0;
+       tp->tv_nsec = 1;
+       return 0;
 }
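
This matches the clock_getres(2) contract: the resolution is reported through the timespec argument (here 0 s, 1 ns) and the function returns 0 on success, instead of the old behaviour of returning the value 1 and leaving *tp untouched.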
 
 static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp)
index 5225930..691b1ab 100644
@@ -851,14 +851,12 @@ static int tsi721_doorbell_init(struct tsi721_device *priv)
        INIT_WORK(&priv->idb_work, tsi721_db_dpc);
 
        /* Allocate buffer for inbound doorbells queue */
-       priv->idb_base = dma_alloc_coherent(&priv->pdev->dev,
+       priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev,
                                IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
                                &priv->idb_dma, GFP_KERNEL);
        if (!priv->idb_base)
                return -ENOMEM;
 
-       memset(priv->idb_base, 0, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE);
-
        dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n",
                priv->idb_base, (unsigned long long)priv->idb_dma);
 
@@ -904,7 +902,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
         */
 
        /* Allocate space for DMA descriptors */
-       bd_ptr = dma_alloc_coherent(&priv->pdev->dev,
+       bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
                                        bd_num * sizeof(struct tsi721_dma_desc),
                                        &bd_phys, GFP_KERNEL);
        if (!bd_ptr)
@@ -913,8 +911,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
        priv->bdma[chnum].bd_phys = bd_phys;
        priv->bdma[chnum].bd_base = bd_ptr;
 
-       memset(bd_ptr, 0, bd_num * sizeof(struct tsi721_dma_desc));
-
        dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
                bd_ptr, (unsigned long long)bd_phys);
 
@@ -922,7 +918,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
        sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
                                        bd_num : TSI721_DMA_MINSTSSZ;
        sts_size = roundup_pow_of_two(sts_size);
-       sts_ptr = dma_alloc_coherent(&priv->pdev->dev,
+       sts_ptr = dma_zalloc_coherent(&priv->pdev->dev,
                                     sts_size * sizeof(struct tsi721_dma_sts),
                                     &sts_phys, GFP_KERNEL);
        if (!sts_ptr) {
@@ -938,8 +934,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
        priv->bdma[chnum].sts_base = sts_ptr;
        priv->bdma[chnum].sts_size = sts_size;
 
-       memset(sts_ptr, 0, sts_size);
-
        dev_dbg(&priv->pdev->dev,
                "desc status FIFO @ %p (phys = %llx) size=0x%x\n",
                sts_ptr, (unsigned long long)sts_phys, sts_size);
@@ -1400,7 +1394,7 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
 
        /* Outbound message descriptor status FIFO allocation */
        priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
-       priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev,
+       priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev,
                        priv->omsg_ring[mbox].sts_size *
                                                sizeof(struct tsi721_dma_sts),
                        &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL);
@@ -1412,9 +1406,6 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
                goto out_desc;
        }
 
-       memset(priv->omsg_ring[mbox].sts_base, 0,
-               entries * sizeof(struct tsi721_dma_sts));
-
        /*
         * Configure Outbound Messaging Engine
         */
@@ -2116,8 +2107,8 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
        INIT_LIST_HEAD(&mport->dbells);
 
        rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
-       rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
-       rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
+       rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3);
+       rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3);
        strcpy(mport->name, "Tsi721 mport");
 
        /* Hook up interrupt handler */
@@ -2163,7 +2154,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
                                  const struct pci_device_id *id)
 {
        struct tsi721_device *priv;
-       int i;
+       int i, cap;
        int err;
        u32 regval;
 
@@ -2271,10 +2262,20 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
                        dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
        }
 
-       /* Clear "no snoop" and "relaxed ordering" bits. */
-       pci_read_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, &regval);
-       regval &= ~(PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN);
-       pci_write_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, regval);
+       cap = pci_pcie_cap(pdev);
+       BUG_ON(cap == 0);
+
+       /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
+       pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, &regval);
+       regval &= ~(PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
+                   PCI_EXP_DEVCTL_NOSNOOP_EN);
+       regval |= 0x2 << MAX_READ_REQUEST_SZ_SHIFT;
+       pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL, regval);
+
+       /* Adjust PCIe completion timeout. */
+       pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL2, &regval);
+       regval &= ~(0x0f);
+       pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL2, regval | 0x2);
 
        /*
         * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
index 58be4de..822e54c 100644 (file)
@@ -72,6 +72,8 @@
 #define TSI721_MSIXPBA_OFFSET  0x2a000
 #define TSI721_PCIECFG_EPCTL   0x400
 
+#define MAX_READ_REQUEST_SZ_SHIFT      12
+
 /*
  * Event Management Registers
  */
index e8326f2..dc4c274 100644 (file)
@@ -63,7 +63,7 @@ static int rtc_suspend(struct device *dev, pm_message_t mesg)
         */
        delta = timespec_sub(old_system, old_rtc);
        delta_delta = timespec_sub(delta, old_delta);
-       if (abs(delta_delta.tv_sec)  >= 2) {
+       if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) {
                /*
                 * if delta_delta is too large, assume time correction
                 * has occurred and set old_delta to the current delta.
@@ -97,9 +97,8 @@ static int rtc_resume(struct device *dev)
        rtc_tm_to_time(&tm, &new_rtc.tv_sec);
        new_rtc.tv_nsec = 0;
 
-       if (new_rtc.tv_sec <= old_rtc.tv_sec) {
-               if (new_rtc.tv_sec < old_rtc.tv_sec)
-                       pr_debug("%s:  time travel!\n", dev_name(&rtc->dev));
+       if (new_rtc.tv_sec < old_rtc.tv_sec) {
+               pr_debug("%s:  time travel!\n", dev_name(&rtc->dev));
                return 0;
        }
 
@@ -116,7 +115,8 @@ static int rtc_resume(struct device *dev)
        sleep_time = timespec_sub(sleep_time,
                        timespec_sub(new_system, old_system));
 
-       timekeeping_inject_sleeptime(&sleep_time);
+       if (sleep_time.tv_sec >= 0)
+               timekeeping_inject_sleeptime(&sleep_time);
        return 0;
 }
 
index 8e28625..3bcc7cf 100644 (file)
@@ -73,6 +73,8 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
                err = -EINVAL;
 
        mutex_unlock(&rtc->ops_lock);
+       /* A timer might have just expired */
+       schedule_work(&rtc->irqwork);
        return err;
 }
 EXPORT_SYMBOL_GPL(rtc_set_time);
@@ -112,6 +114,8 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
                err = -EINVAL;
 
        mutex_unlock(&rtc->ops_lock);
+       /* A timer might have just expired */
+       schedule_work(&rtc->irqwork);
 
        return err;
 }
@@ -319,6 +323,20 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 }
 EXPORT_SYMBOL_GPL(rtc_read_alarm);
 
+static int ___rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+{
+       int err;
+
+       if (!rtc->ops)
+               err = -ENODEV;
+       else if (!rtc->ops->set_alarm)
+               err = -EINVAL;
+       else
+               err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
+
+       return err;
+}
+
 static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 {
        struct rtc_time tm;
@@ -342,14 +360,7 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
         * over right here, before we set the alarm.
         */
 
-       if (!rtc->ops)
-               err = -ENODEV;
-       else if (!rtc->ops->set_alarm)
-               err = -EINVAL;
-       else
-               err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
-
-       return err;
+       return ___rtc_set_alarm(rtc, alarm);
 }
 
 int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
@@ -396,6 +407,8 @@ int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
                timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
        }
        mutex_unlock(&rtc->ops_lock);
+       /* maybe that was in the past. */
+       schedule_work(&rtc->irqwork);
        return err;
 }
 EXPORT_SYMBOL_GPL(rtc_initialize_alarm);
@@ -763,6 +776,20 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
        return 0;
 }
 
+static void rtc_alarm_disable(struct rtc_device *rtc)
+{
+       struct rtc_wkalrm alarm;
+       struct rtc_time tm;
+
+       __rtc_read_time(rtc, &tm);
+
+       alarm.time = rtc_ktime_to_tm(ktime_add(rtc_tm_to_ktime(tm),
+                                    ktime_set(300, 0)));
+       alarm.enabled = 0;
+
+       ___rtc_set_alarm(rtc, &alarm);
+}
+
 /**
  * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
  * @rtc: rtc device
@@ -784,8 +811,10 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
                struct rtc_wkalrm alarm;
                int err;
                next = timerqueue_getnext(&rtc->timerqueue);
-               if (!next)
+               if (!next) {
+                       rtc_alarm_disable(rtc);
                        return;
+               }
                alarm.time = rtc_ktime_to_tm(next->expires);
                alarm.enabled = 1;
                err = __rtc_set_alarm(rtc, &alarm);
@@ -847,7 +876,8 @@ again:
                err = __rtc_set_alarm(rtc, &alarm);
                if (err == -ETIME)
                        goto again;
-       }
+       } else
+               rtc_alarm_disable(rtc);
 
        mutex_unlock(&rtc->ops_lock);
 }
index eda128f..64aedd8 100644 (file)
@@ -357,10 +357,19 @@ static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
 static struct rtc_class_ops m41t80_rtc_ops = {
        .read_time = m41t80_rtc_read_time,
        .set_time = m41t80_rtc_set_time,
+       /*
+        * XXX - m41t80 alarm functionality is reported broken.
+        * until it is fixed, don't register alarm functions.
+        *
        .read_alarm = m41t80_rtc_read_alarm,
        .set_alarm = m41t80_rtc_set_alarm,
+       */
        .proc = m41t80_rtc_proc,
+       /*
+        * See above comment on broken alarm
+        *
        .alarm_irq_enable = m41t80_rtc_alarm_irq_enable,
+       */
 };
 
 #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
index 7639ab9..5b979d9 100644 (file)
@@ -202,7 +202,6 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
        void __iomem *base = s3c_rtc_base;
        int year = tm->tm_year - 100;
 
-       clk_enable(rtc_clk);
        pr_debug("set time %04d.%02d.%02d %02d:%02d:%02d\n",
                 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
                 tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -214,6 +213,7 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
                return -EINVAL;
        }
 
+       clk_enable(rtc_clk);
        writeb(bin2bcd(tm->tm_sec),  base + S3C2410_RTCSEC);
        writeb(bin2bcd(tm->tm_min),  base + S3C2410_RTCMIN);
        writeb(bin2bcd(tm->tm_hour), base + S3C2410_RTCHOUR);
index 75c3f1f..a84631a 100644 (file)
@@ -529,10 +529,7 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
 int chsc_chp_vary(struct chp_id chpid, int on)
 {
        struct channel_path *chp = chpid_to_chp(chpid);
-       struct chp_link link;
 
-       memset(&link, 0, sizeof(struct chp_link));
-       link.chpid = chpid;
        /* Wait until previous actions have settled. */
        css_wait_for_slow_path();
        /*
@@ -542,10 +539,10 @@ int chsc_chp_vary(struct chp_id chpid, int on)
                /* Try to update the channel path descriptor. */
                chsc_determine_base_channel_path_desc(chpid, &chp->desc);
                for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
-                                          __s390_vary_chpid_on, &link);
+                                          __s390_vary_chpid_on, &chpid);
        } else
                for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
-                                          NULL, &link);
+                                          NULL, &chpid);
 
        return 0;
 }
index 155a82b..4a1ff5c 100644 (file)
@@ -68,8 +68,13 @@ struct schib {
        __u8 mda[4];             /* model dependent area */
 } __attribute__ ((packed,aligned(4)));
 
+/*
+ * When rescheduled, todos with higher values will overwrite those
+ * with lower values.
+ */
 enum sch_todo {
        SCH_TODO_NOTHING,
+       SCH_TODO_EVAL,
        SCH_TODO_UNREG,
 };
 
index 92d7324..21908e6 100644 (file)
@@ -195,51 +195,6 @@ void css_sch_device_unregister(struct subchannel *sch)
 }
 EXPORT_SYMBOL_GPL(css_sch_device_unregister);
 
-static void css_sch_todo(struct work_struct *work)
-{
-       struct subchannel *sch;
-       enum sch_todo todo;
-
-       sch = container_of(work, struct subchannel, todo_work);
-       /* Find out todo. */
-       spin_lock_irq(sch->lock);
-       todo = sch->todo;
-       CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
-                     sch->schid.sch_no, todo);
-       sch->todo = SCH_TODO_NOTHING;
-       spin_unlock_irq(sch->lock);
-       /* Perform todo. */
-       if (todo == SCH_TODO_UNREG)
-               css_sch_device_unregister(sch);
-       /* Release workqueue ref. */
-       put_device(&sch->dev);
-}
-
-/**
- * css_sched_sch_todo - schedule a subchannel operation
- * @sch: subchannel
- * @todo: todo
- *
- * Schedule the operation identified by @todo to be performed on the slow path
- * workqueue. Do nothing if another operation with higher priority is already
- * scheduled. Needs to be called with subchannel lock held.
- */
-void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
-{
-       CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
-                     sch->schid.ssid, sch->schid.sch_no, todo);
-       if (sch->todo >= todo)
-               return;
-       /* Get workqueue ref. */
-       if (!get_device(&sch->dev))
-               return;
-       sch->todo = todo;
-       if (!queue_work(cio_work_q, &sch->todo_work)) {
-               /* Already queued, release workqueue ref. */
-               put_device(&sch->dev);
-       }
-}
-
 static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
 {
        int i;
@@ -466,6 +421,65 @@ static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
                css_schedule_eval(schid);
 }
 
+/**
+ * css_sched_sch_todo - schedule a subchannel operation
+ * @sch: subchannel
+ * @todo: todo
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with subchannel lock held.
+ */
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
+{
+       CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
+                     sch->schid.ssid, sch->schid.sch_no, todo);
+       if (sch->todo >= todo)
+               return;
+       /* Get workqueue ref. */
+       if (!get_device(&sch->dev))
+               return;
+       sch->todo = todo;
+       if (!queue_work(cio_work_q, &sch->todo_work)) {
+               /* Already queued, release workqueue ref. */
+               put_device(&sch->dev);
+       }
+}
+
+static void css_sch_todo(struct work_struct *work)
+{
+       struct subchannel *sch;
+       enum sch_todo todo;
+       int ret;
+
+       sch = container_of(work, struct subchannel, todo_work);
+       /* Find out todo. */
+       spin_lock_irq(sch->lock);
+       todo = sch->todo;
+       CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
+                     sch->schid.sch_no, todo);
+       sch->todo = SCH_TODO_NOTHING;
+       spin_unlock_irq(sch->lock);
+       /* Perform todo. */
+       switch (todo) {
+       case SCH_TODO_NOTHING:
+               break;
+       case SCH_TODO_EVAL:
+               ret = css_evaluate_known_subchannel(sch, 1);
+               if (ret == -EAGAIN) {
+                       spin_lock_irq(sch->lock);
+                       css_sched_sch_todo(sch, todo);
+                       spin_unlock_irq(sch->lock);
+               }
+               break;
+       case SCH_TODO_UNREG:
+               css_sch_device_unregister(sch);
+               break;
+       }
+       /* Release workqueue ref. */
+       put_device(&sch->dev);
+}
+
 static struct idset *slow_subchannel_set;
 static spinlock_t slow_subchannel_lock;
 static wait_queue_head_t css_eval_wq;
index d734f4a..4726985 100644 (file)
@@ -1868,9 +1868,9 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
         */
        cdev->private->flags.resuming = 1;
        cdev->private->path_new_mask = LPM_ANYPATH;
-       css_schedule_eval(sch->schid);
+       css_sched_sch_todo(sch, SCH_TODO_EVAL);
        spin_unlock_irq(sch->lock);
-       css_complete_work();
+       css_wait_for_slow_path();
 
        /* cdev may have been moved to a different subchannel. */
        sch = to_subchannel(cdev->dev.parent);
index 52c233f..1b85351 100644 (file)
@@ -496,8 +496,26 @@ static void ccw_device_reset_path_events(struct ccw_device *cdev)
        cdev->private->pgid_reset_mask = 0;
 }
 
-void
-ccw_device_verify_done(struct ccw_device *cdev, int err)
+static void create_fake_irb(struct irb *irb, int type)
+{
+       memset(irb, 0, sizeof(*irb));
+       if (type == FAKE_CMD_IRB) {
+               struct cmd_scsw *scsw = &irb->scsw.cmd;
+               scsw->cc = 1;
+               scsw->fctl = SCSW_FCTL_START_FUNC;
+               scsw->actl = SCSW_ACTL_START_PEND;
+               scsw->stctl = SCSW_STCTL_STATUS_PEND;
+       } else if (type == FAKE_TM_IRB) {
+               struct tm_scsw *scsw = &irb->scsw.tm;
+               scsw->x = 1;
+               scsw->cc = 1;
+               scsw->fctl = SCSW_FCTL_START_FUNC;
+               scsw->actl = SCSW_ACTL_START_PEND;
+               scsw->stctl = SCSW_STCTL_STATUS_PEND;
+       }
+}
+
+void ccw_device_verify_done(struct ccw_device *cdev, int err)
 {
        struct subchannel *sch;
 
@@ -520,12 +538,8 @@ callback:
                ccw_device_done(cdev, DEV_STATE_ONLINE);
                /* Deliver fake irb to device driver, if needed. */
                if (cdev->private->flags.fake_irb) {
-                       memset(&cdev->private->irb, 0, sizeof(struct irb));
-                       cdev->private->irb.scsw.cmd.cc = 1;
-                       cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC;
-                       cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND;
-                       cdev->private->irb.scsw.cmd.stctl =
-                               SCSW_STCTL_STATUS_PEND;
+                       create_fake_irb(&cdev->private->irb,
+                                       cdev->private->flags.fake_irb);
                        cdev->private->flags.fake_irb = 0;
                        if (cdev->handler)
                                cdev->handler(cdev, cdev->private->intparm,
index f98698d..ec7fb6d 100644 (file)
@@ -198,7 +198,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
        if (cdev->private->state == DEV_STATE_VERIFY) {
                /* Remember to fake irb when finished. */
                if (!cdev->private->flags.fake_irb) {
-                       cdev->private->flags.fake_irb = 1;
+                       cdev->private->flags.fake_irb = FAKE_CMD_IRB;
                        cdev->private->intparm = intparm;
                        return 0;
                } else
@@ -213,9 +213,9 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
        ret = cio_set_options (sch, flags);
        if (ret)
                return ret;
-       /* Adjust requested path mask to excluded varied off paths. */
+       /* Adjust requested path mask to exclude unusable paths. */
        if (lpm) {
-               lpm &= sch->opm;
+               lpm &= sch->lpm;
                if (lpm == 0)
                        return -EACCES;
        }
@@ -605,11 +605,21 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
        sch = to_subchannel(cdev->dev.parent);
        if (!sch->schib.pmcw.ena)
                return -EINVAL;
+       if (cdev->private->state == DEV_STATE_VERIFY) {
+               /* Remember to fake irb when finished. */
+               if (!cdev->private->flags.fake_irb) {
+                       cdev->private->flags.fake_irb = FAKE_TM_IRB;
+                       cdev->private->intparm = intparm;
+                       return 0;
+               } else
+                       /* There's already a fake I/O around. */
+                       return -EBUSY;
+       }
        if (cdev->private->state != DEV_STATE_ONLINE)
                return -EIO;
-       /* Adjust requested path mask to excluded varied off paths. */
+       /* Adjust requested path mask to exclude unusable paths. */
        if (lpm) {
-               lpm &= sch->opm;
+               lpm &= sch->lpm;
                if (lpm == 0)
                        return -EACCES;
        }
index 2ebb492..76253df 100644 (file)
@@ -111,6 +111,9 @@ enum cdev_todo {
        CDEV_TODO_UNREG_EVAL,
 };
 
+#define FAKE_CMD_IRB   1
+#define FAKE_TM_IRB    2
+
 struct ccw_device_private {
        struct ccw_device *cdev;
        struct subchannel *sch;
@@ -138,7 +141,7 @@ struct ccw_device_private {
                unsigned int doverify:1;    /* delayed path verification */
                unsigned int donotify:1;    /* call notify function */
                unsigned int recog_done:1;  /* dev. recog. complete */
-               unsigned int fake_irb:1;    /* deliver faked irb */
+               unsigned int fake_irb:2;    /* deliver faked irb */
                unsigned int resuming:1;    /* recognition while resume */
                unsigned int pgroup:1;      /* pathgroup is set up */
                unsigned int mpath:1;       /* multipathing is set up */
index ec94f04..96bbe9d 100644 (file)
@@ -1552,6 +1552,8 @@ static void ap_reset(struct ap_device *ap_dev)
        rc = ap_init_queue(ap_dev->qid);
        if (rc == -ENODEV)
                ap_dev->unregistered = 1;
+       else
+               __ap_schedule_poll_timer();
 }
 
 static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
index b6a6356..8160591 100644 (file)
@@ -63,6 +63,7 @@
 
 #include <asm/io.h>
 #include <asm/uaccess.h>
+#include <asm/ebcdic.h>
 
 #include <net/iucv/iucv.h>
 #include "fsm.h"
@@ -75,7 +76,7 @@ MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
  * Debug Facility stuff
  */
 #define IUCV_DBF_SETUP_NAME "iucv_setup"
-#define IUCV_DBF_SETUP_LEN 32
+#define IUCV_DBF_SETUP_LEN 64
 #define IUCV_DBF_SETUP_PAGES 2
 #define IUCV_DBF_SETUP_NR_AREAS 1
 #define IUCV_DBF_SETUP_LEVEL 3
@@ -226,6 +227,7 @@ struct iucv_connection {
        struct net_device         *netdev;
        struct connection_profile prof;
        char                      userid[9];
+       char                      userdata[17];
 };
 
 /**
@@ -263,7 +265,7 @@ struct ll_header {
 };
 
 #define NETIUCV_HDRLEN          (sizeof(struct ll_header))
-#define NETIUCV_BUFSIZE_MAX      32768
+#define NETIUCV_BUFSIZE_MAX     65537
 #define NETIUCV_BUFSIZE_DEFAULT  NETIUCV_BUFSIZE_MAX
 #define NETIUCV_MTU_MAX          (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
 #define NETIUCV_MTU_DEFAULT      9216
@@ -288,7 +290,12 @@ static inline int netiucv_test_and_set_busy(struct net_device *dev)
        return test_and_set_bit(0, &priv->tbusy);
 }
 
-static u8 iucvMagic[16] = {
+static u8 iucvMagic_ascii[16] = {
+       0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+       0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
+};
+
+static u8 iucvMagic_ebcdic[16] = {
        0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
        0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
 };
@@ -301,18 +308,38 @@ static u8 iucvMagic[16] = {
  *
  * @returns The printable string (static data!!)
  */
-static char *netiucv_printname(char *name)
+static char *netiucv_printname(char *name, int len)
 {
-       static char tmp[9];
+       static char tmp[17];
        char *p = tmp;
-       memcpy(tmp, name, 8);
-       tmp[8] = '\0';
-       while (*p && (!isspace(*p)))
+       memcpy(tmp, name, len);
+       tmp[len] = '\0';
+       while (*p && ((p - tmp) < len) && (!isspace(*p)))
                p++;
        *p = '\0';
        return tmp;
 }
 
+static char *netiucv_printuser(struct iucv_connection *conn)
+{
+       static char tmp_uid[9];
+       static char tmp_udat[17];
+       static char buf[100];
+
+       if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
+               tmp_uid[8] = '\0';
+               tmp_udat[16] = '\0';
+               memcpy(tmp_uid, conn->userid, 8);
+               memcpy(tmp_uid, netiucv_printname(tmp_uid, 8), 8);
+               memcpy(tmp_udat, conn->userdata, 16);
+               EBCASC(tmp_udat, 16);
+               memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
+               sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
+               return buf;
+       } else
+               return netiucv_printname(conn->userid, 8);
+}
+
 /**
  * States of the interface statemachine.
  */
@@ -563,15 +590,18 @@ static int netiucv_callback_connreq(struct iucv_path *path,
 {
        struct iucv_connection *conn = path->private;
        struct iucv_event ev;
+       static char tmp_user[9];
+       static char tmp_udat[17];
        int rc;
 
-       if (memcmp(iucvMagic, ipuser, 16))
-               /* ipuser must match iucvMagic. */
-               return -EINVAL;
        rc = -EINVAL;
+       memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
+       memcpy(tmp_udat, ipuser, 16);
+       EBCASC(tmp_udat, 16);
        read_lock_bh(&iucv_connection_rwlock);
        list_for_each_entry(conn, &iucv_connection_list, list) {
-               if (strncmp(ipvmid, conn->userid, 8))
+               if (strncmp(ipvmid, conn->userid, 8) ||
+                   strncmp(ipuser, conn->userdata, 16))
                        continue;
                /* Found a matching connection for this path. */
                conn->path = path;
@@ -580,6 +610,8 @@ static int netiucv_callback_connreq(struct iucv_path *path,
                fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
                rc = 0;
        }
+       IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
+                      tmp_user, netiucv_printname(tmp_udat, 16));
        read_unlock_bh(&iucv_connection_rwlock);
        return rc;
 }
@@ -816,7 +848,7 @@ static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
        conn->path = path;
        path->msglim = NETIUCV_QUEUELEN_DEFAULT;
        path->flags = 0;
-       rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
+       rc = iucv_path_accept(path, &netiucv_handler, conn->userdata, conn);
        if (rc) {
                IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
                return;
@@ -854,7 +886,7 @@ static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
 
        IUCV_DBF_TEXT(trace, 3, __func__);
        fsm_deltimer(&conn->timer);
-       iucv_path_sever(conn->path, NULL);
+       iucv_path_sever(conn->path, conn->userdata);
        fsm_newstate(fi, CONN_STATE_STARTWAIT);
 }
 
@@ -867,9 +899,9 @@ static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
        IUCV_DBF_TEXT(trace, 3, __func__);
 
        fsm_deltimer(&conn->timer);
-       iucv_path_sever(conn->path, NULL);
-       dev_info(privptr->dev, "The peer interface of the IUCV device"
-               " has closed the connection\n");
+       iucv_path_sever(conn->path, conn->userdata);
+       dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
+                              "connection\n", netiucv_printuser(conn));
        IUCV_DBF_TEXT(data, 2,
                      "conn_action_connsever: Remote dropped connection\n");
        fsm_newstate(fi, CONN_STATE_STARTWAIT);
@@ -886,8 +918,6 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
        IUCV_DBF_TEXT(trace, 3, __func__);
 
        fsm_newstate(fi, CONN_STATE_STARTWAIT);
-       IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n",
-               netdev->name, conn->userid);
 
        /*
         * We must set the state before calling iucv_connect because the
@@ -897,8 +927,11 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
 
        fsm_newstate(fi, CONN_STATE_SETUPWAIT);
        conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
+       IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
+               netdev->name, netiucv_printuser(conn));
+
        rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
-                              NULL, iucvMagic, conn);
+                              NULL, conn->userdata, conn);
        switch (rc) {
        case 0:
                netdev->tx_queue_len = conn->path->msglim;
@@ -908,13 +941,13 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
        case 11:
                dev_warn(privptr->dev,
                        "The IUCV device failed to connect to z/VM guest %s\n",
-                       netiucv_printname(conn->userid));
+                       netiucv_printname(conn->userid, 8));
                fsm_newstate(fi, CONN_STATE_STARTWAIT);
                break;
        case 12:
                dev_warn(privptr->dev,
                        "The IUCV device failed to connect to the peer on z/VM"
-                       " guest %s\n", netiucv_printname(conn->userid));
+                       " guest %s\n", netiucv_printname(conn->userid, 8));
                fsm_newstate(fi, CONN_STATE_STARTWAIT);
                break;
        case 13:
@@ -927,7 +960,7 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
                dev_err(privptr->dev,
                        "z/VM guest %s has too many IUCV connections"
                        " to connect with the IUCV device\n",
-                       netiucv_printname(conn->userid));
+                       netiucv_printname(conn->userid, 8));
                fsm_newstate(fi, CONN_STATE_CONNERR);
                break;
        case 15:
@@ -972,7 +1005,7 @@ static void conn_action_stop(fsm_instance *fi, int event, void *arg)
        netiucv_purge_skb_queue(&conn->collect_queue);
        if (conn->path) {
                IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
-               iucv_path_sever(conn->path, iucvMagic);
+               iucv_path_sever(conn->path, conn->userdata);
                kfree(conn->path);
                conn->path = NULL;
        }
@@ -1090,7 +1123,8 @@ dev_action_connup(fsm_instance *fi, int event, void *arg)
                        fsm_newstate(fi, DEV_STATE_RUNNING);
                        dev_info(privptr->dev,
                                "The IUCV device has been connected"
-                               " successfully to %s\n", privptr->conn->userid);
+                               " successfully to %s\n",
+                               netiucv_printuser(privptr->conn));
                        IUCV_DBF_TEXT(setup, 3,
                                "connection is up and running\n");
                        break;
@@ -1452,45 +1486,72 @@ static ssize_t user_show(struct device *dev, struct device_attribute *attr,
        struct netiucv_priv *priv = dev_get_drvdata(dev);
 
        IUCV_DBF_TEXT(trace, 5, __func__);
-       return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
+       return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
 }
 
-static ssize_t user_write(struct device *dev, struct device_attribute *attr,
-                         const char *buf, size_t count)
+static int netiucv_check_user(const char *buf, size_t count, char *username,
+                             char *userdata)
 {
-       struct netiucv_priv *priv = dev_get_drvdata(dev);
-       struct net_device *ndev = priv->conn->netdev;
-       char    *p;
-       char    *tmp;
-       char    username[9];
-       int     i;
-       struct iucv_connection *cp;
+       const char *p;
+       int i;
 
-       IUCV_DBF_TEXT(trace, 3, __func__);
-       if (count > 9) {
-               IUCV_DBF_TEXT_(setup, 2,
-                              "%d is length of username\n", (int) count);
+       p = strchr(buf, '.');
+       if ((p && ((count > 26) ||
+                  ((p - buf) > 8) ||
+                  (buf + count - p > 18))) ||
+           (!p && (count > 9))) {
+               IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
                return -EINVAL;
        }
 
-       tmp = strsep((char **) &buf, "\n");
-       for (i = 0, p = tmp; i < 8 && *p; i++, p++) {
-               if (isalnum(*p) || (*p == '$')) {
-                       username[i]= toupper(*p);
+       for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
+               if (isalnum(*p) || *p == '$') {
+                       username[i] = toupper(*p);
                        continue;
                }
-               if (*p == '\n') {
+               if (*p == '\n')
                        /* trailing lf, grr */
                        break;
-               }
                IUCV_DBF_TEXT_(setup, 2,
-                              "username: invalid character %c\n", *p);
+                              "conn_write: invalid character %02x\n", *p);
                return -EINVAL;
        }
        while (i < 8)
                username[i++] = ' ';
        username[8] = '\0';
 
+       if (*p == '.') {
+               p++;
+               for (i = 0; i < 16 && *p; i++, p++) {
+                       if (*p == '\n')
+                               break;
+                       userdata[i] = toupper(*p);
+               }
+               while (i > 0 && i < 16)
+                       userdata[i++] = ' ';
+       } else
+               memcpy(userdata, iucvMagic_ascii, 16);
+       userdata[16] = '\0';
+       ASCEBC(userdata, 16);
+
+       return 0;
+}
+
+static ssize_t user_write(struct device *dev, struct device_attribute *attr,
+                         const char *buf, size_t count)
+{
+       struct netiucv_priv *priv = dev_get_drvdata(dev);
+       struct net_device *ndev = priv->conn->netdev;
+       char    username[9];
+       char    userdata[17];
+       int     rc;
+       struct iucv_connection *cp;
+
+       IUCV_DBF_TEXT(trace, 3, __func__);
+       rc = netiucv_check_user(buf, count, username, userdata);
+       if (rc)
+               return rc;
+
        if (memcmp(username, priv->conn->userid, 9) &&
            (ndev->flags & (IFF_UP | IFF_RUNNING))) {
                /* username changed while the interface is active. */
@@ -1499,15 +1560,17 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr,
        }
        read_lock_bh(&iucv_connection_rwlock);
        list_for_each_entry(cp, &iucv_connection_list, list) {
-               if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
+               if (!strncmp(username, cp->userid, 9) &&
+                  !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
                        read_unlock_bh(&iucv_connection_rwlock);
-                       IUCV_DBF_TEXT_(setup, 2, "user_write: Connection "
-                               "to %s already exists\n", username);
+                       IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
+                               "already exists\n", netiucv_printuser(cp));
                        return -EEXIST;
                }
        }
        read_unlock_bh(&iucv_connection_rwlock);
        memcpy(priv->conn->userid, username, 9);
+       memcpy(priv->conn->userdata, userdata, 17);
        return count;
 }
 
@@ -1537,7 +1600,8 @@ static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
        bs1 = simple_strtoul(buf, &e, 0);
 
        if (e && (!isspace(*e))) {
-               IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
+               IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %02x\n",
+                       *e);
                return -EINVAL;
        }
        if (bs1 > NETIUCV_BUFSIZE_MAX) {
@@ -1864,7 +1928,8 @@ static void netiucv_unregister_device(struct device *dev)
  * Add it to the list of netiucv connections;
  */
 static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
-                                                     char *username)
+                                                     char *username,
+                                                     char *userdata)
 {
        struct iucv_connection *conn;
 
@@ -1893,6 +1958,8 @@ static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
        fsm_settimer(conn->fsm, &conn->timer);
        fsm_newstate(conn->fsm, CONN_STATE_INVALID);
 
+       if (userdata)
+               memcpy(conn->userdata, userdata, 17);
        if (username) {
                memcpy(conn->userid, username, 9);
                fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
@@ -1919,6 +1986,7 @@ out:
  */
 static void netiucv_remove_connection(struct iucv_connection *conn)
 {
+
        IUCV_DBF_TEXT(trace, 3, __func__);
        write_lock_bh(&iucv_connection_rwlock);
        list_del_init(&conn->list);
@@ -1926,7 +1994,7 @@ static void netiucv_remove_connection(struct iucv_connection *conn)
        fsm_deltimer(&conn->timer);
        netiucv_purge_skb_queue(&conn->collect_queue);
        if (conn->path) {
-               iucv_path_sever(conn->path, iucvMagic);
+               iucv_path_sever(conn->path, conn->userdata);
                kfree(conn->path);
                conn->path = NULL;
        }
@@ -1985,7 +2053,7 @@ static void netiucv_setup_netdevice(struct net_device *dev)
 /**
  * Allocate and initialize everything of a net device.
  */
-static struct net_device *netiucv_init_netdevice(char *username)
+static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
 {
        struct netiucv_priv *privptr;
        struct net_device *dev;
@@ -2004,7 +2072,7 @@ static struct net_device *netiucv_init_netdevice(char *username)
        if (!privptr->fsm)
                goto out_netdev;
 
-       privptr->conn = netiucv_new_connection(dev, username);
+       privptr->conn = netiucv_new_connection(dev, username, userdata);
        if (!privptr->conn) {
                IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
                goto out_fsm;
@@ -2022,47 +2090,31 @@ out_netdev:
 static ssize_t conn_write(struct device_driver *drv,
                          const char *buf, size_t count)
 {
-       const char *p;
        char username[9];
-       int i, rc;
+       char userdata[17];
+       int rc;
        struct net_device *dev;
        struct netiucv_priv *priv;
        struct iucv_connection *cp;
 
        IUCV_DBF_TEXT(trace, 3, __func__);
-       if (count>9) {
-               IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
-               return -EINVAL;
-       }
-
-       for (i = 0, p = buf; i < 8 && *p; i++, p++) {
-               if (isalnum(*p) || *p == '$') {
-                       username[i] = toupper(*p);
-                       continue;
-               }
-               if (*p == '\n')
-                       /* trailing lf, grr */
-                       break;
-               IUCV_DBF_TEXT_(setup, 2,
-                              "conn_write: invalid character %c\n", *p);
-               return -EINVAL;
-       }
-       while (i < 8)
-               username[i++] = ' ';
-       username[8] = '\0';
+       rc = netiucv_check_user(buf, count, username, userdata);
+       if (rc)
+               return rc;
 
        read_lock_bh(&iucv_connection_rwlock);
        list_for_each_entry(cp, &iucv_connection_list, list) {
-               if (!strncmp(username, cp->userid, 9)) {
+               if (!strncmp(username, cp->userid, 9) &&
+                   !strncmp(userdata, cp->userdata, 17)) {
                        read_unlock_bh(&iucv_connection_rwlock);
-                       IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection "
-                               "to %s already exists\n", username);
+                       IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
+                               "already exists\n", netiucv_printuser(cp));
                        return -EEXIST;
                }
        }
        read_unlock_bh(&iucv_connection_rwlock);
 
-       dev = netiucv_init_netdevice(username);
+       dev = netiucv_init_netdevice(username, userdata);
        if (!dev) {
                IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
                return -ENODEV;
@@ -2083,8 +2135,9 @@ static ssize_t conn_write(struct device_driver *drv,
        if (rc)
                goto out_unreg;
 
-       dev_info(priv->dev, "The IUCV interface to %s has been"
-               " established successfully\n", netiucv_printname(username));
+       dev_info(priv->dev, "The IUCV interface to %s has been established "
+                           "successfully\n",
+               netiucv_printuser(priv->conn));
 
        return count;
 
index fff57de..4fae1dc 100644 (file)
@@ -66,7 +66,7 @@ static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
                struct qeth_qdio_out_buffer *buf,
                enum qeth_qdio_buffer_states newbufstate);
-
+static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
 
 static inline const char *qeth_get_cardname(struct qeth_card *card)
 {
@@ -363,6 +363,9 @@ static inline enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
 static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q,
        int bidx, int forced_cleanup)
 {
+       if (q->card->options.cq != QETH_CQ_ENABLED)
+               return;
+
        if (q->bufs[bidx]->next_pending != NULL) {
                struct qeth_qdio_out_buffer *head = q->bufs[bidx];
                struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;
@@ -390,6 +393,13 @@ static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q,
 
                }
        }
+       if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
+                                       QETH_QDIO_BUF_HANDLED_DELAYED)) {
+               /* for recovery situations */
+               q->bufs[bidx]->aob = q->bufstates[bidx].aob;
+               qeth_init_qdio_out_buf(q, bidx);
+               QETH_CARD_TEXT(q->card, 2, "clprecov");
+       }
 }
 
 
@@ -412,7 +422,6 @@ static inline void qeth_qdio_handle_aob(struct qeth_card *card,
                notification = TX_NOTIFY_OK;
        } else {
                BUG_ON(atomic_read(&buffer->state) != QETH_QDIO_BUF_PENDING);
-
                atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
                notification = TX_NOTIFY_DELAYED_OK;
        }
@@ -425,7 +434,8 @@ static inline void qeth_qdio_handle_aob(struct qeth_card *card,
 
        buffer->aob = NULL;
        qeth_clear_output_buffer(buffer->q, buffer,
-                               QETH_QDIO_BUF_HANDLED_DELAYED);
+                                QETH_QDIO_BUF_HANDLED_DELAYED);
+
        /* from here on: do not touch buffer anymore */
        qdio_release_aob(aob);
 }
@@ -1113,11 +1123,25 @@ out:
 static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
 {
        struct sk_buff *skb;
+       struct iucv_sock *iucv;
+       int notify_general_error = 0;
+
+       if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
+               notify_general_error = 1;
+
+       /* release may never happen from within CQ tasklet scope */
+       BUG_ON(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
 
        skb = skb_dequeue(&buf->skb_list);
        while (skb) {
                QETH_CARD_TEXT(buf->q->card, 5, "skbr");
                QETH_CARD_TEXT_(buf->q->card, 5, "%lx", (long) skb);
+               if (notify_general_error && skb->protocol == ETH_P_AF_IUCV) {
+                       if (skb->sk) {
+                               iucv = iucv_sk(skb->sk);
+                               iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR);
+                       }
+               }
                atomic_dec(&skb->users);
                dev_kfree_skb_any(skb);
                skb = skb_dequeue(&buf->skb_list);
@@ -1160,7 +1184,7 @@ static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
        for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
                if (!q->bufs[j])
                        continue;
-               qeth_cleanup_handled_pending(q, j, free);
+               qeth_cleanup_handled_pending(q, j, 1);
                qeth_clear_output_buffer(q, q->bufs[j], QETH_QDIO_BUF_EMPTY);
                if (free) {
                        kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
@@ -1207,7 +1231,7 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
        qeth_free_cq(card);
        cancel_delayed_work_sync(&card->buffer_reclaim_work);
        for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
-               kfree_skb(card->qdio.in_q->bufs[j].rx_skb);
+               dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
        kfree(card->qdio.in_q);
        card->qdio.in_q = NULL;
        /* inbound buffer pool */
@@ -1329,6 +1353,7 @@ static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
 
 static void qeth_start_kernel_thread(struct work_struct *work)
 {
+       struct task_struct *ts;
        struct qeth_card *card = container_of(work, struct qeth_card,
                                        kernel_thread_starter);
        QETH_CARD_TEXT(card , 2, "strthrd");
@@ -1336,9 +1361,15 @@ static void qeth_start_kernel_thread(struct work_struct *work)
        if (card->read.state != CH_STATE_UP &&
            card->write.state != CH_STATE_UP)
                return;
-       if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
-               kthread_run(card->discipline.recover, (void *) card,
+       if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
+               ts = kthread_run(card->discipline.recover, (void *)card,
                                "qeth_recover");
+               if (IS_ERR(ts)) {
+                       qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
+                       qeth_clear_thread_running_bit(card,
+                               QETH_RECOVER_THREAD);
+               }
+       }
 }
 
 static int qeth_setup_card(struct qeth_card *card)
index a21ae3d..c129671 100644 (file)
@@ -301,21 +301,21 @@ static void qeth_l2_process_vlans(struct qeth_card *card)
        spin_unlock_bh(&card->vlanlock);
 }
 
-static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
        struct qeth_card *card = dev->ml_priv;
        struct qeth_vlan_vid *id;
 
        QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
        if (!vid)
-               return;
+               return 0;
        if (card->info.type == QETH_CARD_TYPE_OSM) {
                QETH_CARD_TEXT(card, 3, "aidOSM");
-               return;
+               return 0;
        }
        if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
                QETH_CARD_TEXT(card, 3, "aidREC");
-               return;
+               return 0;
        }
        id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
        if (id) {
@@ -324,10 +324,13 @@ static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
                spin_lock_bh(&card->vlanlock);
                list_add_tail(&id->list, &card->vid_list);
                spin_unlock_bh(&card->vlanlock);
+       } else {
+               return -ENOMEM;
        }
+       return 0;
 }
 
-static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
        struct qeth_vlan_vid *id, *tmpid = NULL;
        struct qeth_card *card = dev->ml_priv;
@@ -335,11 +338,11 @@ static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
        QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
        if (card->info.type == QETH_CARD_TYPE_OSM) {
                QETH_CARD_TEXT(card, 3, "kidOSM");
-               return;
+               return 0;
        }
        if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
                QETH_CARD_TEXT(card, 3, "kidREC");
-               return;
+               return 0;
        }
        spin_lock_bh(&card->vlanlock);
        list_for_each_entry(id, &card->vid_list, list) {
@@ -355,6 +358,7 @@ static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
                kfree(tmpid);
        }
        qeth_l2_set_multicast_list(card->dev);
+       return 0;
 }
 
 static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
@@ -1169,6 +1173,7 @@ static void __exit qeth_l2_exit(void)
 static void qeth_l2_shutdown(struct ccwgroup_device *gdev)
 {
        struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+       qeth_set_allowed_threads(card, 0, 1);
        if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
                qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
        qeth_qdio_clear_card(card, 0);
index 6357892..9648e4e 100644 (file)
@@ -1869,15 +1869,15 @@ static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
        qeth_l3_free_vlan_addresses6(card, vid);
 }
 
-static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
        struct qeth_card *card = dev->ml_priv;
 
        set_bit(vid, card->active_vlans);
-       return;
+       return 0;
 }
 
-static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
        struct qeth_card *card = dev->ml_priv;
        unsigned long flags;
@@ -1885,7 +1885,7 @@ static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
        QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
        if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
                QETH_CARD_TEXT(card, 3, "kidREC");
-               return;
+               return 0;
        }
        spin_lock_irqsave(&card->vlanlock, flags);
        /* unregister IP addresses of vlan device */
@@ -1893,6 +1893,7 @@ static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
        clear_bit(vid, card->active_vlans);
        spin_unlock_irqrestore(&card->vlanlock, flags);
        qeth_l3_set_multicast_list(card->dev);
+       return 0;
 }
 
 static inline int qeth_l3_rebuild_skb(struct qeth_card *card,
@@ -2759,7 +2760,7 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
        rcu_read_lock();
        dst = skb_dst(skb);
        if (dst)
-               n = dst_get_neighbour(dst);
+               n = dst_get_neighbour_noref(dst);
        if (n) {
                cast_type = n->type;
                rcu_read_unlock();
@@ -2855,7 +2856,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
        rcu_read_lock();
        dst = skb_dst(skb);
        if (dst)
-               n = dst_get_neighbour(dst);
+               n = dst_get_neighbour_noref(dst);
        if (ipv == 4) {
                /* IPv4 */
                hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type);
@@ -3491,14 +3492,13 @@ contin:
        else
                netif_carrier_off(card->dev);
        if (recover_flag == CARD_STATE_RECOVER) {
+               rtnl_lock();
                if (recovery_mode)
                        __qeth_l3_open(card->dev);
-               else {
-                       rtnl_lock();
+               else
                        dev_open(card->dev);
-                       rtnl_unlock();
-               }
                qeth_l3_set_multicast_list(card->dev);
+               rtnl_unlock();
        }
        /* let user_space know that device is online */
        kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
@@ -3544,6 +3544,11 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
                card->info.hwtrap = 1;
        }
        qeth_l3_stop_card(card, recovery_mode);
+       if ((card->options.cq == QETH_CQ_ENABLED) && card->dev) {
+               rtnl_lock();
+               call_netdevice_notifiers(NETDEV_REBOOT, card->dev);
+               rtnl_unlock();
+       }
        rc  = ccw_device_set_offline(CARD_DDEV(card));
        rc2 = ccw_device_set_offline(CARD_WDEV(card));
        rc3 = ccw_device_set_offline(CARD_RDEV(card));
@@ -3598,6 +3603,7 @@ static int qeth_l3_recover(void *ptr)
 static void qeth_l3_shutdown(struct ccwgroup_device *gdev)
 {
        struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+       qeth_set_allowed_threads(card, 0, 1);
        if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
                qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
        qeth_qdio_clear_card(card, 0);
index 11f07f8..b79576b 100644 (file)
@@ -55,6 +55,10 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
 {
        struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
+       /* if previous slave_alloc returned early, there is nothing to do */
+       if (!zfcp_sdev->port)
+               return;
+
        zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
        put_device(&zfcp_sdev->port->dev);
 }
index 5f94d22..5426682 100644 (file)
@@ -233,13 +233,9 @@ int bbc_i2c_write_buf(struct bbc_i2c_client *client,
        int ret = 0;
 
        while (len > 0) {
-               int err = bbc_i2c_writeb(client, *buf, off);
-
-               if (err < 0) {
-                       ret = err;
+               ret = bbc_i2c_writeb(client, *buf, off);
+               if (ret < 0)
                        break;
-               }
-
                len--;
                buf++;
                off++;
@@ -253,11 +249,9 @@ int bbc_i2c_read_buf(struct bbc_i2c_client *client,
        int ret = 0;
 
        while (len > 0) {
-               int err = bbc_i2c_readb(client, buf, off);
-               if (err < 0) {
-                       ret = err;
+               ret = bbc_i2c_readb(client, buf, off);
+               if (ret < 0)
                        break;
-               }
                len--;
                buf++;
                off++;
@@ -422,17 +416,6 @@ static struct platform_driver bbc_i2c_driver = {
        .remove         = __devexit_p(bbc_i2c_remove),
 };
 
-static int __init bbc_i2c_init(void)
-{
-       return platform_driver_register(&bbc_i2c_driver);
-}
-
-static void __exit bbc_i2c_exit(void)
-{
-       platform_driver_unregister(&bbc_i2c_driver);
-}
-
-module_init(bbc_i2c_init);
-module_exit(bbc_i2c_exit);
+module_platform_driver(bbc_i2c_driver);
 
 MODULE_LICENSE("GPL");
index 965a1fc..4b99397 100644 (file)
@@ -275,15 +275,4 @@ static struct platform_driver d7s_driver = {
        .remove         = __devexit_p(d7s_remove),
 };
 
-static int __init d7s_init(void)
-{
-       return platform_driver_register(&d7s_driver);
-}
-
-static void __exit d7s_exit(void)
-{
-       platform_driver_unregister(&d7s_driver);
-}
-
-module_init(d7s_init);
-module_exit(d7s_exit);
+module_platform_driver(d7s_driver);
index be7b4e5..339fd6f 100644 (file)
@@ -1138,16 +1138,6 @@ static struct platform_driver envctrl_driver = {
        .remove         = __devexit_p(envctrl_remove),
 };
 
-static int __init envctrl_init(void)
-{
-       return platform_driver_register(&envctrl_driver);
-}
-
-static void __exit envctrl_exit(void)
-{
-       platform_driver_unregister(&envctrl_driver);
-}
+module_platform_driver(envctrl_driver);
 
-module_init(envctrl_init);
-module_exit(envctrl_exit);
 MODULE_LICENSE("GPL");
index 73dd4e7..826157f 100644 (file)
@@ -216,16 +216,6 @@ static struct platform_driver flash_driver = {
        .remove         = __devexit_p(flash_remove),
 };
 
-static int __init flash_init(void)
-{
-       return platform_driver_register(&flash_driver);
-}
-
-static void __exit flash_cleanup(void)
-{
-       platform_driver_unregister(&flash_driver);
-}
+module_platform_driver(flash_driver);
 
-module_init(flash_init);
-module_exit(flash_cleanup);
 MODULE_LICENSE("GPL");
index ebce963..0b31658 100644 (file)
@@ -435,16 +435,6 @@ static struct platform_driver uctrl_driver = {
 };
 
 
-static int __init uctrl_init(void)
-{
-       return platform_driver_register(&uctrl_driver);
-}
-
-static void __exit uctrl_exit(void)
-{
-       platform_driver_unregister(&uctrl_driver);
-}
+module_platform_driver(uctrl_driver);
 
-module_init(uctrl_init);
-module_exit(uctrl_exit);
 MODULE_LICENSE("GPL");
index dba72a4..1ad0b82 100644 (file)
@@ -1906,18 +1906,19 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
        spin_lock(&session->lock);
        task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,
                                 cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
-       if (!task) {
+       if (!task || !task->sc) {
                spin_unlock(&session->lock);
                return -EINVAL;
        }
        sc = task->sc;
-       spin_unlock(&session->lock);
 
        if (!blk_rq_cpu_valid(sc->request))
                cpu = smp_processor_id();
        else
                cpu = sc->request->cpu;
 
+       spin_unlock(&session->lock);
+
        p = &per_cpu(bnx2i_percpu, cpu);
        spin_lock(&p->p_work_lock);
        if (unlikely(!p->iothread)) {
index 000294a..36739da 100644 (file)
@@ -966,7 +966,7 @@ static int init_act_open(struct cxgbi_sock *csk)
                csk->saddr.sin_addr.s_addr = chba->ipv4addr;
 
        csk->rss_qid = 0;
-       csk->l2t = t3_l2t_get(t3dev, dst_get_neighbour(dst), ndev);
+       csk->l2t = t3_l2t_get(t3dev, dst, ndev);
        if (!csk->l2t) {
                pr_err("NO l2t available.\n");
                return -EINVAL;
index ac7a9b1..5a4a3bf 100644 (file)
@@ -1127,6 +1127,7 @@ static int init_act_open(struct cxgbi_sock *csk)
        struct net_device *ndev = cdev->ports[csk->port_id];
        struct port_info *pi = netdev_priv(ndev);
        struct sk_buff *skb = NULL;
+       struct neighbour *n;
        unsigned int step;
 
        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
@@ -1141,7 +1142,12 @@ static int init_act_open(struct cxgbi_sock *csk)
        cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
        cxgbi_sock_get(csk);
 
-       csk->l2t = cxgb4_l2t_get(lldi->l2t, dst_get_neighbour(csk->dst), ndev, 0);
+       n = dst_get_neighbour_noref(csk->dst);
+       if (!n) {
+               pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
+               goto rel_resource;
+       }
+       csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
        if (!csk->l2t) {
                pr_err("%s, cannot alloc l2t.\n", ndev->name);
                goto rel_resource;
index c10f74a..1d25a87 100644 (file)
@@ -472,6 +472,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
        struct net_device *ndev;
        struct cxgbi_device *cdev;
        struct rtable *rt = NULL;
+       struct neighbour *n;
        struct flowi4 fl4;
        struct cxgbi_sock *csk = NULL;
        unsigned int mtu = 0;
@@ -493,7 +494,12 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
                goto err_out;
        }
        dst = &rt->dst;
-       ndev = dst_get_neighbour(dst)->dev;
+       n = dst_get_neighbour_noref(dst);
+       if (!n) {
+               err = -ENODEV;
+               goto rel_rt;
+       }
+       ndev = n->dev;
 
        if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
                pr_info("multi-cast route %pI4, port %u, dev %s.\n",
@@ -507,7 +513,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
                ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
                mtu = ndev->mtu;
                pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
-                       dst_get_neighbour(dst)->dev->name, ndev->name, mtu);
+                       n->dev->name, ndev->name, mtu);
        }
 
        cdev = cxgbi_device_find_by_netdev(ndev, &port);
index cefbe44..8d67467 100644 (file)
@@ -31,6 +31,8 @@
 #include <linux/sysfs.h>
 #include <linux/ctype.h>
 #include <linux/workqueue.h>
+#include <net/dcbnl.h>
+#include <net/dcbevent.h>
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsicam.h>
 #include <scsi/scsi_transport.h>
@@ -101,6 +103,8 @@ static int fcoe_ddp_done(struct fc_lport *, u16);
 static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *,
                           unsigned int);
 static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
+static int fcoe_dcb_app_notification(struct notifier_block *notifier,
+                                    ulong event, void *ptr);
 
 static bool fcoe_match(struct net_device *netdev);
 static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode);
@@ -129,6 +133,11 @@ static struct notifier_block fcoe_cpu_notifier = {
        .notifier_call = fcoe_cpu_callback,
 };
 
+/* notification function for DCB events */
+static struct notifier_block dcb_notifier = {
+       .notifier_call = fcoe_dcb_app_notification,
+};
+
 static struct scsi_transport_template *fcoe_nport_scsi_transport;
 static struct scsi_transport_template *fcoe_vport_scsi_transport;
 
@@ -1522,6 +1531,8 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
        skb_reset_network_header(skb);
        skb->mac_len = elen;
        skb->protocol = htons(ETH_P_FCOE);
+       skb->priority = port->priority;
+
        if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
            fcoe->realdev->features & NETIF_F_HW_VLAN_TX) {
                skb->vlan_tci = VLAN_TAG_PRESENT |
@@ -1624,6 +1635,7 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
        stats->InvalidCRCCount++;
        if (stats->InvalidCRCCount < 5)
                printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
+       put_cpu();
        return -EINVAL;
 }
 
@@ -1746,6 +1758,7 @@ int fcoe_percpu_receive_thread(void *arg)
  */
 static void fcoe_dev_setup(void)
 {
+       register_dcbevent_notifier(&dcb_notifier);
        register_netdevice_notifier(&fcoe_notifier);
 }
 
@@ -1754,9 +1767,69 @@ static void fcoe_dev_setup(void)
  */
 static void fcoe_dev_cleanup(void)
 {
+       unregister_dcbevent_notifier(&dcb_notifier);
        unregister_netdevice_notifier(&fcoe_notifier);
 }
 
+static struct fcoe_interface *
+fcoe_hostlist_lookup_realdev_port(struct net_device *netdev)
+{
+       struct fcoe_interface *fcoe;
+       struct net_device *real_dev;
+
+       list_for_each_entry(fcoe, &fcoe_hostlist, list) {
+               if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
+                       real_dev = vlan_dev_real_dev(fcoe->netdev);
+               else
+                       real_dev = fcoe->netdev;
+
+               if (netdev == real_dev)
+                       return fcoe;
+       }
+       return NULL;
+}
+
+static int fcoe_dcb_app_notification(struct notifier_block *notifier,
+                                    ulong event, void *ptr)
+{
+       struct dcb_app_type *entry = ptr;
+       struct fcoe_interface *fcoe;
+       struct net_device *netdev;
+       struct fcoe_port *port;
+       int prio;
+
+       if (entry->app.selector != DCB_APP_IDTYPE_ETHTYPE)
+               return NOTIFY_OK;
+
+       netdev = dev_get_by_index(&init_net, entry->ifindex);
+       if (!netdev)
+               return NOTIFY_OK;
+
+       fcoe = fcoe_hostlist_lookup_realdev_port(netdev);
+       dev_put(netdev);
+       if (!fcoe)
+               return NOTIFY_OK;
+
+       if (entry->dcbx & DCB_CAP_DCBX_VER_CEE)
+               prio = ffs(entry->app.priority) - 1;
+       else
+               prio = entry->app.priority;
+
+       if (prio < 0)
+               return NOTIFY_OK;
+
+       if (entry->app.protocol == ETH_P_FIP ||
+           entry->app.protocol == ETH_P_FCOE)
+               fcoe->ctlr.priority = prio;
+
+       if (entry->app.protocol == ETH_P_FCOE) {
+               port = lport_priv(fcoe->ctlr.lp);
+               port->priority = prio;
+       }
+
+       return NOTIFY_OK;
+}
+
 /**
  * fcoe_device_notification() - Handler for net device events
  * @notifier: The context of the notification
@@ -1964,6 +2037,46 @@ static bool fcoe_match(struct net_device *netdev)
        return true;
 }
 
+/**
+ * fcoe_dcb_create() - Initialize DCB attributes and hooks
+ * @fcoe: The fcoe_interface whose underlying net_device is queried and
+ *        whose FCoE/FIP APP priorities are initialized
+ */
+static void fcoe_dcb_create(struct fcoe_interface *fcoe)
+{
+#ifdef CONFIG_DCB
+       int dcbx;
+       u8 fup, up;
+       struct net_device *netdev = fcoe->realdev;
+       struct fcoe_port *port = lport_priv(fcoe->ctlr.lp);
+       struct dcb_app app = {
+                               .priority = 0,
+                               .protocol = ETH_P_FCOE
+                            };
+
+       /* setup DCB priority attributes. */
+       if (netdev && netdev->dcbnl_ops && netdev->dcbnl_ops->getdcbx) {
+               dcbx = netdev->dcbnl_ops->getdcbx(netdev);
+
+               if (dcbx & DCB_CAP_DCBX_VER_IEEE) {
+                       app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
+                       up = dcb_ieee_getapp_mask(netdev, &app);
+                       app.protocol = ETH_P_FIP;
+                       fup = dcb_ieee_getapp_mask(netdev, &app);
+               } else {
+                       app.selector = DCB_APP_IDTYPE_ETHTYPE;
+                       up = dcb_getapp(netdev, &app);
+                       app.protocol = ETH_P_FIP;
+                       fup = dcb_getapp(netdev, &app);
+               }
+
+               port->priority = ffs(up) ? ffs(up) - 1 : 0;
+               fcoe->ctlr.priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
+       }
+#endif
+}
+
 /**
  * fcoe_create() - Create a fcoe interface
  * @netdev  : The net_device object the Ethernet interface to create on
@@ -2007,6 +2120,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
        /* Make this the "master" N_Port */
        fcoe->ctlr.lp = lport;
 
+       /* setup DCB priority attributes. */
+       fcoe_dcb_create(fcoe);
+
        /* add to lports list */
        fcoe_hostlist_add(lport);
 
index c74c4b8..e7522dc 100644 (file)
@@ -320,6 +320,7 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
 
        skb_put(skb, sizeof(*sol));
        skb->protocol = htons(ETH_P_FIP);
+       skb->priority = fip->priority;
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        fip->send(fip, skb);
@@ -474,6 +475,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
        }
        skb_put(skb, len);
        skb->protocol = htons(ETH_P_FIP);
+       skb->priority = fip->priority;
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        fip->send(fip, skb);
@@ -566,6 +568,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
        cap->fip.fip_dl_len = htons(dlen / FIP_BPW);
 
        skb->protocol = htons(ETH_P_FIP);
+       skb->priority = fip->priority;
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        return 0;
@@ -1911,6 +1914,7 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip,
 
        skb_put(skb, len);
        skb->protocol = htons(ETH_P_FIP);
+       skb->priority = fip->priority;
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
 
index 4e041f6..d570573 100644 (file)
@@ -4335,7 +4335,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
        /* insert into event log */
        sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
             sizeof(Mpi2EventDataSasDeviceStatusChange_t);
-       event_reply = kzalloc(sz, GFP_KERNEL);
+       event_reply = kzalloc(sz, GFP_ATOMIC);
        if (!event_reply) {
                printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
                    ioc->name, __FILE__, __LINE__, __func__);
index ac326c4..6465dae 100644 (file)
@@ -1762,12 +1762,31 @@ qla2x00_get_host_port_state(struct Scsi_Host *shost)
        scsi_qla_host_t *vha = shost_priv(shost);
        struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
 
-       if (!base_vha->flags.online)
+       if (!base_vha->flags.online) {
                fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
-       else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
-               fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
-       else
+               return;
+       }
+
+       switch (atomic_read(&base_vha->loop_state)) {
+       case LOOP_UPDATE:
+               fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
+               break;
+       case LOOP_DOWN:
+               if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
+                       fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
+               else
+                       fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+               break;
+       case LOOP_DEAD:
+               fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+               break;
+       case LOOP_READY:
                fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+               break;
+       default:
+               fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+               break;
+       }
 }
 
 static int
index 9df4787..f3cddd5 100644 (file)
  * |             Level            |   Last Value Used  |     Holes     |
  * ----------------------------------------------------------------------
  * | Module Init and Probe        |       0x0116       |               |
- * | Mailbox commands             |       0x1129       |               |
+ * | Mailbox commands             |       0x112b       |               |
  * | Device Discovery             |       0x2083       |               |
  * | Queue Command and IO tracing |       0x302e       |     0x3008     |
  * | DPC Thread                   |       0x401c       |               |
  * | Async Events                 |       0x5059       |               |
- * | Timer Routines               |       0x600d       |               |
+ * | Timer Routines               |       0x6010       | 0x600e,0x600f  |
  * | User Space Interactions      |       0x709d       |               |
- * | Task Management              |       0x8041       |               |
+ * | Task Management              |       0x8041       | 0x800b         |
  * | AER/EEH                      |       0x900f       |               |
  * | Virtual Port                 |       0xa007       |               |
- * | ISP82XX Specific             |       0xb051       |               |
+ * | ISP82XX Specific             |       0xb052       |               |
  * | MultiQ                       |       0xc00b       |               |
  * | Misc                         |       0xd00b       |               |
  * ----------------------------------------------------------------------
index ce32d81..c0c11af 100644 (file)
@@ -578,6 +578,7 @@ extern int qla82xx_check_md_needed(scsi_qla_host_t *);
 extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
 extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
 extern char *qdev_state(uint32_t);
+extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
 
 /* BSG related functions */
 extern int qla24xx_bsg_request(struct fc_bsg_job *);
index f03e915..54ea68c 100644 (file)
@@ -1509,7 +1509,8 @@ enable_82xx_npiv:
                                    &ha->fw_xcb_count, NULL, NULL,
                                    &ha->max_npiv_vports, NULL);
 
-                               if (!fw_major_version && ql2xallocfwdump)
+                               if (!fw_major_version && ql2xallocfwdump
+                                   && !IS_QLA82XX(ha))
                                        qla2x00_alloc_fw_dump(vha);
                        }
                } else {
index dbec896..a4b267e 100644 (file)
@@ -120,11 +120,10 @@ qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
  * Returns a pointer to the continuation type 1 IOCB packet.
  */
 static inline cont_a64_entry_t *
-qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
+qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
 {
        cont_a64_entry_t *cont_pkt;
 
-       struct req_que *req = vha->req;
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
@@ -292,7 +291,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
-                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }
@@ -684,7 +683,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
-                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }
@@ -2070,7 +2069,8 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
                        * Five DSDs are available in the Cont.
                        * Type 1 IOCB.
                               */
-                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
+                           vha->hw->req_q_map[0]);
                        cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                        cont_iocb_prsnt = 1;
@@ -2096,6 +2096,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
        int index;
        uint16_t tot_dsds;
         scsi_qla_host_t *vha = sp->fcport->vha;
+       struct qla_hw_data *ha = vha->hw;
        struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
        int loop_iterartion = 0;
        int cont_iocb_prsnt = 0;
@@ -2141,7 +2142,8 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
                        * Five DSDs are available in the Cont.
                        * Type 1 IOCB.
                               */
-                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
+                           ha->req_q_map[0]);
                        cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                        cont_iocb_prsnt = 1;
index 2516adf..7b91b29 100644 (file)
@@ -1741,7 +1741,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
                                    resid, scsi_bufflen(cp));
 
                                cp->result = DID_ERROR << 16 | lscsi_status;
-                               break;
+                               goto check_scsi_status;
                        }
 
                        if (!lscsi_status &&
index 3b3cec9..82a3353 100644 (file)
@@ -79,8 +79,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                mcp->mb[0] = MBS_LINK_DOWN_ERROR;
                ql_log(ql_log_warn, base_vha, 0x1004,
                    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
-               rval = QLA_FUNCTION_FAILED;
-               goto premature_exit;
+               return QLA_FUNCTION_TIMEOUT;
        }
 
        /*
@@ -163,6 +162,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                                HINT_MBX_INT_PENDING) {
                                spin_unlock_irqrestore(&ha->hardware_lock,
                                        flags);
+                               ha->flags.mbox_busy = 0;
                                ql_dbg(ql_dbg_mbx, base_vha, 0x1010,
                                    "Pending mailbox timeout, exiting.\n");
                                rval = QLA_FUNCTION_TIMEOUT;
@@ -188,6 +188,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                                HINT_MBX_INT_PENDING) {
                                spin_unlock_irqrestore(&ha->hardware_lock,
                                        flags);
+                               ha->flags.mbox_busy = 0;
                                ql_dbg(ql_dbg_mbx, base_vha, 0x1012,
                                    "Pending mailbox timeout, exiting.\n");
                                rval = QLA_FUNCTION_TIMEOUT;
@@ -302,7 +303,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                        if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
                            !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
                            !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
-
+                               if (IS_QLA82XX(ha)) {
+                                       ql_dbg(ql_dbg_mbx, vha, 0x112a,
+                                           "disabling pause transmit on port "
+                                           "0 & 1.\n");
+                                       qla82xx_wr_32(ha,
+                                           QLA82XX_CRB_NIU + 0x98,
+                                           CRB_NIU_XG_PAUSE_CTL_P0|
+                                           CRB_NIU_XG_PAUSE_CTL_P1);
+                               }
                                ql_log(ql_log_info, base_vha, 0x101c,
                                    "Mailbox cmd timeout occured. "
                                    "Scheduling ISP abort eeh_busy=0x%x.\n",
@@ -318,7 +327,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                        if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
                            !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
                            !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
-
+                               if (IS_QLA82XX(ha)) {
+                                       ql_dbg(ql_dbg_mbx, vha, 0x112b,
+                                           "disabling pause transmit on port "
+                                           "0 & 1.\n");
+                                       qla82xx_wr_32(ha,
+                                           QLA82XX_CRB_NIU + 0x98,
+                                           CRB_NIU_XG_PAUSE_CTL_P0|
+                                           CRB_NIU_XG_PAUSE_CTL_P1);
+                               }
                                ql_log(ql_log_info, base_vha, 0x101e,
                                    "Mailbox cmd timeout occured. "
                                    "Scheduling ISP abort.\n");
index 94bded5..0355493 100644 (file)
@@ -3817,6 +3817,20 @@ exit:
        return rval;
 }
 
+void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       if (ha->flags.mbox_busy) {
+               ha->flags.mbox_int = 1;
+               ha->flags.mbox_busy = 0;
+               ql_log(ql_log_warn, vha, 0x6010,
+                   "Doing premature completion of mbx command.\n");
+               if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags))
+                       complete(&ha->mbx_intr_comp);
+       }
+}
+
 void qla82xx_watchdog(scsi_qla_host_t *vha)
 {
        uint32_t dev_state, halt_status;
@@ -3839,9 +3853,13 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
                        qla2xxx_wake_dpc(vha);
                } else {
                        if (qla82xx_check_fw_alive(vha)) {
+                               ql_dbg(ql_dbg_timer, vha, 0x6011,
+                                   "disabling pause transmit on port 0 & 1.\n");
+                               qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+                                   CRB_NIU_XG_PAUSE_CTL_P0|CRB_NIU_XG_PAUSE_CTL_P1);
                                halt_status = qla82xx_rd_32(ha,
                                    QLA82XX_PEG_HALT_STATUS1);
-                               ql_dbg(ql_dbg_timer, vha, 0x6005,
+                               ql_log(ql_log_info, vha, 0x6005,
                                    "dumping hw/fw registers:.\n "
                                    " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n "
                                    " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n "
@@ -3858,6 +3876,11 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
                                            QLA82XX_CRB_PEG_NET_3 + 0x3c),
                                    qla82xx_rd_32(ha,
                                            QLA82XX_CRB_PEG_NET_4 + 0x3c));
+                               if (LSW(MSB(halt_status)) == 0x67)
+                                       ql_log(ql_log_warn, vha, 0xb052,
+                                           "Firmware aborted with "
+                                           "error code 0x00006700. Device is "
+                                           "being reset.\n");
                                if (halt_status & HALT_STATUS_UNRECOVERABLE) {
                                        set_bit(ISP_UNRECOVERABLE,
                                            &vha->dpc_flags);
@@ -3869,16 +3892,8 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
                                }
                                qla2xxx_wake_dpc(vha);
                                ha->flags.isp82xx_fw_hung = 1;
-                               if (ha->flags.mbox_busy) {
-                                       ha->flags.mbox_int = 1;
-                                       ql_log(ql_log_warn, vha, 0x6007,
-                                           "Due to FW hung, doing "
-                                           "premature completion of mbx "
-                                           "command.\n");
-                                       if (test_bit(MBX_INTR_WAIT,
-                                           &ha->mbx_cmd_flags))
-                                               complete(&ha->mbx_intr_comp);
-                               }
+                               ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n");
+                               qla82xx_clear_pending_mbx(vha);
                        }
                }
        }
@@ -4073,10 +4088,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
                        msleep(1000);
                        if (qla82xx_check_fw_alive(vha)) {
                                ha->flags.isp82xx_fw_hung = 1;
-                               if (ha->flags.mbox_busy) {
-                                       ha->flags.mbox_int = 1;
-                                       complete(&ha->mbx_intr_comp);
-                               }
+                               qla82xx_clear_pending_mbx(vha);
                                break;
                        }
                }
index 57820c1..57a226b 100644 (file)
@@ -1173,4 +1173,8 @@ struct qla82xx_md_entry_queue {
 
 static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
        0x410000B8, 0x410000BC };
+
+#define CRB_NIU_XG_PAUSE_CTL_P0        0x1
+#define CRB_NIU_XG_PAUSE_CTL_P1        0x8
+
 #endif
index fd14c7b..f9e5b85 100644 (file)
@@ -201,12 +201,12 @@ MODULE_PARM_DESC(ql2xmdcapmask,
                "Set the Minidump driver capture mask level. "
                "Default is 0x7F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
 
-int ql2xmdenable;
+int ql2xmdenable = 1;
 module_param(ql2xmdenable, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xmdenable,
                "Enable/disable MiniDump. "
-               "0 (Default) - MiniDump disabled. "
-               "1 - MiniDump enabled.");
+               "0 - MiniDump disabled. "
+               "1 (Default) - MiniDump enabled.");
 
 /*
  * SCSI host template entry points
@@ -423,6 +423,7 @@ fail2:
        qla25xx_delete_queues(vha);
        destroy_workqueue(ha->wq);
        ha->wq = NULL;
+       vha->req = ha->req_q_map[0];
 fail:
        ha->mqenable = 0;
        kfree(ha->req_q_map);
@@ -814,49 +815,6 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
        return return_status;
 }
 
-/*
- * qla2x00_wait_for_loop_ready
- *    Wait for MAX_LOOP_TIMEOUT(5 min) value for loop
- *    to be in LOOP_READY state.
- * Input:
- *     ha - pointer to host adapter structure
- *
- * Note:
- *    Does context switching-Release SPIN_LOCK
- *    (if any) before calling this routine.
- *
- *
- * Return:
- *    Success (LOOP_READY) : 0
- *    Failed  (LOOP_NOT_READY) : 1
- */
-static inline int
-qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
-{
-       int      return_status = QLA_SUCCESS;
-       unsigned long loop_timeout ;
-       struct qla_hw_data *ha = vha->hw;
-       scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
-
-       /* wait for 5 min at the max for loop to be ready */
-       loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);
-
-       while ((!atomic_read(&base_vha->loop_down_timer) &&
-           atomic_read(&base_vha->loop_state) == LOOP_DOWN) ||
-           atomic_read(&base_vha->loop_state) != LOOP_READY) {
-               if (atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
-                       return_status = QLA_FUNCTION_FAILED;
-                       break;
-               }
-               msleep(1000);
-               if (time_after_eq(jiffies, loop_timeout)) {
-                       return_status = QLA_FUNCTION_FAILED;
-                       break;
-               }
-       }
-       return (return_status);
-}
-
 static void
 sp_get(struct srb *sp)
 {
@@ -1035,12 +993,6 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
                    "Wait for hba online failed for cmd=%p.\n", cmd);
                goto eh_reset_failed;
        }
-       err = 1;
-       if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) {
-               ql_log(ql_log_warn, vha, 0x800b,
-                   "Wait for loop ready failed for cmd=%p.\n", cmd);
-               goto eh_reset_failed;
-       }
        err = 2;
        if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
                != QLA_SUCCESS) {
@@ -1137,10 +1089,9 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
                goto eh_bus_reset_done;
        }
 
-       if (qla2x00_wait_for_loop_ready(vha) == QLA_SUCCESS) {
-               if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
-                       ret = SUCCESS;
-       }
+       if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
+               ret = SUCCESS;
+
        if (ret == FAILED)
                goto eh_bus_reset_done;
 
@@ -1206,15 +1157,6 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
        if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
                goto eh_host_reset_lock;
 
-       /*
-        * Fixme-may be dpc thread is active and processing
-        * loop_resync,so wait a while for it to
-        * be completed and then issue big hammer.Otherwise
-        * it may cause I/O failure as big hammer marks the
-        * devices as lost kicking of the port_down_timer
-        * while dpc is stuck for the mailbox to complete.
-        */
-       qla2x00_wait_for_loop_ready(vha);
        if (vha != base_vha) {
                if (qla2x00_vp_abort_isp(vha))
                        goto eh_host_reset_lock;
@@ -1297,16 +1239,13 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
                atomic_set(&vha->loop_state, LOOP_DOWN);
                atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
                qla2x00_mark_all_devices_lost(vha, 0);
-               qla2x00_wait_for_loop_ready(vha);
        }
 
        if (ha->flags.enable_lip_reset) {
                ret = qla2x00_lip_reset(vha);
-               if (ret != QLA_SUCCESS) {
+               if (ret != QLA_SUCCESS)
                        ql_dbg(ql_dbg_taskm, vha, 0x802e,
                            "lip_reset failed (%d).\n", ret);
-               } else
-                       qla2x00_wait_for_loop_ready(vha);
        }
 
        /* Issue marker command only when we are going to start the I/O */
@@ -4070,13 +4009,8 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
                /* For ISP82XX complete any pending mailbox cmd */
                if (IS_QLA82XX(ha)) {
                        ha->flags.isp82xx_fw_hung = 1;
-                       if (ha->flags.mbox_busy) {
-                               ha->flags.mbox_int = 1;
-                               ql_dbg(ql_dbg_aer, vha, 0x9001,
-                                   "Due to pci channel io frozen, doing premature "
-                                   "completion of mbx command.\n");
-                               complete(&ha->mbx_intr_comp);
-                       }
+                       ql_dbg(ql_dbg_aer, vha, 0x9001, "Pci channel io frozen\n");
+                       qla82xx_clear_pending_mbx(vha);
                }
                qla2x00_free_irqs(vha);
                pci_disable_device(pdev);
index 13b6357..23f33a6 100644 (file)
@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.03.07.07-k"
+#define QLA2XXX_VERSION      "8.03.07.12-k"
 
 #define QLA_DRIVER_MAJOR_VER   8
 #define QLA_DRIVER_MINOR_VER   3
index ace637b..fd5edc6 100644 (file)
 #define ISCSI_ALIAS_SIZE               32      /* ISCSI Alias name size */
 #define ISCSI_NAME_SIZE                        0xE0    /* ISCSI Name size */
 
-#define QL4_SESS_RECOVERY_TMO          30      /* iSCSI session */
+#define QL4_SESS_RECOVERY_TMO          120     /* iSCSI session */
                                                /* recovery timeout */
 
 #define LSDW(x) ((u32)((u64)(x)))
 #define ISNS_DEREG_TOV                 5
 #define HBA_ONLINE_TOV                 30
 #define DISABLE_ACB_TOV                        30
+#define IP_CONFIG_TOV                  30
+#define LOGIN_TOV                      12
 
 #define MAX_RESET_HA_RETRIES           2
 
@@ -240,6 +242,45 @@ struct ddb_entry {
 
        uint16_t fw_ddb_index;  /* DDB firmware index */
        uint32_t fw_ddb_device_state; /* F/W Device State  -- see ql4_fw.h */
+       uint16_t ddb_type;
+#define FLASH_DDB 0x01
+
+       struct dev_db_entry fw_ddb_entry;
+       int (*unblock_sess)(struct iscsi_cls_session *cls_session);
+       int (*ddb_change)(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+                         struct ddb_entry *ddb_entry, uint32_t state);
+
+       /* Driver Re-login  */
+       unsigned long flags;              /* DDB Flags */
+       uint16_t default_relogin_timeout; /*  Max time to wait for
+                                          *  relogin to complete */
+       atomic_t retry_relogin_timer;     /* Min Time between relogins
+                                          * (4000 only) */
+       atomic_t relogin_timer;           /* Max Time to wait for
+                                          * relogin to complete */
+       atomic_t relogin_retry_count;     /* Num of times relogin has been
+                                          * retried */
+       uint32_t default_time2wait;       /* Default Min time between
+                                          * relogins (+aens) */
+
+};
+
+struct qla_ddb_index {
+       struct list_head list;
+       uint16_t fw_ddb_idx;
+       struct dev_db_entry fw_ddb;
+};
+
+#define DDB_IPADDR_LEN 64
+
+struct ql4_tuple_ddb {
+       int port;
+       int tpgt;
+       char ip_addr[DDB_IPADDR_LEN];
+       char iscsi_name[ISCSI_NAME_SIZE];
+       uint16_t options;
+#define DDB_OPT_IPV6 0x0e0e
+#define DDB_OPT_IPV4 0x0f0f
 };
 
 /*
@@ -411,7 +452,7 @@ struct scsi_qla_host {
 #define AF_FW_RECOVERY                 19 /* 0x00080000 */
 #define AF_EEH_BUSY                    20 /* 0x00100000 */
 #define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */
-
+#define AF_BUILD_DDB_LIST              22 /* 0x00400000 */
        unsigned long dpc_flags;
 
 #define DPC_RESET_HA                   1 /* 0x00000002 */
@@ -604,6 +645,7 @@ struct scsi_qla_host {
        uint16_t bootload_minor;
        uint16_t bootload_patch;
        uint16_t bootload_build;
+       uint16_t def_timeout; /* Default login timeout */
 
        uint32_t flash_state;
 #define        QLFLASH_WAITING         0
@@ -623,6 +665,11 @@ struct scsi_qla_host {
        uint16_t iscsi_pci_func_cnt;
        uint8_t model_name[16];
        struct completion disable_acb_comp;
+       struct dma_pool *fw_ddb_dma_pool;
+#define DDB_DMA_BLOCK_SIZE 512
+       uint16_t pri_ddb_idx;
+       uint16_t sec_ddb_idx;
+       int is_reset;
 };
 
 struct ql4_task_data {
@@ -835,6 +882,10 @@ static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
 /*---------------------------------------------------------------------------*/
 
 /* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */
+
+#define INIT_ADAPTER    0
+#define RESET_ADAPTER   1
+
 #define PRESERVE_DDB_LIST      0
 #define REBUILD_DDB_LIST       1
 
index cbd5a20..4ac07f8 100644 (file)
@@ -12,6 +12,7 @@
 #define MAX_PRST_DEV_DB_ENTRIES                64
 #define MIN_DISC_DEV_DB_ENTRY          MAX_PRST_DEV_DB_ENTRIES
 #define MAX_DEV_DB_ENTRIES             512
+#define MAX_DEV_DB_ENTRIES_40XX                256
 
 /*************************************************************************
  *
@@ -604,6 +605,13 @@ struct addr_ctrl_blk {
        uint8_t res14[140];     /* 274-2FF */
 };
 
+#define IP_ADDR_COUNT  4 /* Total of 4 IP addresses supported per interface:
+                          * one IPv4, one IPv6 link local and two IPv6
+                          */
+
+#define IP_STATE_MASK  0x0F000000
+#define IP_STATE_SHIFT 24
+
 struct init_fw_ctrl_blk {
        struct addr_ctrl_blk pri;
 /*     struct addr_ctrl_blk sec;*/
index 160db9d..d0dd4b3 100644 (file)
@@ -13,7 +13,7 @@ struct iscsi_cls_conn;
 int qla4xxx_hw_reset(struct scsi_qla_host *ha);
 int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
 int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb);
-int qla4xxx_initialize_adapter(struct scsi_qla_host *ha);
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset);
 int qla4xxx_soft_reset(struct scsi_qla_host *ha);
 irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id);
 
@@ -153,10 +153,13 @@ int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
                          uint32_t *mbx_sts);
 int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index);
 int qla4xxx_send_passthru0(struct iscsi_task *task);
+void qla4xxx_free_ddb_index(struct scsi_qla_host *ha);
 int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
                          uint16_t stats_size, dma_addr_t stats_dma);
 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
                                       struct ddb_entry *ddb_entry);
+void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
+                                            struct ddb_entry *ddb_entry);
 int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha,
                            struct dev_db_entry *fw_ddb_entry,
                            dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index);
@@ -169,11 +172,22 @@ int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
 int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
                                     uint32_t region, uint32_t field0,
                                     uint32_t field1);
+int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index);
+void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+                            struct ddb_entry *ddb_entry, uint32_t state);
+int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+                      struct ddb_entry *ddb_entry, uint32_t state);
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset);
 
 /* BSG Functions */
 int qla4xxx_bsg_request(struct bsg_job *bsg_job);
 int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);
 
+void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry);
+
 extern int ql4xextended_error_logging;
 extern int ql4xdontresethba;
 extern int ql4xenablemsix;
index 3075fba..1bdfa81 100644 (file)
@@ -773,22 +773,24 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)
  * be freed so that when login happens from user space there are free DDB
  * indices available.
  **/
-static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
+void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
 {
        int max_ddbs;
        int ret;
        uint32_t idx = 0, next_idx = 0;
        uint32_t state = 0, conn_err = 0;
 
-       max_ddbs =  is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
+       max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
                                     MAX_DEV_DB_ENTRIES;
 
        for (idx = 0; idx < max_ddbs; idx = next_idx) {
                ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
                                              &next_idx, &state, &conn_err,
                                                NULL, NULL);
-               if (ret == QLA_ERROR)
+               if (ret == QLA_ERROR) {
+                       next_idx++;
                        continue;
+               }
                if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
                    state == DDB_DS_SESSION_FAILED) {
                        DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -804,7 +806,6 @@ static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
        }
 }
 
-
 /**
  * qla4xxx_initialize_adapter - initiailizes hba
  * @ha: Pointer to host adapter structure.
@@ -812,7 +813,7 @@ static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
  * This routine parforms all of the steps necessary to initialize the adapter.
  *
  **/
-int qla4xxx_initialize_adapter(struct scsi_qla_host *ha)
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
 {
        int status = QLA_ERROR;
 
@@ -840,7 +841,8 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha)
        if (status == QLA_ERROR)
                goto exit_init_hba;
 
-       qla4xxx_free_ddb_index(ha);
+       if (is_reset == RESET_ADAPTER)
+               qla4xxx_build_ddb_list(ha, is_reset);
 
        set_bit(AF_ONLINE, &ha->flags);
 exit_init_hba:
@@ -855,38 +857,12 @@ exit_init_hba:
        return status;
 }
 
-/**
- * qla4xxx_process_ddb_changed - process ddb state change
- * @ha - Pointer to host adapter structure.
- * @fw_ddb_index - Firmware's device database index
- * @state - Device state
- *
- * This routine processes a Decive Database Changed AEN Event.
- **/
-int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
-               uint32_t state, uint32_t conn_err)
+int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+                      struct ddb_entry *ddb_entry, uint32_t state)
 {
-       struct ddb_entry * ddb_entry;
        uint32_t old_fw_ddb_device_state;
        int status = QLA_ERROR;
 
-       /* check for out of range index */
-       if (fw_ddb_index >= MAX_DDB_ENTRIES)
-               goto exit_ddb_event;
-
-       /* Get the corresponging ddb entry */
-       ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
-       /* Device does not currently exist in our database. */
-       if (ddb_entry == NULL) {
-               ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
-                          __func__, fw_ddb_index);
-
-               if (state == DDB_DS_NO_CONNECTION_ACTIVE)
-                       clear_bit(fw_ddb_index, ha->ddb_idx_map);
-
-               goto exit_ddb_event;
-       }
-
        old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
        DEBUG2(ql4_printk(KERN_INFO, ha,
                          "%s: DDB - old state = 0x%x, new state = 0x%x for "
@@ -900,9 +876,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
                switch (state) {
                case DDB_DS_SESSION_ACTIVE:
                case DDB_DS_DISCOVERY:
-                       iscsi_conn_start(ddb_entry->conn);
-                       iscsi_conn_login_event(ddb_entry->conn,
-                                              ISCSI_CONN_STATE_LOGGED_IN);
+                       ddb_entry->unblock_sess(ddb_entry->sess);
                        qla4xxx_update_session_conn_param(ha, ddb_entry);
                        status = QLA_SUCCESS;
                        break;
@@ -936,9 +910,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
                switch (state) {
                case DDB_DS_SESSION_ACTIVE:
                case DDB_DS_DISCOVERY:
-                       iscsi_conn_start(ddb_entry->conn);
-                       iscsi_conn_login_event(ddb_entry->conn,
-                                              ISCSI_CONN_STATE_LOGGED_IN);
+                       ddb_entry->unblock_sess(ddb_entry->sess);
                        qla4xxx_update_session_conn_param(ha, ddb_entry);
                        status = QLA_SUCCESS;
                        break;
@@ -954,7 +926,198 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
                                __func__));
                break;
        }
+       return status;
+}
+
+void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry)
+{
+       /*
+        * This triggers a relogin.  After the relogin_timer
+        * expires, the relogin gets scheduled.  We must wait a
+        * minimum amount of time since receiving an 0x8014 AEN
+        * with failed device_state or a logout response before
+        * we can issue another relogin.
+        *
+        * Firmware pads this timeout: (time2wait +1).
+        * The driver's relogin retry interval should be longer
+        * than the firmware's; otherwise the firmware fails the
+        * set_ddb() mbx cmd with status 0x4005 while it is still
+        * counting down its time2wait.
+        */
+       atomic_set(&ddb_entry->relogin_timer, 0);
+       atomic_set(&ddb_entry->retry_relogin_timer,
+                  ddb_entry->default_time2wait + 4);
+
+}
+
+int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+                            struct ddb_entry *ddb_entry, uint32_t state)
+{
+       uint32_t old_fw_ddb_device_state;
+       int status = QLA_ERROR;
+
+       old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "%s: DDB - old state = 0x%x, new state = 0x%x for "
+                         "index [%d]\n", __func__,
+                         ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
+
+       ddb_entry->fw_ddb_device_state = state;
+
+       switch (old_fw_ddb_device_state) {
+       case DDB_DS_LOGIN_IN_PROCESS:
+       case DDB_DS_NO_CONNECTION_ACTIVE:
+               switch (state) {
+               case DDB_DS_SESSION_ACTIVE:
+                       ddb_entry->unblock_sess(ddb_entry->sess);
+                       qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
+                       status = QLA_SUCCESS;
+                       break;
+               case DDB_DS_SESSION_FAILED:
+                       iscsi_block_session(ddb_entry->sess);
+                       if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+                               qla4xxx_arm_relogin_timer(ddb_entry);
+                       status = QLA_SUCCESS;
+                       break;
+               }
+               break;
+       case DDB_DS_SESSION_ACTIVE:
+               switch (state) {
+               case DDB_DS_SESSION_FAILED:
+                       iscsi_block_session(ddb_entry->sess);
+                       if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+                               qla4xxx_arm_relogin_timer(ddb_entry);
+                       status = QLA_SUCCESS;
+                       break;
+               }
+               break;
+       case DDB_DS_SESSION_FAILED:
+               switch (state) {
+               case DDB_DS_SESSION_ACTIVE:
+                       ddb_entry->unblock_sess(ddb_entry->sess);
+                       qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
+                       status = QLA_SUCCESS;
+                       break;
+               case DDB_DS_SESSION_FAILED:
+                       if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+                               qla4xxx_arm_relogin_timer(ddb_entry);
+                       status = QLA_SUCCESS;
+                       break;
+               }
+               break;
+       default:
+               DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unknown Event\n",
+                                 __func__));
+               break;
+       }
+       return status;
+}
+
+/**
+ * qla4xxx_process_ddb_changed - process ddb state change
+ * @ha - Pointer to host adapter structure.
+ * @fw_ddb_index - Firmware's device database index
+ * @state - Device state
+ *
+ * This routine processes a Device Database Changed AEN Event.
+ **/
+int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
+                               uint32_t fw_ddb_index,
+                               uint32_t state, uint32_t conn_err)
+{
+       struct ddb_entry *ddb_entry;
+       int status = QLA_ERROR;
+
+       /* check for out of range index */
+       if (fw_ddb_index >= MAX_DDB_ENTRIES)
+               goto exit_ddb_event;
+
+       /* Get the corresponding ddb entry */
+       ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
+       /* Device does not currently exist in our database. */
+       if (ddb_entry == NULL) {
+               ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
+                          __func__, fw_ddb_index);
+
+               if (state == DDB_DS_NO_CONNECTION_ACTIVE)
+                       clear_bit(fw_ddb_index, ha->ddb_idx_map);
+
+               goto exit_ddb_event;
+       }
+
+       ddb_entry->ddb_change(ha, fw_ddb_index, ddb_entry, state);
 
 exit_ddb_event:
        return status;
 }
+
+/**
+ * qla4xxx_login_flash_ddb - Login to target (DDB)
+ * @cls_session: Pointer to the session to login
+ *
+ * This routine logs in to the target.
+ * Issues setddb and conn open mbx
+ **/
+void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session)
+{
+       struct iscsi_session *sess;
+       struct ddb_entry *ddb_entry;
+       struct scsi_qla_host *ha;
+       struct dev_db_entry *fw_ddb_entry = NULL;
+       dma_addr_t fw_ddb_dma;
+       uint32_t mbx_sts = 0;
+       int ret;
+
+       sess = cls_session->dd_data;
+       ddb_entry = sess->dd_data;
+       ha =  ddb_entry->ha;
+
+       if (!test_bit(AF_LINK_UP, &ha->flags))
+               return;
+
+       if (ddb_entry->ddb_type != FLASH_DDB) {
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "Skipping login to non FLASH DB"));
+               goto exit_login;
+       }
+
+       fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+                                     &fw_ddb_dma);
+       if (fw_ddb_entry == NULL) {
+               DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+               goto exit_login;
+       }
+
+       if (ddb_entry->fw_ddb_index == INVALID_ENTRY) {
+               ret = qla4xxx_get_ddb_index(ha, &ddb_entry->fw_ddb_index);
+               if (ret == QLA_ERROR)
+                       goto exit_login;
+
+               ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
+               ha->tot_ddbs++;
+       }
+
+       memcpy(fw_ddb_entry, &ddb_entry->fw_ddb_entry,
+              sizeof(struct dev_db_entry));
+       ddb_entry->sess->target_id = ddb_entry->fw_ddb_index;
+
+       ret = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index,
+                                   fw_ddb_dma, &mbx_sts);
+       if (ret == QLA_ERROR) {
+               DEBUG2(ql4_printk(KERN_ERR, ha, "Set DDB failed\n"));
+               goto exit_login;
+       }
+
+       ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
+       ret = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
+       if (ret == QLA_ERROR) {
+               ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
+                          sess->targetname);
+               goto exit_login;
+       }
+
+exit_login:
+       if (fw_ddb_entry)
+               dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
index 4c2b848..c259378 100644 (file)
@@ -41,6 +41,16 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
                return status;
        }
 
+       if (is_qla40XX(ha)) {
+               if (test_bit(AF_HA_REMOVAL, &ha->flags)) {
+                       DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
+                                         "prematurely completing mbx cmd as "
+                                         "adapter removal detected\n",
+                                         ha->host_no, __func__));
+                       return status;
+               }
+       }
+
        if (is_qla8022(ha)) {
                if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
                        DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
@@ -413,6 +423,7 @@ qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
        memcpy(ha->name_string, init_fw_cb->iscsi_name,
                min(sizeof(ha->name_string),
                sizeof(init_fw_cb->iscsi_name)));
+       ha->def_timeout = le16_to_cpu(init_fw_cb->def_timeout);
        /*memcpy(ha->alias, init_fw_cb->Alias,
               min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
 
index 30f31b1..4169c8b 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/slab.h>
 #include <linux/blkdev.h>
 #include <linux/iscsi_boot_sysfs.h>
+#include <linux/inet.h>
 
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsicam.h>
@@ -31,6 +32,13 @@ static struct kmem_cache *srb_cachep;
 /*
  * Module parameter information and variables
  */
+int ql4xdisablesysfsboot = 1;
+module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xdisablesysfsboot,
+               "Set to disable exporting boot targets to sysfs\n"
+               " 0 - Export boot targets\n"
+               " 1 - Do not export boot targets (Default)");
+
 int ql4xdontresethba = 0;
 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ql4xdontresethba,
@@ -63,7 +71,7 @@ static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
                "Target Session Recovery Timeout.\n"
-               " Default: 30 sec.");
+               " Default: 120 sec.");
 
 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
 /*
@@ -415,7 +423,7 @@ static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
        qla_ep = ep->dd_data;
        ha = to_qla_host(qla_ep->host);
 
-       if (adapter_up(ha))
+       if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
                ret = 1;
 
        return ret;
@@ -975,6 +983,150 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
 
 }
 
+int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
+{
+       uint32_t mbx_sts = 0;
+       uint16_t tmp_ddb_index;
+       int ret;
+
+get_ddb_index:
+       tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
+
+       if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "Free DDB index not available\n"));
+               ret = QLA_ERROR;
+               goto exit_get_ddb_index;
+       }
+
+       if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
+               goto get_ddb_index;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "Found a free DDB index at %d\n", tmp_ddb_index));
+       ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
+       if (ret == QLA_ERROR) {
+               if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
+                       ql4_printk(KERN_INFO, ha,
+                                  "DDB index = %d not available trying next\n",
+                                  tmp_ddb_index);
+                       goto get_ddb_index;
+               }
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "Free FW DDB not available\n"));
+       }
+
+       *ddb_index = tmp_ddb_index;
+
+exit_get_ddb_index:
+       return ret;
+}
+
+static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
+                                  struct ddb_entry *ddb_entry,
+                                  char *existing_ipaddr,
+                                  char *user_ipaddr)
+{
+       uint8_t dst_ipaddr[IPv6_ADDR_LEN];
+       char formatted_ipaddr[DDB_IPADDR_LEN];
+       int status = QLA_SUCCESS, ret = 0;
+
+       if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
+               ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
+                              '\0', NULL);
+               if (ret == 0) {
+                       status = QLA_ERROR;
+                       goto out_match;
+               }
+               ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
+       } else {
+               ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
+                              '\0', NULL);
+               if (ret == 0) {
+                       status = QLA_ERROR;
+                       goto out_match;
+               }
+               ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
+       }
+
+       if (strcmp(existing_ipaddr, formatted_ipaddr))
+               status = QLA_ERROR;
+
+out_match:
+       return status;
+}
+
+static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
+                                     struct iscsi_cls_conn *cls_conn)
+{
+       int idx = 0, max_ddbs, rval;
+       struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
+       struct iscsi_session *sess, *existing_sess;
+       struct iscsi_conn *conn, *existing_conn;
+       struct ddb_entry *ddb_entry;
+
+       sess = cls_sess->dd_data;
+       conn = cls_conn->dd_data;
+
+       if (sess->targetname == NULL ||
+           conn->persistent_address == NULL ||
+           conn->persistent_port == 0)
+               return QLA_ERROR;
+
+       max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+                                    MAX_DEV_DB_ENTRIES;
+
+       for (idx = 0; idx < max_ddbs; idx++) {
+               ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+               if (ddb_entry == NULL)
+                       continue;
+
+               if (ddb_entry->ddb_type != FLASH_DDB)
+                       continue;
+
+               existing_sess = ddb_entry->sess->dd_data;
+               existing_conn = ddb_entry->conn->dd_data;
+
+               if (existing_sess->targetname == NULL ||
+                   existing_conn->persistent_address == NULL ||
+                   existing_conn->persistent_port == 0)
+                       continue;
+
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "IQN = %s User IQN = %s\n",
+                                 existing_sess->targetname,
+                                 sess->targetname));
+
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "IP = %s User IP = %s\n",
+                                 existing_conn->persistent_address,
+                                 conn->persistent_address));
+
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "Port = %d User Port = %d\n",
+                                 existing_conn->persistent_port,
+                                 conn->persistent_port));
+
+               if (strcmp(existing_sess->targetname, sess->targetname))
+                       continue;
+               rval = qla4xxx_match_ipaddress(ha, ddb_entry,
+                                       existing_conn->persistent_address,
+                                       conn->persistent_address);
+               if (rval == QLA_ERROR)
+                       continue;
+               if (existing_conn->persistent_port != conn->persistent_port)
+                       continue;
+               break;
+       }
+
+       if (idx == max_ddbs)
+               return QLA_ERROR;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "Match found in fwdb sessions\n"));
+       return QLA_SUCCESS;
+}
+
 static struct iscsi_cls_session *
 qla4xxx_session_create(struct iscsi_endpoint *ep,
                        uint16_t cmds_max, uint16_t qdepth,
@@ -984,8 +1136,7 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
        struct scsi_qla_host *ha;
        struct qla_endpoint *qla_ep;
        struct ddb_entry *ddb_entry;
-       uint32_t ddb_index;
-       uint32_t mbx_sts = 0;
+       uint16_t ddb_index;
        struct iscsi_session *sess;
        struct sockaddr *dst_addr;
        int ret;
@@ -1000,32 +1151,9 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
        dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
        ha = to_qla_host(qla_ep->host);
 
-get_ddb_index:
-       ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
-
-       if (ddb_index >= MAX_DDB_ENTRIES) {
-               DEBUG2(ql4_printk(KERN_INFO, ha,
-                                 "Free DDB index not available\n"));
-               return NULL;
-       }
-
-       if (test_and_set_bit(ddb_index, ha->ddb_idx_map))
-               goto get_ddb_index;
-
-       DEBUG2(ql4_printk(KERN_INFO, ha,
-                         "Found a free DDB index at %d\n", ddb_index));
-       ret = qla4xxx_req_ddb_entry(ha, ddb_index, &mbx_sts);
-       if (ret == QLA_ERROR) {
-               if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
-                       ql4_printk(KERN_INFO, ha,
-                                  "DDB index = %d not available trying next\n",
-                                  ddb_index);
-                       goto get_ddb_index;
-               }
-               DEBUG2(ql4_printk(KERN_INFO, ha,
-                                 "Free FW DDB not available\n"));
+       ret = qla4xxx_get_ddb_index(ha, &ddb_index);
+       if (ret == QLA_ERROR)
                return NULL;
-       }
 
        cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
                                       cmds_max, sizeof(struct ddb_entry),
@@ -1040,6 +1168,8 @@ get_ddb_index:
        ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
        ddb_entry->ha = ha;
        ddb_entry->sess = cls_sess;
+       ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
+       ddb_entry->ddb_change = qla4xxx_ddb_change;
        cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
        ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
        ha->tot_ddbs++;
@@ -1077,6 +1207,9 @@ qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
        DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
        cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
                                    conn_idx);
+       if (!cls_conn)
+               return NULL;
+
        sess = cls_sess->dd_data;
        ddb_entry = sess->dd_data;
        ddb_entry->conn = cls_conn;
@@ -1109,7 +1242,7 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
        struct iscsi_session *sess;
        struct ddb_entry *ddb_entry;
        struct scsi_qla_host *ha;
-       struct dev_db_entry *fw_ddb_entry;
+       struct dev_db_entry *fw_ddb_entry = NULL;
        dma_addr_t fw_ddb_entry_dma;
        uint32_t mbx_sts = 0;
        int ret = 0;
@@ -1120,12 +1253,25 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
        ddb_entry = sess->dd_data;
        ha = ddb_entry->ha;
 
+       /* Check if we have a matching FW DDB; if so, do not log in to
+        * this target, as it could cause the target to log out the
+        * previous connection.
+        */
+       ret = qla4xxx_match_fwdb_session(ha, cls_conn);
+       if (ret == QLA_SUCCESS) {
+               ql4_printk(KERN_INFO, ha,
+                          "Session already exists in FW.\n");
+               ret = -EEXIST;
+               goto exit_conn_start;
+       }
+
        fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
                                          &fw_ddb_entry_dma, GFP_KERNEL);
        if (!fw_ddb_entry) {
                ql4_printk(KERN_ERR, ha,
                           "%s: Unable to allocate dma buffer\n", __func__);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto exit_conn_start;
        }
 
        ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
@@ -1138,9 +1284,7 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
                if (mbx_sts)
                        if (ddb_entry->fw_ddb_device_state ==
                                                DDB_DS_SESSION_ACTIVE) {
-                               iscsi_conn_start(ddb_entry->conn);
-                               iscsi_conn_login_event(ddb_entry->conn,
-                                               ISCSI_CONN_STATE_LOGGED_IN);
+                               ddb_entry->unblock_sess(ddb_entry->sess);
                                goto exit_set_param;
                        }
 
@@ -1167,8 +1311,9 @@ exit_set_param:
        ret = 0;
 
 exit_conn_start:
-       dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
-                         fw_ddb_entry, fw_ddb_entry_dma);
+       if (fw_ddb_entry)
+               dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+                                 fw_ddb_entry, fw_ddb_entry_dma);
        return ret;
 }
 
@@ -1344,6 +1489,101 @@ static int qla4xxx_task_xmit(struct iscsi_task *task)
        return -ENOSYS;
 }
 
+static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
+                                    struct dev_db_entry *fw_ddb_entry,
+                                    struct iscsi_cls_session *cls_sess,
+                                    struct iscsi_cls_conn *cls_conn)
+{
+       int buflen = 0;
+       struct iscsi_session *sess;
+       struct iscsi_conn *conn;
+       char ip_addr[DDB_IPADDR_LEN];
+       uint16_t options = 0;
+
+       sess = cls_sess->dd_data;
+       conn = cls_conn->dd_data;
+
+       conn->max_recv_dlength = BYTE_UNITS *
+                         le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
+
+       conn->max_xmit_dlength = BYTE_UNITS *
+                         le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
+
+       sess->initial_r2t_en =
+                           (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
+
+       sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
+
+       sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
+
+       sess->first_burst = BYTE_UNITS *
+                              le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
+
+       sess->max_burst = BYTE_UNITS *
+                                le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
+
+       sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+
+       sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
+
+       conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
+
+       sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+
+       options = le16_to_cpu(fw_ddb_entry->options);
+       if (options & DDB_OPT_IPV6_DEVICE)
+               sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
+       else
+               sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
+
+       iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
+                       (char *)fw_ddb_entry->iscsi_name, buflen);
+       iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
+                       (char *)ha->name_string, buflen);
+       iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
+                       (char *)ip_addr, buflen);
+}
+
+void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
+                                            struct ddb_entry *ddb_entry)
+{
+       struct iscsi_cls_session *cls_sess;
+       struct iscsi_cls_conn *cls_conn;
+       uint32_t ddb_state;
+       dma_addr_t fw_ddb_entry_dma;
+       struct dev_db_entry *fw_ddb_entry;
+
+       fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+                                         &fw_ddb_entry_dma, GFP_KERNEL);
+       if (!fw_ddb_entry) {
+               ql4_printk(KERN_ERR, ha,
+                          "%s: Unable to allocate dma buffer\n", __func__);
+               goto exit_session_conn_fwddb_param;
+       }
+
+       if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
+                                   fw_ddb_entry_dma, NULL, NULL, &ddb_state,
+                                   NULL, NULL, NULL) == QLA_ERROR) {
+               DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
+                                 "get_ddb_entry for fw_ddb_index %d\n",
+                                 ha->host_no, __func__,
+                                 ddb_entry->fw_ddb_index));
+               goto exit_session_conn_fwddb_param;
+       }
+
+       cls_sess = ddb_entry->sess;
+
+       cls_conn = ddb_entry->conn;
+
+       /* Update params */
+       qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
+
+exit_session_conn_fwddb_param:
+       if (fw_ddb_entry)
+               dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+                                 fw_ddb_entry, fw_ddb_entry_dma);
+}
+
 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
                                       struct ddb_entry *ddb_entry)
 {
@@ -1360,7 +1600,7 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
        if (!fw_ddb_entry) {
                ql4_printk(KERN_ERR, ha,
                           "%s: Unable to allocate dma buffer\n", __func__);
-               return;
+               goto exit_session_conn_param;
        }
 
        if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
@@ -1370,7 +1610,7 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
                                  "get_ddb_entry for fw_ddb_index %d\n",
                                  ha->host_no, __func__,
                                  ddb_entry->fw_ddb_index));
-               return;
+               goto exit_session_conn_param;
        }
 
        cls_sess = ddb_entry->sess;
@@ -1379,6 +1619,12 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
        cls_conn = ddb_entry->conn;
        conn = cls_conn->dd_data;
 
+       /* Update timers after login */
+       ddb_entry->default_relogin_timeout =
+                               le16_to_cpu(fw_ddb_entry->def_timeout);
+       ddb_entry->default_time2wait =
+                               le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+
        /* Update params */
        conn->max_recv_dlength = BYTE_UNITS *
                          le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
@@ -1407,6 +1653,11 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
 
        memcpy(sess->initiatorname, ha->name_string,
               min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
+
+exit_session_conn_param:
+       if (fw_ddb_entry)
+               dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+                                 fw_ddb_entry, fw_ddb_entry_dma);
 }
 
 /*
@@ -1607,6 +1858,9 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
                vfree(ha->chap_list);
        ha->chap_list = NULL;
 
+       if (ha->fw_ddb_dma_pool)
+               dma_pool_destroy(ha->fw_ddb_dma_pool);
+
        /* release io space registers  */
        if (is_qla8022(ha)) {
                if (ha->nx_pcibase)
@@ -1689,6 +1943,16 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
                goto mem_alloc_error_exit;
        }
 
+       ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
+                                             DDB_DMA_BLOCK_SIZE, 8, 0);
+
+       if (ha->fw_ddb_dma_pool == NULL) {
+               ql4_printk(KERN_WARNING, ha,
+                          "%s: fw_ddb_dma_pool allocation failed\n",
+                          __func__);
+               goto mem_alloc_error_exit;
+       }
+
        return QLA_SUCCESS;
 
 mem_alloc_error_exit:
@@ -1800,6 +2064,60 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
        }
 }
 
+void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+{
+       struct iscsi_session *sess;
+       struct ddb_entry *ddb_entry;
+       struct scsi_qla_host *ha;
+
+       sess = cls_sess->dd_data;
+       ddb_entry = sess->dd_data;
+       ha = ddb_entry->ha;
+
+       if (!(ddb_entry->ddb_type == FLASH_DDB))
+               return;
+
+       if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
+           !iscsi_is_session_online(cls_sess)) {
+               if (atomic_read(&ddb_entry->retry_relogin_timer) !=
+                   INVALID_ENTRY) {
+                       if (atomic_read(&ddb_entry->retry_relogin_timer) ==
+                                       0) {
+                               atomic_set(&ddb_entry->retry_relogin_timer,
+                                          INVALID_ENTRY);
+                               set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+                               set_bit(DF_RELOGIN, &ddb_entry->flags);
+                               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                      "%s: index [%d] login device\n",
+                                       __func__, ddb_entry->fw_ddb_index));
+                       } else
+                               atomic_dec(&ddb_entry->retry_relogin_timer);
+               }
+       }
+
+       /* Wait for the relogin to time out */
+       if (atomic_read(&ddb_entry->relogin_timer) &&
+           (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
+               /*
+                * If the relogin times out and the device is
+                * still NOT ONLINE then try and relogin again.
+                */
+               if (!iscsi_is_session_online(cls_sess)) {
+                       /* Reset retry relogin timer */
+                       atomic_inc(&ddb_entry->relogin_retry_count);
+                       DEBUG2(ql4_printk(KERN_INFO, ha,
+                               "%s: index[%d] relogin timed out - retrying"
+                               " relogin (%d), retry (%d)\n", __func__,
+                               ddb_entry->fw_ddb_index,
+                               atomic_read(&ddb_entry->relogin_retry_count),
+                               ddb_entry->default_time2wait + 4));
+                       set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+                       atomic_set(&ddb_entry->retry_relogin_timer,
+                                  ddb_entry->default_time2wait + 4);
+               }
+       }
+}
+
 /**
  * qla4xxx_timer - checks every second for work to do.
  * @ha: Pointer to host adapter structure.
@@ -1809,6 +2127,8 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
        int start_dpc = 0;
        uint16_t w;
 
+       iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
+
        /* If we are in the middle of AER/EEH processing
         * skip any processing and reschedule the timer
         */
@@ -2078,7 +2398,12 @@ static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
        sess = cls_session->dd_data;
        ddb_entry = sess->dd_data;
        ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
-       iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+
+       if (ddb_entry->ddb_type == FLASH_DDB)
+               iscsi_block_session(ddb_entry->sess);
+       else
+               iscsi_session_failure(cls_session->dd_data,
+                                     ISCSI_ERR_CONN_FAILED);
 }
 
 /**
@@ -2163,7 +2488,7 @@ recover_ha_init_adapter:
 
                /* NOTE: AF_ONLINE flag set upon successful completion of
                 *       qla4xxx_initialize_adapter */
-               status = qla4xxx_initialize_adapter(ha);
+               status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
        }
 
        /* Retry failed adapter initialization, if necessary
@@ -2245,17 +2570,108 @@ static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
                        iscsi_unblock_session(ddb_entry->sess);
                } else {
                        /* Trigger relogin */
-                       iscsi_session_failure(cls_session->dd_data,
-                                             ISCSI_ERR_CONN_FAILED);
+                       if (ddb_entry->ddb_type == FLASH_DDB) {
+                               if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+                                       qla4xxx_arm_relogin_timer(ddb_entry);
+                       } else
+                               iscsi_session_failure(cls_session->dd_data,
+                                                     ISCSI_ERR_CONN_FAILED);
                }
        }
 }
 
+int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
+{
+       struct iscsi_session *sess;
+       struct ddb_entry *ddb_entry;
+       struct scsi_qla_host *ha;
+
+       sess = cls_session->dd_data;
+       ddb_entry = sess->dd_data;
+       ha = ddb_entry->ha;
+       ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+                  " unblock session\n", ha->host_no, __func__,
+                  ddb_entry->fw_ddb_index);
+
+       iscsi_unblock_session(ddb_entry->sess);
+
+       /* Start scan target */
+       if (test_bit(AF_ONLINE, &ha->flags)) {
+               ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+                          " start scan\n", ha->host_no, __func__,
+                          ddb_entry->fw_ddb_index);
+               scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
+       }
+       return QLA_SUCCESS;
+}
+
+int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
+{
+       struct iscsi_session *sess;
+       struct ddb_entry *ddb_entry;
+       struct scsi_qla_host *ha;
+
+       sess = cls_session->dd_data;
+       ddb_entry = sess->dd_data;
+       ha = ddb_entry->ha;
+       ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+                  " unblock user space session\n", ha->host_no, __func__,
+                  ddb_entry->fw_ddb_index);
+       iscsi_conn_start(ddb_entry->conn);
+       iscsi_conn_login_event(ddb_entry->conn,
+                              ISCSI_CONN_STATE_LOGGED_IN);
+
+       return QLA_SUCCESS;
+}
+
 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
 {
        iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
 }
 
+static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+{
+       uint16_t relogin_timer;
+       struct iscsi_session *sess;
+       struct ddb_entry *ddb_entry;
+       struct scsi_qla_host *ha;
+
+       sess = cls_sess->dd_data;
+       ddb_entry = sess->dd_data;
+       ha = ddb_entry->ha;
+
+       relogin_timer = max(ddb_entry->default_relogin_timeout,
+                           (uint16_t)RELOGIN_TOV);
+       atomic_set(&ddb_entry->relogin_timer, relogin_timer);
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
+                         ddb_entry->fw_ddb_index, relogin_timer));
+
+       qla4xxx_login_flash_ddb(cls_sess);
+}
+
+static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
+{
+       struct iscsi_session *sess;
+       struct ddb_entry *ddb_entry;
+       struct scsi_qla_host *ha;
+
+       sess = cls_sess->dd_data;
+       ddb_entry = sess->dd_data;
+       ha = ddb_entry->ha;
+
+       if (!(ddb_entry->ddb_type == FLASH_DDB))
+               return;
+
+       if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
+           !iscsi_is_session_online(cls_sess)) {
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "relogin issued\n"));
+               qla4xxx_relogin_flash_ddb(cls_sess);
+       }
+}
+
 void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
 {
        if (ha->dpc_thread)
@@ -2356,6 +2772,12 @@ dpc_post_reset_ha:
        if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
                qla4xxx_get_dhcp_ip_address(ha);
 
+       /* ---- relogin device? --- */
+       if (adapter_up(ha) &&
+           test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
+               iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
+       }
+
        /* ---- link change? --- */
        if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
                if (!test_bit(AF_LINK_UP, &ha->flags)) {
@@ -2368,8 +2790,12 @@ dpc_post_reset_ha:
                         * fatal error recovery.  Therefore, the driver must
                         * manually relogin to devices when recovering from
                         * connection failures, logouts, expired KATO, etc. */
-
-                       qla4xxx_relogin_all_devices(ha);
+                       if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
+                               qla4xxx_build_ddb_list(ha, ha->is_reset);
+                               iscsi_host_for_each_session(ha->host,
+                                               qla4xxx_login_flash_ddb);
+                       } else
+                               qla4xxx_relogin_all_devices(ha);
                }
        }
 }
@@ -2867,6 +3293,9 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
                          " target ID %d\n", __func__, ddb_index[0],
                          ddb_index[1]));
 
+       ha->pri_ddb_idx = ddb_index[0];
+       ha->sec_ddb_idx = ddb_index[1];
+
 exit_boot_info_free:
        dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
 exit_boot_info:
@@ -3034,6 +3463,9 @@ static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
                return ret;
        }
 
+       if (ql4xdisablesysfsboot)
+               return QLA_SUCCESS;
+
        if (ddb_index[0] == 0xffff)
                goto sec_target;
 
@@ -3066,7 +3498,15 @@ static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
        struct iscsi_boot_kobj *boot_kobj;
 
        if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
-               return 0;
+               return QLA_ERROR;
+
+       if (ql4xdisablesysfsboot) {
+               ql4_printk(KERN_INFO, ha,
+                          "%s: sysfsboot disabled - driver will trigger login "
+                          "and publish session for discovery.\n", __func__);
+               return QLA_SUCCESS;
+       }
+
 
        ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
        if (!ha->boot_kset)
@@ -3108,7 +3548,7 @@ static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
        if (!boot_kobj)
                goto put_host;
 
-       return 0;
+       return QLA_SUCCESS;
 
 put_host:
        scsi_host_put(ha->host);
@@ -3174,9 +3614,507 @@ static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
 exit_chap_list:
        dma_free_coherent(&ha->pdev->dev, chap_size,
                        chap_flash_data, chap_dma);
-       return;
 }
 
+static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
+                                 struct ql4_tuple_ddb *tddb)
+{
+       struct scsi_qla_host *ha;
+       struct iscsi_cls_session *cls_sess;
+       struct iscsi_cls_conn *cls_conn;
+       struct iscsi_session *sess;
+       struct iscsi_conn *conn;
+
+       DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+       ha = ddb_entry->ha;
+       cls_sess = ddb_entry->sess;
+       sess = cls_sess->dd_data;
+       cls_conn = ddb_entry->conn;
+       conn = cls_conn->dd_data;
+
+       tddb->tpgt = sess->tpgt;
+       tddb->port = conn->persistent_port;
+       strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
+       strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
+}
+
+static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
+                                     struct ql4_tuple_ddb *tddb)
+{
+       uint16_t options = 0;
+
+       tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+       memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
+              min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
+
+       options = le16_to_cpu(fw_ddb_entry->options);
+       if (options & DDB_OPT_IPV6_DEVICE)
+               sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
+       else
+               sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
+
+       tddb->port = le16_to_cpu(fw_ddb_entry->port);
+}
+
+static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
+                                    struct ql4_tuple_ddb *old_tddb,
+                                    struct ql4_tuple_ddb *new_tddb)
+{
+       if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
+               return QLA_ERROR;
+
+       if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
+               return QLA_ERROR;
+
+       if (old_tddb->port != new_tddb->port)
+               return QLA_ERROR;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
+                         old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
+                         old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
+                         new_tddb->ip_addr, new_tddb->iscsi_name));
+
+       return QLA_SUCCESS;
+}
+
+static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
+                                    struct dev_db_entry *fw_ddb_entry)
+{
+       struct ddb_entry *ddb_entry;
+       struct ql4_tuple_ddb *fw_tddb = NULL;
+       struct ql4_tuple_ddb *tmp_tddb = NULL;
+       int idx;
+       int ret = QLA_ERROR;
+
+       fw_tddb = vzalloc(sizeof(*fw_tddb));
+       if (!fw_tddb) {
+               DEBUG2(ql4_printk(KERN_WARNING, ha,
+                                 "Memory Allocation failed.\n"));
+               ret = QLA_SUCCESS;
+               goto exit_check;
+       }
+
+       tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+       if (!tmp_tddb) {
+               DEBUG2(ql4_printk(KERN_WARNING, ha,
+                                 "Memory Allocation failed.\n"));
+               ret = QLA_SUCCESS;
+               goto exit_check;
+       }
+
+       qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
+
+       for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+               ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+               if (ddb_entry == NULL)
+                       continue;
+
+               qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
+               if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
+                       ret = QLA_SUCCESS; /* found */
+                       goto exit_check;
+               }
+       }
+
+exit_check:
+       if (fw_tddb)
+               vfree(fw_tddb);
+       if (tmp_tddb)
+               vfree(tmp_tddb);
+       return ret;
+}
+
+static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
+                                      struct list_head *list_nt,
+                                      struct dev_db_entry *fw_ddb_entry)
+{
+       struct qla_ddb_index  *nt_ddb_idx, *nt_ddb_idx_tmp;
+       struct ql4_tuple_ddb *fw_tddb = NULL;
+       struct ql4_tuple_ddb *tmp_tddb = NULL;
+       int ret = QLA_ERROR;
+
+       fw_tddb = vzalloc(sizeof(*fw_tddb));
+       if (!fw_tddb) {
+               DEBUG2(ql4_printk(KERN_WARNING, ha,
+                                 "Memory Allocation failed.\n"));
+               ret = QLA_SUCCESS;
+               goto exit_check;
+       }
+
+       tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+       if (!tmp_tddb) {
+               DEBUG2(ql4_printk(KERN_WARNING, ha,
+                                 "Memory Allocation failed.\n"));
+               ret = QLA_SUCCESS;
+               goto exit_check;
+       }
+
+       qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
+
+       list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+               qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb);
+               if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
+                       ret = QLA_SUCCESS; /* found */
+                       goto exit_check;
+               }
+       }
+
+exit_check:
+       if (fw_tddb)
+               vfree(fw_tddb);
+       if (tmp_tddb)
+               vfree(tmp_tddb);
+       return ret;
+}
+
+static void qla4xxx_free_nt_list(struct list_head *list_nt)
+{
+       struct qla_ddb_index  *nt_ddb_idx, *nt_ddb_idx_tmp;
+
+       /* Free up the normal targets list */
+       list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+               list_del_init(&nt_ddb_idx->list);
+               vfree(nt_ddb_idx);
+       }
+
+}
+
+static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
+                                       struct dev_db_entry *fw_ddb_entry)
+{
+       struct iscsi_endpoint *ep;
+       struct sockaddr_in *addr;
+       struct sockaddr_in6 *addr6;
+       struct sockaddr *dst_addr;
+       char *ip;
+
+       /* TODO: need to destroy the iscsi_endpoint on unload */
+       dst_addr = vmalloc(sizeof(*dst_addr));
+       if (!dst_addr)
+               return NULL;
+
+       if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
+               dst_addr->sa_family = AF_INET6;
+               addr6 = (struct sockaddr_in6 *)dst_addr;
+               ip = (char *)&addr6->sin6_addr;
+               memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
+               addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
+
+       } else {
+               dst_addr->sa_family = AF_INET;
+               addr = (struct sockaddr_in *)dst_addr;
+               ip = (char *)&addr->sin_addr;
+               memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
+               addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
+       }
+
+       ep = qla4xxx_ep_connect(ha->host, dst_addr, 0);
+       vfree(dst_addr);
+       return ep;
+}
+
+static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
+{
+       if (ql4xdisablesysfsboot)
+               return QLA_SUCCESS;
+       if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
+               return QLA_ERROR;
+       return QLA_SUCCESS;
+}
+
+static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
+                                         struct ddb_entry *ddb_entry)
+{
+       ddb_entry->ddb_type = FLASH_DDB;
+       ddb_entry->fw_ddb_index = INVALID_ENTRY;
+       ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
+       ddb_entry->ha = ha;
+       ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
+       ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
+
+       atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
+       atomic_set(&ddb_entry->relogin_timer, 0);
+       atomic_set(&ddb_entry->relogin_retry_count, 0);
+
+       ddb_entry->default_relogin_timeout =
+               le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
+       ddb_entry->default_time2wait =
+               le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
+}
+
+static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
+{
+       uint32_t idx = 0;
+       uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
+       uint32_t sts[MBOX_REG_COUNT];
+       uint32_t ip_state;
+       unsigned long wtime;
+       int ret;
+
+       wtime = jiffies + (HZ * IP_CONFIG_TOV);
+       do {
+               for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
+                       if (ip_idx[idx] == -1)
+                               continue;
+
+                       ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
+
+                       if (ret == QLA_ERROR) {
+                               ip_idx[idx] = -1;
+                               continue;
+                       }
+
+                       ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
+
+                       DEBUG2(ql4_printk(KERN_INFO, ha,
+                                         "Waiting for IP state for idx = %d, state = 0x%x\n",
+                                         ip_idx[idx], ip_state));
+                       if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
+                           ip_state == IP_ADDRSTATE_INVALID ||
+                           ip_state == IP_ADDRSTATE_PREFERRED ||
+                           ip_state == IP_ADDRSTATE_DEPRICATED ||
+                           ip_state == IP_ADDRSTATE_DISABLING)
+                               ip_idx[idx] = -1;
+
+               }
+
+               /* Break if all IP states checked */
+               if ((ip_idx[0] == -1) &&
+                   (ip_idx[1] == -1) &&
+                   (ip_idx[2] == -1) &&
+                   (ip_idx[3] == -1))
+                       break;
+               schedule_timeout_uninterruptible(HZ);
+       } while (time_after(wtime, jiffies));
+}
+
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
+{
+       int max_ddbs;
+       int ret;
+       uint32_t idx = 0, next_idx = 0;
+       uint32_t state = 0, conn_err = 0;
+       uint16_t conn_id;
+       struct dev_db_entry *fw_ddb_entry;
+       struct ddb_entry *ddb_entry = NULL;
+       dma_addr_t fw_ddb_dma;
+       struct iscsi_cls_session *cls_sess;
+       struct iscsi_session *sess;
+       struct iscsi_cls_conn *cls_conn;
+       struct iscsi_endpoint *ep;
+       uint16_t cmds_max = 32, tmo = 0;
+       uint32_t initial_cmdsn = 0;
+       struct list_head list_st, list_nt; /* List of sendtargets */
+       struct qla_ddb_index  *st_ddb_idx, *st_ddb_idx_tmp;
+       int fw_idx_size;
+       unsigned long wtime;
+       struct qla_ddb_index  *nt_ddb_idx;
+
+       if (!test_bit(AF_LINK_UP, &ha->flags)) {
+               set_bit(AF_BUILD_DDB_LIST, &ha->flags);
+               ha->is_reset = is_reset;
+               return;
+       }
+       max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+                                    MAX_DEV_DB_ENTRIES;
+
+       fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+                                     &fw_ddb_dma);
+       if (fw_ddb_entry == NULL) {
+               DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+               goto exit_ddb_list;
+       }
+
+       INIT_LIST_HEAD(&list_st);
+       INIT_LIST_HEAD(&list_nt);
+       fw_idx_size = sizeof(struct qla_ddb_index);
+
+       for (idx = 0; idx < max_ddbs; idx = next_idx) {
+               ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
+                                             fw_ddb_dma, NULL,
+                                             &next_idx, &state, &conn_err,
+                                             NULL, &conn_id);
+               if (ret == QLA_ERROR)
+                       break;
+
+               if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
+                       goto continue_next_st;
+
+               /* Check if ST, add to the list_st */
+               if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
+                       goto continue_next_st;
+
+               st_ddb_idx = vzalloc(fw_idx_size);
+               if (!st_ddb_idx)
+                       break;
+
+               st_ddb_idx->fw_ddb_idx = idx;
+
+               list_add_tail(&st_ddb_idx->list, &list_st);
+continue_next_st:
+               if (next_idx == 0)
+                       break;
+       }
+
+       /* Before issuing the conn open mbox, ensure all IP states are
+        * configured. Note: conn open fails if the IPs are not configured.
+        */
+       qla4xxx_wait_for_ip_configuration(ha);
+
+       /* Go thru the STs and fire the sendtargets by issuing conn open mbx */
+       list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
+               qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
+       }
+
+       /* Wait for all sendtargets to complete (minimum 12 sec) */
+       tmo = ((ha->def_timeout < LOGIN_TOV) ? LOGIN_TOV : ha->def_timeout);
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "Default time to wait for build ddb %d\n", tmo));
+
+       wtime = jiffies + (HZ * tmo);
+       do {
+               list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st,
+                                        list) {
+                       ret = qla4xxx_get_fwddb_entry(ha,
+                                                     st_ddb_idx->fw_ddb_idx,
+                                                     NULL, 0, NULL, &next_idx,
+                                                     &state, &conn_err, NULL,
+                                                     NULL);
+                       if (ret == QLA_ERROR)
+                               continue;
+
+                       if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+                           state == DDB_DS_SESSION_FAILED) {
+                               list_del_init(&st_ddb_idx->list);
+                               vfree(st_ddb_idx);
+                       }
+               }
+               schedule_timeout_uninterruptible(HZ / 10);
+       } while (time_after(wtime, jiffies));
+
+       /* Free up the sendtargets list */
+       list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
+               list_del_init(&st_ddb_idx->list);
+               vfree(st_ddb_idx);
+       }
+
+       for (idx = 0; idx < max_ddbs; idx = next_idx) {
+               ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
+                                             fw_ddb_dma, NULL,
+                                             &next_idx, &state, &conn_err,
+                                             NULL, &conn_id);
+               if (ret == QLA_ERROR)
+                       break;
+
+               if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
+                       goto continue_next_nt;
+
+               /* Check if NT; if so, add it to the list */
+               if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
+                       goto continue_next_nt;
+
+               if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+                   state == DDB_DS_SESSION_FAILED) {
+                       DEBUG2(ql4_printk(KERN_INFO, ha,
+                                         "Adding DDB to session = 0x%x\n",
+                                         idx));
+                       if (is_reset == INIT_ADAPTER) {
+                               nt_ddb_idx = vmalloc(fw_idx_size);
+                               if (!nt_ddb_idx)
+                                       break;
+
+                               nt_ddb_idx->fw_ddb_idx = idx;
+
+                               memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
+                                      sizeof(struct dev_db_entry));
+
+                               if (qla4xxx_is_flash_ddb_exists(ha, &list_nt,
+                                               fw_ddb_entry) == QLA_SUCCESS) {
+                                       vfree(nt_ddb_idx);
+                                       goto continue_next_nt;
+                               }
+                               list_add_tail(&nt_ddb_idx->list, &list_nt);
+                       } else if (is_reset == RESET_ADAPTER) {
+                               if (qla4xxx_is_session_exists(ha,
+                                                  fw_ddb_entry) == QLA_SUCCESS)
+                                       goto continue_next_nt;
+                       }
+
+                       /* Create the session object with INVALID_ENTRY;
+                        * the target_id gets set when we issue the login.
+                        */
+                       cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport,
+                                               ha->host, cmds_max,
+                                               sizeof(struct ddb_entry),
+                                               sizeof(struct ql4_task_data),
+                                               initial_cmdsn, INVALID_ENTRY);
+                       if (!cls_sess)
+                               goto exit_ddb_list;
+
+                       /*
+                        * iscsi_session_setup increments the driver reference
+                        * count, which would prevent the driver from being
+                        * unloaded, so call module_put() to decrement the
+                        * reference count.
+                        */
+                       module_put(qla4xxx_iscsi_transport.owner);
+                       sess = cls_sess->dd_data;
+                       ddb_entry = sess->dd_data;
+                       ddb_entry->sess = cls_sess;
+
+                       cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
+                       memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
+                              sizeof(struct dev_db_entry));
+
+                       qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
+
+                       cls_conn = iscsi_conn_setup(cls_sess,
+                                                   sizeof(struct qla_conn),
+                                                   conn_id);
+                       if (!cls_conn)
+                               goto exit_ddb_list;
+
+                       ddb_entry->conn = cls_conn;
+
+                       /* Setup ep, for displaying attributes in sysfs */
+                       ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
+                       if (ep) {
+                               ep->conn = cls_conn;
+                               cls_conn->ep = ep;
+                       } else {
+                               DEBUG2(ql4_printk(KERN_ERR, ha,
+                                                 "Unable to get ep\n"));
+                       }
+
+                       /* Update sess/conn params */
+                       qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess,
+                                                cls_conn);
+
+                       if (is_reset == RESET_ADAPTER) {
+                               iscsi_block_session(cls_sess);
+                               /* Use the relogin path to discover new devices
+                                * by short-circuiting the logic of setting a
+                                * relogin timer; instead, set the flags to
+                                * initiate login right away.
+                                */
+                               set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+                               set_bit(DF_RELOGIN, &ddb_entry->flags);
+                       }
+               }
+continue_next_nt:
+               if (next_idx == 0)
+                       break;
+       }
+exit_ddb_list:
+       qla4xxx_free_nt_list(&list_nt);
+       if (fw_ddb_entry)
+               dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+
+       qla4xxx_free_ddb_index(ha);
+}
+
+
 /**
  * qla4xxx_probe_adapter - callback function to probe HBA
  * @pdev: pointer to pci_dev structure
@@ -3298,7 +4236,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
         * firmware
         * NOTE: interrupts enabled upon successful completion
         */
-       status = qla4xxx_initialize_adapter(ha);
+       status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
        while ((!test_bit(AF_ONLINE, &ha->flags)) &&
            init_retry_count++ < MAX_INIT_RETRIES) {
 
@@ -3319,7 +4257,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
                if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
                        continue;
 
-               status = qla4xxx_initialize_adapter(ha);
+               status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
        }
 
        if (!test_bit(AF_ONLINE, &ha->flags)) {
@@ -3386,12 +4324,16 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
               ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
               ha->patch_number, ha->build_number);
 
-       qla4xxx_create_chap_list(ha);
-
        if (qla4xxx_setup_boot_info(ha))
                ql4_printk(KERN_ERR, ha, "%s:ISCSI boot info setup failed\n",
                           __func__);
 
+       /* Build the ddb list and log in to each target */
+       qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
+       iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
+
+       qla4xxx_create_chap_list(ha);
+
        qla4xxx_create_ifaces(ha);
        return 0;
 
@@ -3449,6 +4391,38 @@ static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
        }
 }
 
+static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
+{
+       struct ddb_entry *ddb_entry;
+       int options;
+       int idx;
+
+       for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+
+               ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+               if ((ddb_entry != NULL) &&
+                   (ddb_entry->ddb_type == FLASH_DDB)) {
+
+                       options = LOGOUT_OPTION_CLOSE_SESSION;
+                       if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
+                           == QLA_ERROR)
+                               ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
+                                          __func__);
+
+                       qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
+                       /*
+                        * We decremented the driver reference count when we
+                        * set up the session so that driver unload is
+                        * seamless without actually destroying the
+                        * session.
+                        */
+                       try_module_get(qla4xxx_iscsi_transport.owner);
+                       iscsi_destroy_endpoint(ddb_entry->conn->ep);
+                       qla4xxx_free_ddb(ha, ddb_entry);
+                       iscsi_session_teardown(ddb_entry->sess);
+               }
+       }
+}
 /**
  * qla4xxx_remove_adapter - calback function to remove adapter.
  * @pci_dev: PCI device pointer
@@ -3465,9 +4439,11 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
        /* destroy iface from sysfs */
        qla4xxx_destroy_ifaces(ha);
 
-       if (ha->boot_kset)
+       if ((!ql4xdisablesysfsboot) && ha->boot_kset)
                iscsi_boot_destroy_kset(ha->boot_kset);
 
+       qla4xxx_destroy_fw_ddb_session(ha);
+
        scsi_remove_host(ha->host);
 
        qla4xxx_free_adapter(ha);
@@ -4115,7 +5091,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
 
                qla4_8xxx_idc_unlock(ha);
                clear_bit(AF_FW_RECOVERY, &ha->flags);
-               rval = qla4xxx_initialize_adapter(ha);
+               rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
                qla4_8xxx_idc_lock(ha);
 
                if (rval != QLA_SUCCESS) {
@@ -4151,7 +5127,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
                if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
                    QLA82XX_DEV_READY)) {
                        clear_bit(AF_FW_RECOVERY, &ha->flags);
-                       rval = qla4xxx_initialize_adapter(ha);
+                       rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
                        if (rval == QLA_SUCCESS) {
                                ret = qla4xxx_request_irqs(ha);
                                if (ret) {
index c15347d..5254e57 100644 (file)
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k8"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k9"
index a1fd73d..8ba4510 100644
@@ -199,7 +199,7 @@ config SPI_FSL_LIB
        depends on FSL_SOC
 
 config SPI_FSL_SPI
-       tristate "Freescale SPI controller"
+       bool "Freescale SPI controller"
        depends on FSL_SOC
        select SPI_FSL_LIB
        help
@@ -208,7 +208,7 @@ config SPI_FSL_SPI
          MPC8569 uses the controller in QE mode, MPC8610 in cpu mode.
 
 config SPI_FSL_ESPI
-       tristate "Freescale eSPI controller"
+       bool "Freescale eSPI controller"
        depends on FSL_SOC
        select SPI_FSL_LIB
        help
index 024b48a..acc88b4 100644
@@ -13,6 +13,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/spinlock.h>
index e093d3e..0094c64 100644
@@ -256,7 +256,7 @@ static void spi_gpio_cleanup(struct spi_device *spi)
        spi_bitbang_cleanup(spi);
 }
 
-static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
+static int __devinit spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
 {
        int value;
 
@@ -270,7 +270,7 @@ static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
        return value;
 }
 
-static int __init
+static int __devinit
 spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label,
        u16 *res_flags)
 {
index 21c70b2..182e9c8 100644
@@ -8,6 +8,7 @@
  *
  */
 
+#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
index 84c934c..520e828 100644
@@ -517,10 +517,14 @@ static void ssb_pcicore_pcie_setup_workarounds(struct ssb_pcicore *pc)
 
 static void __devinit ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
 {
-       ssb_pcicore_fix_sprom_core_index(pc);
+       struct ssb_device *pdev = pc->dev;
+       struct ssb_bus *bus = pdev->bus;
+
+       if (bus->bustype == SSB_BUSTYPE_PCI)
+               ssb_pcicore_fix_sprom_core_index(pc);
 
        /* Disable PCI interrupts. */
-       ssb_write32(pc->dev, SSB_INTVEC, 0);
+       ssb_write32(pdev, SSB_INTVEC, 0);
 
        /* Additional PCIe always once-executed workarounds */
        if (pc->dev->id.coreid == SSB_DEV_PCIE) {
index 34c3bab..973223f 100644
@@ -607,6 +607,29 @@ static void sprom_extract_r8(struct ssb_sprom *out, const u16 *in)
        memcpy(&out->antenna_gain.ghz5, &out->antenna_gain.ghz24,
               sizeof(out->antenna_gain.ghz5));
 
+       /* Extract FEM info */
+       SPEX(fem.ghz2.tssipos, SSB_SPROM8_FEM2G,
+               SSB_SROM8_FEM_TSSIPOS, SSB_SROM8_FEM_TSSIPOS_SHIFT);
+       SPEX(fem.ghz2.extpa_gain, SSB_SPROM8_FEM2G,
+               SSB_SROM8_FEM_EXTPA_GAIN, SSB_SROM8_FEM_EXTPA_GAIN_SHIFT);
+       SPEX(fem.ghz2.pdet_range, SSB_SPROM8_FEM2G,
+               SSB_SROM8_FEM_PDET_RANGE, SSB_SROM8_FEM_PDET_RANGE_SHIFT);
+       SPEX(fem.ghz2.tr_iso, SSB_SPROM8_FEM2G,
+               SSB_SROM8_FEM_TR_ISO, SSB_SROM8_FEM_TR_ISO_SHIFT);
+       SPEX(fem.ghz2.antswlut, SSB_SPROM8_FEM2G,
+               SSB_SROM8_FEM_ANTSWLUT, SSB_SROM8_FEM_ANTSWLUT_SHIFT);
+
+       SPEX(fem.ghz5.tssipos, SSB_SPROM8_FEM5G,
+               SSB_SROM8_FEM_TSSIPOS, SSB_SROM8_FEM_TSSIPOS_SHIFT);
+       SPEX(fem.ghz5.extpa_gain, SSB_SPROM8_FEM5G,
+               SSB_SROM8_FEM_EXTPA_GAIN, SSB_SROM8_FEM_EXTPA_GAIN_SHIFT);
+       SPEX(fem.ghz5.pdet_range, SSB_SPROM8_FEM5G,
+               SSB_SROM8_FEM_PDET_RANGE, SSB_SROM8_FEM_PDET_RANGE_SHIFT);
+       SPEX(fem.ghz5.tr_iso, SSB_SPROM8_FEM5G,
+               SSB_SROM8_FEM_TR_ISO, SSB_SROM8_FEM_TR_ISO_SHIFT);
+       SPEX(fem.ghz5.antswlut, SSB_SPROM8_FEM5G,
+               SSB_SROM8_FEM_ANTSWLUT, SSB_SROM8_FEM_ANTSWLUT_SHIFT);
+
        sprom_extract_r458(out, in);
 
        /* TODO - get remaining rev 8 stuff needed */
index 21d8c1c..5e78c77 100644
@@ -671,7 +671,7 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
        }
 
        insns =
-           kmalloc(sizeof(struct comedi_insn) * insnlist.n_insns, GFP_KERNEL);
+           kcalloc(insnlist.n_insns, sizeof(struct comedi_insn), GFP_KERNEL);
        if (!insns) {
                DPRINTK("kmalloc failed\n");
                ret = -ENOMEM;
@@ -1432,7 +1432,21 @@ static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
        return ret;
 }
 
-static void comedi_unmap(struct vm_area_struct *area)
+
+static void comedi_vm_open(struct vm_area_struct *area)
+{
+       struct comedi_async *async;
+       struct comedi_device *dev;
+
+       async = area->vm_private_data;
+       dev = async->subdevice->device;
+
+       mutex_lock(&dev->mutex);
+       async->mmap_count++;
+       mutex_unlock(&dev->mutex);
+}
+
+static void comedi_vm_close(struct vm_area_struct *area)
 {
        struct comedi_async *async;
        struct comedi_device *dev;
@@ -1446,15 +1460,13 @@ static void comedi_unmap(struct vm_area_struct *area)
 }
 
 static struct vm_operations_struct comedi_vm_ops = {
-       .close = comedi_unmap,
+       .open = comedi_vm_open,
+       .close = comedi_vm_close,
 };
 
 static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
 {
        const unsigned minor = iminor(file->f_dentry->d_inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
-       struct comedi_device *dev = dev_file_info->device;
        struct comedi_async *async = NULL;
        unsigned long start = vma->vm_start;
        unsigned long size;
@@ -1462,6 +1474,15 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
        int i;
        int retval;
        struct comedi_subdevice *s;
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+
+       dev_file_info = comedi_get_device_file_info(minor);
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        mutex_lock(&dev->mutex);
        if (!dev->attached) {
@@ -1528,11 +1549,17 @@ static unsigned int comedi_poll(struct file *file, poll_table * wait)
 {
        unsigned int mask = 0;
        const unsigned minor = iminor(file->f_dentry->d_inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
-       struct comedi_device *dev = dev_file_info->device;
        struct comedi_subdevice *read_subdev;
        struct comedi_subdevice *write_subdev;
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+       dev_file_info = comedi_get_device_file_info(minor);
+
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        mutex_lock(&dev->mutex);
        if (!dev->attached) {
@@ -1578,9 +1605,15 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
        int n, m, count = 0, retval = 0;
        DECLARE_WAITQUEUE(wait, current);
        const unsigned minor = iminor(file->f_dentry->d_inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
-       struct comedi_device *dev = dev_file_info->device;
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+       dev_file_info = comedi_get_device_file_info(minor);
+
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        if (!dev->attached) {
                DPRINTK("no driver configured on comedi%i\n", dev->minor);
@@ -1640,11 +1673,11 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
                                retval = -EAGAIN;
                                break;
                        }
+                       schedule();
                        if (signal_pending(current)) {
                                retval = -ERESTARTSYS;
                                break;
                        }
-                       schedule();
                        if (!s->busy)
                                break;
                        if (s->busy != file) {
@@ -1683,9 +1716,15 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
        int n, m, count = 0, retval = 0;
        DECLARE_WAITQUEUE(wait, current);
        const unsigned minor = iminor(file->f_dentry->d_inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
-       struct comedi_device *dev = dev_file_info->device;
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+       dev_file_info = comedi_get_device_file_info(minor);
+
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        if (!dev->attached) {
                DPRINTK("no driver configured on comedi%i\n", dev->minor);
@@ -1741,11 +1780,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
                                retval = -EAGAIN;
                                break;
                        }
+                       schedule();
                        if (signal_pending(current)) {
                                retval = -ERESTARTSYS;
                                break;
                        }
-                       schedule();
                        if (!s->busy) {
                                retval = 0;
                                break;
@@ -1885,11 +1924,17 @@ ok:
 static int comedi_close(struct inode *inode, struct file *file)
 {
        const unsigned minor = iminor(inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
-       struct comedi_device *dev = dev_file_info->device;
        struct comedi_subdevice *s = NULL;
        int i;
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+       dev_file_info = comedi_get_device_file_info(minor);
+
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        mutex_lock(&dev->mutex);
 
@@ -1923,10 +1968,15 @@ static int comedi_close(struct inode *inode, struct file *file)
 static int comedi_fasync(int fd, struct file *file, int on)
 {
        const unsigned minor = iminor(file->f_dentry->d_inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+       dev_file_info = comedi_get_device_file_info(minor);
 
-       struct comedi_device *dev = dev_file_info->device;
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        return fasync_helper(fd, file, on, &dev->async_queue);
 }
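
For reference, a minimal sketch of the lookup-then-validate pattern the comedi file operations above now share before taking dev->mutex; the helper name is hypothetical, while iminor() and comedi_get_device_file_info() are the calls already used in the hunks:

	/* Sketch: resolve the comedi device behind a file descriptor and
	 * refuse to proceed if the minor is no longer backed by a device. */
	static struct comedi_device *comedi_dev_from_file(struct file *file)
	{
		const unsigned minor = iminor(file->f_dentry->d_inode);
		struct comedi_device_file_info *info;

		info = comedi_get_device_file_info(minor);
		if (info == NULL)
			return NULL;		/* minor not registered */
		return info->device;		/* may also be NULL */
	}

Callers return -ENODEV when this yields NULL instead of dereferencing a stale pointer, which is what the mmap/poll/read/write/close/fasync hunks above do inline.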
index a8fea9a..6144afb 100644 (file)
@@ -1,4 +1,4 @@
-#define DRIVER_VERSION "v0.5"
+#define DRIVER_VERSION "v0.6"
 #define DRIVER_AUTHOR "Bernd Porr, BerndPorr@f2s.com"
 #define DRIVER_DESC "Stirling/ITL USB-DUX SIGMA -- Bernd.Porr@f2s.com"
 /*
@@ -25,7 +25,7 @@ Driver: usbduxsigma
 Description: University of Stirling USB DAQ & INCITE Technology Limited
 Devices: [ITL] USB-DUX (usbduxsigma.o)
 Author: Bernd Porr <BerndPorr@f2s.com>
-Updated: 21 Jul 2011
+Updated: 8 Nov 2011
 Status: testing
 */
 /*
@@ -44,6 +44,7 @@ Status: testing
  *   0.3: proper vendor ID and driver name
  *   0.4: fixed D/A voltage range
  *   0.5: various bug fixes, health check at startup
+ *   0.6: corrected wrong input range
  */
 
 /* generates loads of debug info */
@@ -175,7 +176,7 @@ Status: testing
 /* comedi constants */
 static const struct comedi_lrange range_usbdux_ai_range = { 1, {
                                                                BIP_RANGE
-                                                               (2.65)
+                                                               (2.65/2.0)
                                                                }
 };
 
index fb2e89c..5385da2 100644 (file)
@@ -89,6 +89,7 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
        {USB_DEVICE(0x0DF6, 0x0045)},
        {USB_DEVICE(0x0DF6, 0x0059)}, /* 11n mode disable */
        {USB_DEVICE(0x0DF6, 0x004B)},
+       {USB_DEVICE(0x0DF6, 0x005D)},
        {USB_DEVICE(0x0DF6, 0x0063)},
        /* Sweex */
        {USB_DEVICE(0x177F, 0x0154)},
index 480b0ed..115635f 100644 (file)
@@ -1021,6 +1021,7 @@ static int __devinit rtsx_probe(struct pci_dev *pci,
        th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan");
        if (IS_ERR(th)) {
                printk(KERN_ERR "Unable to start the device-scanning thread\n");
+               complete(&dev->scanning_done);
                quiesce_and_remove_host(dev);
                err = PTR_ERR(th);
                goto errout;
index 3d1279c..7eb5617 100644 (file)
@@ -54,6 +54,7 @@
 
 /* Bridge GPT id (1 - 4), DM Timer id (5 - 8) */
 #define DMT_ID(id) ((id) + 4)
+#define DM_TIMER_CLOCKS                4
 
 /* Bridge MCBSP id (6 - 10), OMAP Mcbsp id (0 - 4) */
 #define MCBSP_ID(id) ((id) - 6)
@@ -114,8 +115,13 @@ static s8 get_clk_type(u8 id)
  */
 void dsp_clk_exit(void)
 {
+       int i;
+
        dsp_clock_disable_all(dsp_clocks);
 
+       for (i = 0; i < DM_TIMER_CLOCKS; i++)
+               omap_dm_timer_free(timer[i]);
+
        clk_put(iva2_clk);
        clk_put(ssi.sst_fck);
        clk_put(ssi.ssr_fck);
@@ -130,9 +136,13 @@ void dsp_clk_exit(void)
 void dsp_clk_init(void)
 {
        static struct platform_device dspbridge_device;
+       int i, id;
 
        dspbridge_device.dev.bus = &platform_bus_type;
 
+       for (i = 0, id = 5; i < DM_TIMER_CLOCKS; i++, id++)
+               timer[i] = omap_dm_timer_request_specific(id);
+
        iva2_clk = clk_get(&dspbridge_device.dev, "iva2_ck");
        if (IS_ERR(iva2_clk))
                dev_err(bridge, "failed to get iva2 clock %p\n", iva2_clk);
@@ -204,8 +214,7 @@ int dsp_clk_enable(enum dsp_clk_id clk_id)
                clk_enable(iva2_clk);
                break;
        case GPT_CLK:
-               timer[clk_id - 1] =
-                               omap_dm_timer_request_specific(DMT_ID(clk_id));
+               status = omap_dm_timer_start(timer[clk_id - 1]);
                break;
 #ifdef CONFIG_OMAP_MCBSP
        case MCBSP_CLK:
@@ -281,7 +290,7 @@ int dsp_clk_disable(enum dsp_clk_id clk_id)
                clk_disable(iva2_clk);
                break;
        case GPT_CLK:
-               omap_dm_timer_free(timer[clk_id - 1]);
+               status = omap_dm_timer_stop(timer[clk_id - 1]);
                break;
 #ifdef CONFIG_OMAP_MCBSP
        case MCBSP_CLK:
index c43c7e3..76cfc6e 100644 (file)
 #include <linux/types.h>
 #include <linux/platform_device.h>
 #include <linux/pm.h>
-
-#ifdef MODULE
 #include <linux/module.h>
-#endif
-
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/moduleparam.h>
index 09c44ab..3872b8c 100644 (file)
@@ -68,6 +68,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
 {
        struct usbip_device *ud = &vdev->ud;
        struct urb *urb;
+       unsigned long flags;
 
        spin_lock(&vdev->priv_lock);
        urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
@@ -101,9 +102,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
 
        usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
 
-       spin_lock(&the_controller->lock);
+       spin_lock_irqsave(&the_controller->lock, flags);
        usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
-       spin_unlock(&the_controller->lock);
+       spin_unlock_irqrestore(&the_controller->lock, flags);
 
        usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
 
@@ -141,6 +142,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
 {
        struct vhci_unlink *unlink;
        struct urb *urb;
+       unsigned long flags;
 
        usbip_dump_header(pdu);
 
@@ -170,9 +172,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
                urb->status = pdu->u.ret_unlink.status;
                pr_info("urb->status %d\n", urb->status);
 
-               spin_lock(&the_controller->lock);
+               spin_lock_irqsave(&the_controller->lock, flags);
                usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
-               spin_unlock(&the_controller->lock);
+               spin_unlock_irqrestore(&the_controller->lock, flags);
 
                usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
                                     urb->status);
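
The two vhci_rx hunks above switch the_controller->lock to the irq-saving lock variants. A generic sketch of that idiom, with purely illustrative names:

	/* Sketch: a lock that is also taken from interrupt context (as the
	 * HCD lock is) must be acquired with the irq-saving variants in
	 * process context, otherwise an interrupt arriving while the lock
	 * is held can deadlock on it. */
	static void touch_shared_state(spinlock_t *lock, int *shared)
	{
		unsigned long flags;

		spin_lock_irqsave(lock, flags);		/* IRQs off, state saved */
		(*shared)++;
		spin_unlock_irqrestore(lock, flags);	/* previous state restored */
	}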
index 0fd96c1..8599545 100644 (file)
@@ -614,13 +614,12 @@ int iscsit_add_reject(
        hdr     = (struct iscsi_reject *) cmd->pdu;
        hdr->reason = reason;
 
-       cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+       cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
        if (!cmd->buf_ptr) {
                pr_err("Unable to allocate memory for cmd->buf_ptr\n");
                iscsit_release_cmd(cmd);
                return -1;
        }
-       memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
 
        spin_lock_bh(&conn->cmd_lock);
        list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
@@ -661,13 +660,12 @@ int iscsit_add_reject_from_cmd(
        hdr     = (struct iscsi_reject *) cmd->pdu;
        hdr->reason = reason;
 
-       cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+       cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
        if (!cmd->buf_ptr) {
                pr_err("Unable to allocate memory for cmd->buf_ptr\n");
                iscsit_release_cmd(cmd);
                return -1;
        }
-       memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
 
        if (add_to_conn) {
                spin_lock_bh(&conn->cmd_lock);
@@ -1017,11 +1015,6 @@ done:
                                " non-existent or non-exported iSCSI LUN:"
                                " 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
                }
-               if (ret == PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES)
-                       return iscsit_add_reject_from_cmd(
-                                       ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-                                       1, 1, buf, cmd);
-
                send_check_condition = 1;
                goto attach_cmd;
        }
@@ -1044,6 +1037,8 @@ done:
                 */
                send_check_condition = 1;
        } else {
+               cmd->data_length = cmd->se_cmd.data_length;
+
                if (iscsit_decide_list_to_build(cmd, payload_length) < 0)
                        return iscsit_add_reject_from_cmd(
                                ISCSI_REASON_BOOKMARK_NO_RESOURCES,
@@ -1123,7 +1118,7 @@ attach_cmd:
         * the backend memory allocation.
         */
        ret = transport_generic_new_cmd(&cmd->se_cmd);
-       if ((ret < 0) || (cmd->se_cmd.se_cmd_flags & SCF_SE_CMD_FAILED)) {
+       if (ret < 0) {
                immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
                dump_immediate_data = 1;
                goto after_immediate_data;
@@ -1341,7 +1336,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
 
                spin_lock_irqsave(&se_cmd->t_state_lock, flags);
                if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
-                    (se_cmd->se_cmd_flags & SCF_SE_CMD_FAILED))
+                    (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
                        dump_unsolicited_data = 1;
                spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
 
@@ -2513,10 +2508,10 @@ static int iscsit_send_data_in(
        if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
                if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
                        hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
-                       hdr->residual_count = cpu_to_be32(cmd->residual_count);
+                       hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
                } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
                        hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
-                       hdr->residual_count = cpu_to_be32(cmd->residual_count);
+                       hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
                }
        }
        hton24(hdr->dlength, datain.length);
@@ -3018,10 +3013,10 @@ static int iscsit_send_status(
        hdr->flags              |= ISCSI_FLAG_CMD_FINAL;
        if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
                hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
-               hdr->residual_count = cpu_to_be32(cmd->residual_count);
+               hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
        } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
                hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
-               hdr->residual_count = cpu_to_be32(cmd->residual_count);
+               hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
        }
        hdr->response           = cmd->iscsi_response;
        hdr->cmd_status         = cmd->se_cmd.scsi_status;
@@ -3133,6 +3128,7 @@ static int iscsit_send_task_mgt_rsp(
        hdr                     = (struct iscsi_tm_rsp *) cmd->pdu;
        memset(hdr, 0, ISCSI_HDR_LEN);
        hdr->opcode             = ISCSI_OP_SCSI_TMFUNC_RSP;
+       hdr->flags              = ISCSI_FLAG_CMD_FINAL;
        hdr->response           = iscsit_convert_tcm_tmr_rsp(se_tmr);
        hdr->itt                = cpu_to_be32(cmd->init_task_tag);
        cmd->stat_sn            = conn->stat_sn++;
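
Two hunks earlier in this file fold the kzalloc()+memcpy() of the rejected PDU header into kmemdup(). A small illustrative sketch of the idiom (the wrapper function is hypothetical):

	/* Sketch: duplicate an existing buffer in one call; kmemdup()
	 * allocates len bytes and copies src into them, returning NULL on
	 * allocation failure just like the open-coded version did. */
	static void *dup_pdu_header(const void *src, size_t len)
	{
		return kmemdup(src, len, GFP_KERNEL);
	}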
index beb3946..1cd6ce3 100644 (file)
 
 static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
 {
-       int j = DIV_ROUND_UP(len, 2);
+       int j = DIV_ROUND_UP(len, 2), rc;
 
-       hex2bin(dst, src, j);
+       rc = hex2bin(dst, src, j);
+       if (rc < 0)
+               pr_debug("CHAP string contains non hex digit symbols\n");
 
        dst[j] = '\0';
        return j;
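
The chap_string_to_hex() hunk above stops ignoring hex2bin()'s return value, which reports malformed input. A minimal sketch of the same check in isolation; unlike the hunk above, which only logs via pr_debug(), this hypothetical variant propagates the error:

	/* Sketch: decode a hex string of len characters into len/2 bytes,
	 * rejecting non-hex characters instead of producing garbage. */
	static int decode_hex(unsigned char *dst, const unsigned char *src, int len)
	{
		int n = DIV_ROUND_UP(len, 2);
		int rc = hex2bin(dst, src, n);

		if (rc < 0)
			return rc;	/* src contained a non-hex digit */
		return n;
	}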
index 3723d90..f1a02da 100644 (file)
@@ -398,7 +398,6 @@ struct iscsi_cmd {
        u32                     pdu_send_order;
        /* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
        u32                     pdu_start;
-       u32                     residual_count;
        /* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
        u32                     seq_send_order;
        /* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
@@ -535,7 +534,6 @@ struct iscsi_conn {
        atomic_t                connection_exit;
        atomic_t                connection_recovery;
        atomic_t                connection_reinstatement;
-       atomic_t                connection_wait;
        atomic_t                connection_wait_rcfr;
        atomic_t                sleep_on_conn_wait_comp;
        atomic_t                transport_failed;
@@ -643,7 +641,6 @@ struct iscsi_session {
        atomic_t                session_reinstatement;
        atomic_t                session_stop_active;
        atomic_t                sleep_on_sess_wait_comp;
-       atomic_t                transport_wait_cmds;
        /* connection list */
        struct list_head        sess_conn_list;
        struct list_head        cr_active_list;
index c4c68da..101b1be 100644 (file)
@@ -938,8 +938,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
                 * handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
                 */
                if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
-                       if (se_cmd->se_cmd_flags &
-                                       SCF_SCSI_RESERVATION_CONFLICT) {
+                       if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) {
                                cmd->i_state = ISTATE_SEND_STATUS;
                                spin_unlock_bh(&cmd->istate_lock);
                                iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
index daad362..d734bde 100644 (file)
@@ -224,7 +224,7 @@ static int iscsi_login_zero_tsih_s1(
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
                pr_err("Could not allocate memory for session\n");
-               return -1;
+               return -ENOMEM;
        }
 
        iscsi_login_set_conn_values(sess, conn, pdu->cid);
@@ -250,7 +250,8 @@ static int iscsi_login_zero_tsih_s1(
                pr_err("idr_pre_get() for sess_idr failed\n");
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
-               return -1;
+               kfree(sess);
+               return -ENOMEM;
        }
        spin_lock(&sess_idr_lock);
        idr_get_new(&sess_idr, NULL, &sess->session_index);
@@ -270,14 +271,16 @@ static int iscsi_login_zero_tsih_s1(
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
                pr_err("Unable to allocate memory for"
                                " struct iscsi_sess_ops.\n");
-               return -1;
+               kfree(sess);
+               return -ENOMEM;
        }
 
        sess->se_sess = transport_init_session();
-       if (!sess->se_sess) {
+       if (IS_ERR(sess->se_sess)) {
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
-               return -1;
+               kfree(sess);
+               return -ENOMEM;
        }
 
        return 0;
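
The iscsi_login_zero_tsih_s1() hunks above add kfree(sess) to each early error path and, per the new check, treat transport_init_session() as returning an ERR_PTR()-encoded value rather than NULL on failure. A condensed sketch of that shape (the surrounding error handling is illustrative):

	sess->se_sess = transport_init_session();
	if (IS_ERR(sess->se_sess)) {
		/* the errno is encoded in the pointer itself; a NULL
		 * check would never trigger here */
		kfree(sess);		/* undo the earlier allocation */
		return -ENOMEM;		/* as the hunk above does */
	}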
index 426cd4b..98936cb 100644 (file)
@@ -981,14 +981,13 @@ struct iscsi_login *iscsi_target_init_negotiation(
                return NULL;
        }
 
-       login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+       login->req = kmemdup(login_pdu, ISCSI_HDR_LEN, GFP_KERNEL);
        if (!login->req) {
                pr_err("Unable to allocate memory for Login Request.\n");
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
                goto out;
        }
-       memcpy(login->req, login_pdu, ISCSI_HDR_LEN);
 
        login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
        if (!login->req_buf) {
index 3df1c9b..81d5832 100644 (file)
@@ -113,11 +113,9 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
                        scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
                        &tl_cmd->tl_sense_buf[0]);
 
-       /*
-        * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
-        */
        if (scsi_bidi_cmnd(sc))
-               se_cmd->t_tasks_bidi = 1;
+               se_cmd->se_cmd_flags |= SCF_BIDI;
+
        /*
         * Locate the struct se_lun pointer and attach it to struct se_cmd
         */
@@ -148,27 +146,13 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
         * Allocate the necessary tasks to complete the received CDB+data
         */
        ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
-       if (ret == -ENOMEM) {
-               /* Out of Resources */
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
-       } else if (ret == -EINVAL) {
-               /*
-                * Handle case for SAM_STAT_RESERVATION_CONFLICT
-                */
-               if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
-                       return PYX_TRANSPORT_RESERVATION_CONFLICT;
-               /*
-                * Otherwise, return SAM_STAT_CHECK_CONDITION and return
-                * sense data.
-                */
-               return PYX_TRANSPORT_USE_SENSE_REASON;
-       }
-
+       if (ret != 0)
+               return ret;
        /*
         * For BIDI commands, pass in the extra READ buffer
         * to transport_generic_map_mem_to_cmd() below..
         */
-       if (se_cmd->t_tasks_bidi) {
+       if (se_cmd->se_cmd_flags & SCF_BIDI) {
                struct scsi_data_buffer *sdb = scsi_in(sc);
 
                sgl_bidi = sdb->table.sgl;
@@ -194,12 +178,8 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
        }
 
        /* Tell the core about our preallocated memory */
-       ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
+       return transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
                        scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
-       if (ret < 0)
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
-
-       return 0;
 }
 
 /*
@@ -1360,17 +1340,16 @@ void tcm_loop_drop_scsi_hba(
 {
        struct tcm_loop_hba *tl_hba = container_of(wwn,
                                struct tcm_loop_hba, tl_hba_wwn);
-       int host_no = tl_hba->sh->host_no;
+
+       pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
+               " SAS Address: %s at Linux/SCSI Host ID: %d\n",
+               tl_hba->tl_wwn_address, tl_hba->sh->host_no);
        /*
         * Call device_unregister() on the original tl_hba->dev.
         * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
         * release *tl_hba;
         */
        device_unregister(&tl_hba->dev);
-
-       pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target"
-               " SAS Address: %s at Linux/SCSI Host ID: %d\n",
-               config_item_name(&wwn->wwn_group.cg_item), host_no);
 }
 
 /* Start items for tcm_loop_cit */
index 88f2ad4..1dcbef4 100644 (file)
@@ -191,9 +191,10 @@ int target_emulate_set_target_port_groups(struct se_task *task)
        int alua_access_state, primary = 0, rc;
        u16 tg_pt_id, rtpi;
 
-       if (!l_port)
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
-
+       if (!l_port) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
+       }
        buf = transport_kmap_first_data_page(cmd);
 
        /*
@@ -203,7 +204,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
        l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
        if (!l_tg_pt_gp_mem) {
                pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
-               rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               rc = -EINVAL;
                goto out;
        }
        spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -211,7 +213,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
        if (!l_tg_pt_gp) {
                spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
                pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
-               rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               rc = -EINVAL;
                goto out;
        }
        rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
@@ -220,7 +223,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
        if (!rc) {
                pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
                                " while TPGS_EXPLICT_ALUA is disabled\n");
-               rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               rc = -EINVAL;
                goto out;
        }
 
@@ -245,7 +249,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
                         * REQUEST, and the additional sense code set to INVALID
                         * FIELD IN PARAMETER LIST.
                         */
-                       rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       rc = -EINVAL;
                        goto out;
                }
                rc = -1;
@@ -298,7 +303,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
                         * throw an exception with ASCQ: INVALID_PARAMETER_LIST
                         */
                        if (rc != 0) {
-                               rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                               rc = -EINVAL;
                                goto out;
                        }
                } else {
@@ -335,7 +341,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
                         * INVALID_PARAMETER_LIST
                         */
                        if (rc != 0) {
-                               rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                               rc = -EINVAL;
                                goto out;
                        }
                }
@@ -1184,7 +1191,6 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
         * struct t10_alua_lu_gp.
         */
        spin_lock(&lu_gps_lock);
-       atomic_set(&lu_gp->lu_gp_shutdown, 1);
        list_del(&lu_gp->lu_gp_node);
        alua_lu_gps_count--;
        spin_unlock(&lu_gps_lock);
@@ -1438,7 +1444,6 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
 
        tg_pt_gp_mem->tg_pt = port;
        port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
-       atomic_set(&port->sep_tg_pt_gp_active, 1);
 
        return tg_pt_gp_mem;
 }
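
From here on, most of the target-core hunks apply the same conversion: the driver-private PYX_TRANSPORT_* return codes disappear, the SCSI-level failure reason moves into cmd->scsi_sense_reason as a TCM_* value, and the function returns a plain negative errno (or 0 on success). A condensed sketch of the pattern; the two condition names are placeholders, while the TCM_* constants and the field are the ones used in the hunks:

	/* Sketch: report SCSI-level failures via the command's sense
	 * reason and keep the return value a normal kernel errno. */
	if (unsupported_opcode) {
		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		return -ENOSYS;
	}
	if (bad_parameter_list) {
		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
		return -EINVAL;
	}
	return 0;	/* success: no PYX_TRANSPORT_SENT_TO_TRANSPORT needed */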
index 683ba02..831468b 100644 (file)
@@ -478,7 +478,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
        if (cmd->data_length < 60)
                return 0;
 
-       buf[2] = 0x3c;
+       buf[3] = 0x3c;
        /* Set HEADSUP, ORDSUP, SIMPSUP */
        buf[5] = 0x07;
 
@@ -703,6 +703,7 @@ int target_emulate_inquiry(struct se_task *task)
        if (cmd->data_length < 4) {
                pr_err("SCSI Inquiry payload length: %u"
                        " too small for EVPD=1\n", cmd->data_length);
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
                return -EINVAL;
        }
 
@@ -719,6 +720,7 @@ int target_emulate_inquiry(struct se_task *task)
        }
 
        pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
+       cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
        ret = -EINVAL;
 
 out_unmap:
@@ -969,7 +971,8 @@ int target_emulate_modesense(struct se_task *task)
        default:
                pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
                       cdb[2] & 0x3f, cdb[3]);
-               return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
+               cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
+               return -EINVAL;
        }
        offset += length;
 
@@ -1027,7 +1030,8 @@ int target_emulate_request_sense(struct se_task *task)
        if (cdb[1] & 0x01) {
                pr_err("REQUEST_SENSE description emulation not"
                        " supported\n");
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -ENOSYS;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -1100,7 +1104,8 @@ int target_emulate_unmap(struct se_task *task)
        if (!dev->transport->do_discard) {
                pr_err("UNMAP emulation not supported for: %s\n",
                                dev->transport->name);
-               return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               return -ENOSYS;
        }
 
        /* First UNMAP block descriptor starts at 8 byte offset */
@@ -1157,7 +1162,8 @@ int target_emulate_write_same(struct se_task *task)
        if (!dev->transport->do_discard) {
                pr_err("WRITE_SAME emulation not supported"
                                " for: %s\n", dev->transport->name);
-               return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               return -ENOSYS;
        }
 
        if (cmd->t_task_cdb[0] == WRITE_SAME)
@@ -1193,11 +1199,13 @@ int target_emulate_write_same(struct se_task *task)
 int target_emulate_synchronize_cache(struct se_task *task)
 {
        struct se_device *dev = task->task_se_cmd->se_dev;
+       struct se_cmd *cmd = task->task_se_cmd;
 
        if (!dev->transport->do_sync_cache) {
                pr_err("SYNCHRONIZE_CACHE emulation not supported"
                        " for: %s\n", dev->transport->name);
-               return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               return -ENOSYS;
        }
 
        dev->transport->do_sync_cache(task);
index e0c1e8a..93d4f6a 100644 (file)
@@ -67,9 +67,6 @@ static struct config_group target_core_hbagroup;
 static struct config_group alua_group;
 static struct config_group alua_lu_gps_group;
 
-static DEFINE_SPINLOCK(se_device_lock);
-static LIST_HEAD(se_dev_list);
-
 static inline struct se_hba *
 item_to_hba(struct config_item *item)
 {
@@ -2741,7 +2738,6 @@ static struct config_group *target_core_make_subdev(
                                " struct se_subsystem_dev\n");
                goto unlock;
        }
-       INIT_LIST_HEAD(&se_dev->se_dev_node);
        INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
        spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
        INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
@@ -2777,9 +2773,6 @@ static struct config_group *target_core_make_subdev(
                        " from allocate_virtdevice()\n");
                goto out;
        }
-       spin_lock(&se_device_lock);
-       list_add_tail(&se_dev->se_dev_node, &se_dev_list);
-       spin_unlock(&se_device_lock);
 
        config_group_init_type_name(&se_dev->se_dev_group, name,
                        &target_core_dev_cit);
@@ -2874,10 +2867,6 @@ static void target_core_drop_subdev(
        mutex_lock(&hba->hba_access_mutex);
        t = hba->transport;
 
-       spin_lock(&se_device_lock);
-       list_del(&se_dev->se_dev_node);
-       spin_unlock(&se_device_lock);
-
        dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
        for (i = 0; dev_stat_grp->default_groups[i]; i++) {
                df_item = &dev_stat_grp->default_groups[i]->cg_item;
index ba5edec..9b86394 100644 (file)
@@ -104,7 +104,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                se_cmd->se_lun = deve->se_lun;
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
-               se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
        }
        spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
@@ -137,7 +136,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                se_lun = &se_sess->se_tpg->tpg_virt_lun0;
                se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
                se_cmd->orig_fe_lun = 0;
-               se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
        }
        /*
@@ -200,7 +198,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                se_lun = deve->se_lun;
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
-               se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
        }
        spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
 
@@ -708,7 +705,7 @@ done:
 
        se_task->task_scsi_status = GOOD;
        transport_complete_task(se_task, 1);
-       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+       return 0;
 }
 
 /*     se_release_device_for_hba():
@@ -957,8 +954,12 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
                return -EINVAL;
        }
 
-       pr_err("dpo_emulated not supported\n");
-       return -EINVAL;
+       if (flag) {
+               pr_err("dpo_emulated not supported\n");
+               return -EINVAL;
+       }
+
+       return 0;
 }
 
 int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
@@ -968,7 +969,7 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
                return -EINVAL;
        }
 
-       if (dev->transport->fua_write_emulated == 0) {
+       if (flag && dev->transport->fua_write_emulated == 0) {
                pr_err("fua_write_emulated not supported\n");
                return -EINVAL;
        }
@@ -985,8 +986,12 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
                return -EINVAL;
        }
 
-       pr_err("ua read emulated not supported\n");
-       return -EINVAL;
+       if (flag) {
+               pr_err("ua read emulated not supported\n");
+               return -EINVAL;
+       }
+
+       return 0;
 }
 
 int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
@@ -995,7 +1000,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }
-       if (dev->transport->write_cache_emulated == 0) {
+       if (flag && dev->transport->write_cache_emulated == 0) {
                pr_err("write_cache_emulated not supported\n");
                return -EINVAL;
        }
@@ -1056,7 +1061,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
         * We expect this value to be non-zero when generic Block Layer
         * Discard supported is detected iblock_create_virtdevice().
         */
-       if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+       if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
                pr_err("Generic Block Discard not supported\n");
                return -ENOSYS;
        }
@@ -1077,7 +1082,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
         * We expect this value to be non-zero when generic Block Layer
         * Discard supported is detected iblock_create_virtdevice().
         */
-       if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+       if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
                pr_err("Generic Block Discard not supported\n");
                return -ENOSYS;
        }
@@ -1587,7 +1592,6 @@ int core_dev_setup_virtual_lun0(void)
                ret = -ENOMEM;
                goto out;
        }
-       INIT_LIST_HEAD(&se_dev->se_dev_node);
        INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
        spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
        INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
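
The se_dev_set_emulate_*() hunks above only reject an attribute when the caller actually tries to enable it (flag != 0); writing 0 to an unsupported emulation now succeeds. A minimal sketch of that shape, with a hypothetical function name and the attribute store left out:

	static int set_emulate_feature(struct se_device *dev, int flag)
	{
		if (flag != 0 && flag != 1)
			return -EINVAL;		/* attribute is boolean */
		if (flag && dev->transport->write_cache_emulated == 0) {
			pr_err("write_cache_emulated not supported\n");
			return -EINVAL;		/* refuse only to enable */
		}
		/* flag == 0, or the backend supports the emulation: accept */
		return 0;
	}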
index 67cd6fe..b4864fb 100644 (file)
@@ -289,9 +289,9 @@ static int fd_do_readv(struct se_task *task)
                return -ENOMEM;
        }
 
-       for (i = 0; i < task->task_sg_nents; i++) {
-               iov[i].iov_len = sg[i].length;
-               iov[i].iov_base = sg_virt(&sg[i]);
+       for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+               iov[i].iov_len = sg->length;
+               iov[i].iov_base = sg_virt(sg);
        }
 
        old_fs = get_fs();
@@ -342,9 +342,9 @@ static int fd_do_writev(struct se_task *task)
                return -ENOMEM;
        }
 
-       for (i = 0; i < task->task_sg_nents; i++) {
-               iov[i].iov_len = sg[i].length;
-               iov[i].iov_base = sg_virt(&sg[i]);
+       for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+               iov[i].iov_len = sg->length;
+               iov[i].iov_base = sg_virt(sg);
        }
 
        old_fs = get_fs();
@@ -438,7 +438,7 @@ static int fd_do_task(struct se_task *task)
                if (ret > 0 &&
                    dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
                    dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
-                   cmd->t_tasks_fua) {
+                   (cmd->se_cmd_flags & SCF_FUA)) {
                        /*
                         * We might need to be a bit smarter here
                         * and return some sense data to let the initiator
@@ -449,13 +449,15 @@ static int fd_do_task(struct se_task *task)
 
        }
 
-       if (ret < 0)
+       if (ret < 0) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                return ret;
+       }
        if (ret) {
                task->task_scsi_status = GOOD;
                transport_complete_task(task, 1);
        }
-       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+       return 0;
 }
 
 /*     fd_free_task(): (Part of se_subsystem_api_t template)
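
The fd_do_readv()/fd_do_writev() hunks above replace direct sg[i] indexing with for_each_sg(), which also works when the scatterlist is chained rather than one flat array. A self-contained sketch of the iterator (function name hypothetical):

	#include <linux/scatterlist.h>

	/* Sketch: walk a (possibly chained) scatterlist entry by entry. */
	static size_t total_sg_bytes(struct scatterlist *sgl, int nents)
	{
		struct scatterlist *sg;
		size_t total = 0;
		int i;

		for_each_sg(sgl, sg, nents, i)
			total += sg->length;
		return total;
	}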
index 7698efe..4aa9922 100644 (file)
@@ -531,7 +531,7 @@ static int iblock_do_task(struct se_task *task)
                 */
                if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
                    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
-                    task->task_se_cmd->t_tasks_fua))
+                    (cmd->se_cmd_flags & SCF_FUA)))
                        rw = WRITE_FUA;
                else
                        rw = WRITE;
@@ -554,12 +554,15 @@ static int iblock_do_task(struct se_task *task)
        else {
                pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
                                " %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -ENOSYS;
        }
 
        bio = iblock_get_bio(task, block_lba, sg_num);
-       if (!bio)
-               return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+       if (!bio) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -ENOMEM;
+       }
 
        bio_list_init(&list);
        bio_list_add(&list, bio);
@@ -588,12 +591,13 @@ static int iblock_do_task(struct se_task *task)
                submit_bio(rw, bio);
        blk_finish_plug(&plug);
 
-       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+       return 0;
 
 fail:
        while ((bio = bio_list_pop(&list)))
                bio_put(bio);
-       return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+       cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       return -ENOMEM;
 }
 
 static u32 iblock_get_device_rev(struct se_device *dev)
index 5a4ebfc..95dee70 100644 (file)
@@ -191,7 +191,7 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
                pr_err("Received legacy SPC-2 RESERVE/RELEASE"
                        " while active SPC-3 registrations exist,"
                        " returning RESERVATION_CONFLICT\n");
-               *ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
                return true;
        }
 
@@ -252,7 +252,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
            (cmd->t_task_cdb[1] & 0x02)) {
                pr_err("LongIO and Obselete Bits set, returning"
                                " ILLEGAL_REQUEST\n");
-               ret = PYX_TRANSPORT_ILLEGAL_REQUEST;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -277,7 +278,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
                        " from %s \n", cmd->se_lun->unpacked_lun,
                        cmd->se_deve->mapped_lun,
                        sess->se_node_acl->initiatorname);
-               ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               ret = -EINVAL;
                goto out_unlock;
        }
 
@@ -1510,7 +1512,8 @@ static int core_scsi3_decode_spec_i_port(
        tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
        if (!tidh_new) {
                pr_err("Unable to allocate tidh_new\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        INIT_LIST_HEAD(&tidh_new->dest_list);
        tidh_new->dest_tpg = tpg;
@@ -1522,7 +1525,8 @@ static int core_scsi3_decode_spec_i_port(
                                sa_res_key, all_tg_pt, aptpl);
        if (!local_pr_reg) {
                kfree(tidh_new);
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -ENOMEM;
        }
        tidh_new->dest_pr_reg = local_pr_reg;
        /*
@@ -1548,7 +1552,8 @@ static int core_scsi3_decode_spec_i_port(
                pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
                        " does not equal CDB data_length: %u\n", tpdl,
                        cmd->data_length);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -1598,7 +1603,9 @@ static int core_scsi3_decode_spec_i_port(
                                        " for tmp_tpg\n");
                                atomic_dec(&tmp_tpg->tpg_pr_ref_count);
                                smp_mb__after_atomic_dec();
-                               ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+                               cmd->scsi_sense_reason =
+                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                               ret = -EINVAL;
                                goto out;
                        }
                        /*
@@ -1628,7 +1635,9 @@ static int core_scsi3_decode_spec_i_port(
                                atomic_dec(&dest_node_acl->acl_pr_ref_count);
                                smp_mb__after_atomic_dec();
                                core_scsi3_tpg_undepend_item(tmp_tpg);
-                               ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+                               cmd->scsi_sense_reason =
+                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                               ret = -EINVAL;
                                goto out;
                        }
 
@@ -1646,7 +1655,8 @@ static int core_scsi3_decode_spec_i_port(
                if (!dest_tpg) {
                        pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
                                        " dest_tpg\n");
-                       ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       ret = -EINVAL;
                        goto out;
                }
 #if 0
@@ -1660,7 +1670,8 @@ static int core_scsi3_decode_spec_i_port(
                                " %u for Transport ID: %s\n", tid_len, ptr);
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
-                       ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       ret = -EINVAL;
                        goto out;
                }
                /*
@@ -1678,7 +1689,8 @@ static int core_scsi3_decode_spec_i_port(
 
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
-                       ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       ret = -EINVAL;
                        goto out;
                }
 
@@ -1690,7 +1702,9 @@ static int core_scsi3_decode_spec_i_port(
                        smp_mb__after_atomic_dec();
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
-                       ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+                       cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       ret = -EINVAL;
                        goto out;
                }
 #if 0
@@ -1727,7 +1741,9 @@ static int core_scsi3_decode_spec_i_port(
                        core_scsi3_lunacl_undepend_item(dest_se_deve);
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
-                       ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+                       cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       ret = -ENOMEM;
                        goto out;
                }
                INIT_LIST_HEAD(&tidh_new->dest_list);
@@ -1759,7 +1775,8 @@ static int core_scsi3_decode_spec_i_port(
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
                        kfree(tidh_new);
-                       ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       ret = -EINVAL;
                        goto out;
                }
                tidh_new->dest_pr_reg = dest_pr_reg;
@@ -2098,7 +2115,8 @@ static int core_scsi3_emulate_pro_register(
 
        if (!se_sess || !se_lun) {
                pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        se_tpg = se_sess->se_tpg;
        se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2117,13 +2135,14 @@ static int core_scsi3_emulate_pro_register(
                if (res_key) {
                        pr_warn("SPC-3 PR: Reservation Key non-zero"
                                " for SA REGISTER, returning CONFLICT\n");
-                       return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                       return -EINVAL;
                }
                /*
                 * Do nothing but return GOOD status.
                 */
                if (!sa_res_key)
-                       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+                       return 0;
 
                if (!spec_i_pt) {
                        /*
@@ -2138,7 +2157,8 @@ static int core_scsi3_emulate_pro_register(
                        if (ret != 0) {
                                pr_err("Unable to allocate"
                                        " struct t10_pr_registration\n");
-                               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                               return -EINVAL;
                        }
                } else {
                        /*
@@ -2197,14 +2217,16 @@ static int core_scsi3_emulate_pro_register(
                                        " 0x%016Lx\n", res_key,
                                        pr_reg->pr_res_key);
                                core_scsi3_put_pr_reg(pr_reg);
-                               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                               return -EINVAL;
                        }
                }
                if (spec_i_pt) {
                        pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
                                " set while sa_res_key=0\n");
                        core_scsi3_put_pr_reg(pr_reg);
-                       return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       return -EINVAL;
                }
                /*
                 * An existing ALL_TG_PT=1 registration being released
@@ -2215,7 +2237,8 @@ static int core_scsi3_emulate_pro_register(
                                " registration exists, but ALL_TG_PT=1 bit not"
                                " present in received PROUT\n");
                        core_scsi3_put_pr_reg(pr_reg);
-                       return PYX_TRANSPORT_INVALID_CDB_FIELD;
+                       cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+                       return -EINVAL;
                }
                /*
                 * Allocate APTPL metadata buffer used for UNREGISTER ops
@@ -2227,7 +2250,9 @@ static int core_scsi3_emulate_pro_register(
                                pr_err("Unable to allocate"
                                        " pr_aptpl_buf\n");
                                core_scsi3_put_pr_reg(pr_reg);
-                               return PYX_TRANSPORT_LU_COMM_FAILURE;
+                               cmd->scsi_sense_reason =
+                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                               return -EINVAL;
                        }
                }
                /*
@@ -2241,7 +2266,8 @@ static int core_scsi3_emulate_pro_register(
                        if (pr_holder < 0) {
                                kfree(pr_aptpl_buf);
                                core_scsi3_put_pr_reg(pr_reg);
-                               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                               return -EINVAL;
                        }
 
                        spin_lock(&pr_tmpl->registration_lock);
@@ -2405,7 +2431,8 @@ static int core_scsi3_pro_reserve(
 
        if (!se_sess || !se_lun) {
                pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        se_tpg = se_sess->se_tpg;
        se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2417,7 +2444,8 @@ static int core_scsi3_pro_reserve(
        if (!pr_reg) {
                pr_err("SPC-3 PR: Unable to locate"
                        " PR_REGISTERED *pr_reg for RESERVE\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        /*
         * From spc4r17 Section 5.7.9: Reserving:
@@ -2433,7 +2461,8 @@ static int core_scsi3_pro_reserve(
                        " does not match existing SA REGISTER res_key:"
                        " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        /*
         * From spc4r17 Section 5.7.9: Reserving:
@@ -2448,7 +2477,8 @@ static int core_scsi3_pro_reserve(
        if (scope != PR_SCOPE_LU_SCOPE) {
                pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
        /*
         * See if we have an existing PR reservation holder pointer at
@@ -2480,7 +2510,8 @@ static int core_scsi3_pro_reserve(
 
                        spin_unlock(&dev->dev_reservation_lock);
                        core_scsi3_put_pr_reg(pr_reg);
-                       return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                       return -EINVAL;
                }
                /*
                 * From spc4r17 Section 5.7.9: Reserving:
@@ -2503,7 +2534,8 @@ static int core_scsi3_pro_reserve(
 
                        spin_unlock(&dev->dev_reservation_lock);
                        core_scsi3_put_pr_reg(pr_reg);
-                       return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                       return -EINVAL;
                }
                /*
                 * From spc4r17 Section 5.7.9: Reserving:
@@ -2517,7 +2549,7 @@ static int core_scsi3_pro_reserve(
                 */
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+               return 0;
        }
        /*
         * Otherwise, our *pr_reg becomes the PR reservation holder for said
@@ -2574,7 +2606,8 @@ static int core_scsi3_emulate_pro_reserve(
        default:
                pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
                        " 0x%02x\n", type);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        return ret;
@@ -2630,7 +2663,8 @@ static int core_scsi3_emulate_pro_release(
 
        if (!se_sess || !se_lun) {
                pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        /*
         * Locate the existing *pr_reg via struct se_node_acl pointers
@@ -2639,7 +2673,8 @@ static int core_scsi3_emulate_pro_release(
        if (!pr_reg) {
                pr_err("SPC-3 PR: Unable to locate"
                        " PR_REGISTERED *pr_reg for RELEASE\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        /*
         * From spc4r17 Section 5.7.11.2 Releasing:
@@ -2661,7 +2696,7 @@ static int core_scsi3_emulate_pro_release(
                 */
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+               return 0;
        }
        if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
            (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
@@ -2675,7 +2710,7 @@ static int core_scsi3_emulate_pro_release(
                 */
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+               return 0;
        }
        /*
         * From spc4r17 Section 5.7.11.2 Releasing:
@@ -2697,7 +2732,8 @@ static int core_scsi3_emulate_pro_release(
                        " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        /*
         * From spc4r17 Section 5.7.11.2 Releasing and above:
@@ -2719,7 +2755,8 @@ static int core_scsi3_emulate_pro_release(
 
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        /*
         * In response to a persistent reservation release request from the
@@ -2802,7 +2839,8 @@ static int core_scsi3_emulate_pro_clear(
        if (!pr_reg_n) {
                pr_err("SPC-3 PR: Unable to locate"
                        " PR_REGISTERED *pr_reg for CLEAR\n");
-                       return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        /*
         * From spc4r17 section 5.7.11.6, Clearing:
@@ -2821,7 +2859,8 @@ static int core_scsi3_emulate_pro_clear(
                        " existing SA REGISTER res_key:"
                        " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
                core_scsi3_put_pr_reg(pr_reg_n);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        /*
         * a) Release the persistent reservation, if any;
@@ -2979,8 +3018,10 @@ static int core_scsi3_pro_preempt(
        int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
        int prh_type = 0, prh_scope = 0, ret;
 
-       if (!se_sess)
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+       if (!se_sess) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
+       }
 
        se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
        pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
@@ -2989,16 +3030,19 @@ static int core_scsi3_pro_preempt(
                pr_err("SPC-3 PR: Unable to locate"
                        " PR_REGISTERED *pr_reg for PREEMPT%s\n",
                        (abort) ? "_AND_ABORT" : "");
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        if (pr_reg_n->pr_res_key != res_key) {
                core_scsi3_put_pr_reg(pr_reg_n);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        if (scope != PR_SCOPE_LU_SCOPE) {
                pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
                core_scsi3_put_pr_reg(pr_reg_n);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
        INIT_LIST_HEAD(&preempt_and_abort_list);
 
@@ -3012,7 +3056,8 @@ static int core_scsi3_pro_preempt(
        if (!all_reg && !sa_res_key) {
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg_n);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
        /*
         * From spc4r17, section 5.7.11.4.4 Removing Registrations:
@@ -3106,7 +3151,8 @@ static int core_scsi3_pro_preempt(
                if (!released_regs) {
                        spin_unlock(&dev->dev_reservation_lock);
                        core_scsi3_put_pr_reg(pr_reg_n);
-                       return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                       return -EINVAL;
                }
                /*
                 * For an existing all registrants type reservation
@@ -3297,7 +3343,8 @@ static int core_scsi3_emulate_pro_preempt(
        default:
                pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
                        " Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        return ret;
@@ -3331,7 +3378,8 @@ static int core_scsi3_emulate_pro_register_and_move(
 
        if (!se_sess || !se_lun) {
                pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        memset(dest_iport, 0, 64);
        memset(i_buf, 0, PR_REG_ISID_ID_LEN);
@@ -3349,7 +3397,8 @@ static int core_scsi3_emulate_pro_register_and_move(
        if (!pr_reg) {
                pr_err("SPC-3 PR: Unable to locate PR_REGISTERED"
                        " *pr_reg for REGISTER_AND_MOVE\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        /*
          * The provided reservation key must match the existing reservation key
@@ -3360,7 +3409,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                        " res_key: 0x%016Lx does not match existing SA REGISTER"
                        " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        /*
          * The service action reservation key needs to be non-zero
@@ -3369,7 +3419,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero"
                        " sa_res_key\n");
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
 
        /*
@@ -3392,7 +3443,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                        " does not equal CDB data_length: %u\n", tid_len,
                        cmd->data_length);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
 
        spin_lock(&dev->se_port_lock);
@@ -3417,7 +3469,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                        atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
                        smp_mb__after_atomic_dec();
                        core_scsi3_put_pr_reg(pr_reg);
-                       return PYX_TRANSPORT_LU_COMM_FAILURE;
+                       cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       return -EINVAL;
                }
 
                spin_lock(&dev->se_port_lock);
@@ -3430,7 +3483,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                        " fabric ops from Relative Target Port Identifier:"
                        " %hu\n", rtpi);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -3445,14 +3499,16 @@ static int core_scsi3_emulate_pro_register_and_move(
                        " from fabric: %s\n", proto_ident,
                        dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
                        dest_tf_ops->get_fabric_name());
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
                pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
                        " containg a valid tpg_parse_pr_out_transport_id"
                        " function pointer\n");
-               ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               ret = -EINVAL;
                goto out;
        }
        initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
@@ -3460,7 +3516,8 @@ static int core_scsi3_emulate_pro_register_and_move(
        if (!initiator_str) {
                pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
                        " initiator_str from Transport ID\n");
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
 
@@ -3489,7 +3546,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
                        " matches: %s on received I_T Nexus\n", initiator_str,
                        pr_reg_nacl->initiatorname);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
@@ -3497,7 +3555,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                        " matches: %s %s on received I_T Nexus\n",
                        initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
                        pr_reg->pr_reg_isid);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
 after_iport_check:
@@ -3517,7 +3576,8 @@ after_iport_check:
                pr_err("Unable to locate %s dest_node_acl for"
                        " TransportID%s\n", dest_tf_ops->get_fabric_name(),
                        initiator_str);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
@@ -3527,7 +3587,8 @@ after_iport_check:
                atomic_dec(&dest_node_acl->acl_pr_ref_count);
                smp_mb__after_atomic_dec();
                dest_node_acl = NULL;
-               ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
 #if 0
@@ -3543,7 +3604,8 @@ after_iport_check:
        if (!dest_se_deve) {
                pr_err("Unable to locate %s dest_se_deve from RTPI:"
                        " %hu\n",  dest_tf_ops->get_fabric_name(), rtpi);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
 
@@ -3553,7 +3615,8 @@ after_iport_check:
                atomic_dec(&dest_se_deve->pr_ref_count);
                smp_mb__after_atomic_dec();
                dest_se_deve = NULL;
-               ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               ret = -EINVAL;
                goto out;
        }
 #if 0
@@ -3572,7 +3635,8 @@ after_iport_check:
                pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
                        " currently held\n");
                spin_unlock(&dev->dev_reservation_lock);
-               ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -3585,7 +3649,8 @@ after_iport_check:
                pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
                        " Nexus is not reservation holder\n");
                spin_unlock(&dev->dev_reservation_lock);
-               ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -3603,7 +3668,8 @@ after_iport_check:
                        " reservation for type: %s\n",
                        core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
                spin_unlock(&dev->dev_reservation_lock);
-               ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               ret = -EINVAL;
                goto out;
        }
        pr_res_nacl = pr_res_holder->pr_reg_nacl;
@@ -3640,7 +3706,8 @@ after_iport_check:
                                sa_res_key, 0, aptpl, 2, 1);
                if (ret != 0) {
                        spin_unlock(&dev->dev_reservation_lock);
-                       ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       ret = -EINVAL;
                        goto out;
                }
                dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
@@ -3771,7 +3838,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
                pr_err("Received PERSISTENT_RESERVE CDB while legacy"
                        " SPC-2 reservation is held, returning"
                        " RESERVATION_CONFLICT\n");
-               ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               ret = -EINVAL;
                goto out;
        }
 
@@ -3779,13 +3847,16 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
         * FIXME: A NULL struct se_session pointer means that this is not coming from
         * a $FABRIC_MOD's nexus, but from internal passthrough ops.
         */
-       if (!cmd->se_sess)
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+       if (!cmd->se_sess) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
+       }
 
        if (cmd->data_length < 24) {
                pr_warn("SPC-PR: Received PR OUT parameter list"
                        " length too small: %u\n", cmd->data_length);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -3820,7 +3891,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
         * SPEC_I_PT=1 is only valid for Service action: REGISTER
         */
        if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) {
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
 
@@ -3837,7 +3909,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
            (cmd->data_length != 24)) {
                pr_warn("SPC-PR: Received PR OUT illegal parameter"
                        " list length: %u\n", cmd->data_length);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -3878,7 +3951,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
        default:
                pr_err("Unknown PERSISTENT_RESERVE_OUT service"
                        " action: 0x%02x\n", cdb[1] & 0x1f);
-               ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               ret = -EINVAL;
                break;
        }
 
@@ -3906,7 +3980,8 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
        if (cmd->data_length < 8) {
                pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
                        " too small\n", cmd->data_length);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -3965,7 +4040,8 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
        if (cmd->data_length < 8) {
                pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
                        " too small\n", cmd->data_length);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -4047,7 +4123,8 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
        if (cmd->data_length < 6) {
                pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
                        " %u too small\n", cmd->data_length);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -4108,7 +4185,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
        if (cmd->data_length < 8) {
                pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
                        " too small\n", cmd->data_length);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -4255,7 +4333,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
                pr_err("Received PERSISTENT_RESERVE CDB while legacy"
                        " SPC-2 reservation is held, returning"
                        " RESERVATION_CONFLICT\n");
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
 
        switch (cmd->t_task_cdb[1] & 0x1f) {
@@ -4274,7 +4353,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
        default:
                pr_err("Unknown PERSISTENT_RESERVE_IN service"
                        " action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f);
-               ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               ret = -EINVAL;
                break;
        }
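
Taken together, the target_core_pr.c hunks above apply one mechanical conversion: error paths stop returning legacy PYX_TRANSPORT_* status codes and instead record a TCM_* value in cmd->scsi_sense_reason and return a negative errno, while paths that used to return PYX_TRANSPORT_SENT_TO_TRANSPORT now return 0. The following is a minimal, hedged sketch of that convention in isolation; the enum subset and the se_cmd_stub type are illustrative stand-ins, not the real struct se_cmd:

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-ins for the TCM_* sense reasons and struct se_cmd. */
enum tcm_sense_reason {
	TCM_NO_SENSE = 0,
	TCM_RESERVATION_CONFLICT,
	TCM_INVALID_CDB_FIELD,
	TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
};

struct se_cmd_stub {
	enum tcm_sense_reason scsi_sense_reason;
};

/*
 * Old style:  return PYX_TRANSPORT_RESERVATION_CONFLICT;
 * New style:  record the sense reason on the command and return -errno;
 *             return 0 where PYX_TRANSPORT_SENT_TO_TRANSPORT was used.
 */
static int reserve_check(struct se_cmd_stub *cmd, int conflict)
{
	if (conflict) {
		cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	struct se_cmd_stub cmd = { TCM_NO_SENSE };

	printf("%d %d\n", reserve_check(&cmd, 1), cmd.scsi_sense_reason);
	return 0;
}
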
 
index ed32e1e..8b15e56 100644 (file)
@@ -963,6 +963,7 @@ static inline struct bio *pscsi_get_bio(int sg_num)
 static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
                struct bio **hbio)
 {
+       struct se_cmd *cmd = task->task_se_cmd;
        struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
        u32 task_sg_num = task->task_sg_nents;
        struct bio *bio = NULL, *tbio = NULL;
@@ -971,7 +972,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
        u32 data_len = task->task_size, i, len, bytes, off;
        int nr_pages = (task->task_size + task_sg[0].offset +
                        PAGE_SIZE - 1) >> PAGE_SHIFT;
-       int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+       int nr_vecs = 0, rc;
        int rw = (task->task_data_direction == DMA_TO_DEVICE);
 
        *hbio = NULL;
@@ -1058,11 +1059,13 @@ fail:
                bio->bi_next = NULL;
                bio_endio(bio, 0);      /* XXX: should be error */
        }
-       return ret;
+       cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       return -ENOMEM;
 }
 
 static int pscsi_do_task(struct se_task *task)
 {
+       struct se_cmd *cmd = task->task_se_cmd;
        struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
        struct pscsi_plugin_task *pt = PSCSI_TASK(task);
        struct request *req;
@@ -1078,7 +1081,9 @@ static int pscsi_do_task(struct se_task *task)
                if (!req || IS_ERR(req)) {
                        pr_err("PSCSI: blk_get_request() failed: %ld\n",
                                        req ? IS_ERR(req) : -ENOMEM);
-                       return PYX_TRANSPORT_LU_COMM_FAILURE;
+                       cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       return -ENODEV;
                }
        } else {
                BUG_ON(!task->task_size);
@@ -1087,8 +1092,11 @@ static int pscsi_do_task(struct se_task *task)
                 * Setup the main struct request for the task->task_sg[] payload
                 */
                ret = pscsi_map_sg(task, task->task_sg, &hbio);
-               if (ret < 0)
-                       return PYX_TRANSPORT_LU_COMM_FAILURE;
+               if (ret < 0) {
+                       cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       return ret;
+               }
 
                req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
                                       GFP_KERNEL);
@@ -1115,7 +1123,7 @@ static int pscsi_do_task(struct se_task *task)
                        (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
                        pscsi_req_done);
 
-       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+       return 0;
 
 fail:
        while (hbio) {
@@ -1124,7 +1132,8 @@ fail:
                bio->bi_next = NULL;
                bio_endio(bio, 0);      /* XXX: should be error */
        }
-       return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+       cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       return -ENOMEM;
 }
 
 /*     pscsi_get_sense_buffer():
@@ -1198,9 +1207,8 @@ static inline void pscsi_process_SAM_status(
                        " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
                        pt->pscsi_result);
                task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
-               task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
-               task->task_se_cmd->transport_error_status =
-                                       PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               task->task_se_cmd->scsi_sense_reason =
+                                       TCM_UNSUPPORTED_SCSI_OPCODE;
                transport_complete_task(task, 0);
                break;
        }
index 5158d38..02e51fa 100644 (file)
@@ -343,235 +343,74 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
        return NULL;
 }
 
-/*     rd_MEMCPY_read():
- *
- *
- */
-static int rd_MEMCPY_read(struct rd_request *req)
+static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
 {
        struct se_task *task = &req->rd_task;
        struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
        struct rd_dev_sg_table *table;
-       struct scatterlist *sg_d, *sg_s;
-       void *dst, *src;
-       u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
-       u32 length, page_end = 0, table_sg_end;
+       struct scatterlist *rd_sg;
+       struct sg_mapping_iter m;
        u32 rd_offset = req->rd_offset;
+       u32 src_len;
 
        table = rd_get_sg_table(dev, req->rd_page);
        if (!table)
                return -EINVAL;
 
-       table_sg_end = (table->page_end_offset - req->rd_page);
-       sg_d = task->task_sg;
-       sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+       rd_sg = &table->sg_table[req->rd_page - table->page_start_offset];
 
-       pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
-               " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
-               req->rd_page, req->rd_offset);
-
-       src_offset = rd_offset;
+       pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
+                       dev->rd_dev_id, read_rd ? "Read" : "Write",
+                       task->task_lba, req->rd_size, req->rd_page,
+                       rd_offset);
 
+       src_len = PAGE_SIZE - rd_offset;
+       sg_miter_start(&m, task->task_sg, task->task_sg_nents,
+                       read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
        while (req->rd_size) {
-               if ((sg_d[i].length - dst_offset) <
-                   (sg_s[j].length - src_offset)) {
-                       length = (sg_d[i].length - dst_offset);
-
-                       pr_debug("Step 1 - sg_d[%d]: %p length: %d"
-                               " offset: %u sg_s[%d].length: %u\n", i,
-                               &sg_d[i], sg_d[i].length, sg_d[i].offset, j,
-                               sg_s[j].length);
-                       pr_debug("Step 1 - length: %u dst_offset: %u"
-                               " src_offset: %u\n", length, dst_offset,
-                               src_offset);
-
-                       if (length > req->rd_size)
-                               length = req->rd_size;
-
-                       dst = sg_virt(&sg_d[i++]) + dst_offset;
-                       BUG_ON(!dst);
-
-                       src = sg_virt(&sg_s[j]) + src_offset;
-                       BUG_ON(!src);
-
-                       dst_offset = 0;
-                       src_offset = length;
-                       page_end = 0;
-               } else {
-                       length = (sg_s[j].length - src_offset);
-
-                       pr_debug("Step 2 - sg_d[%d]: %p length: %d"
-                               " offset: %u sg_s[%d].length: %u\n", i,
-                               &sg_d[i], sg_d[i].length, sg_d[i].offset,
-                               j, sg_s[j].length);
-                       pr_debug("Step 2 - length: %u dst_offset: %u"
-                               " src_offset: %u\n", length, dst_offset,
-                               src_offset);
-
-                       if (length > req->rd_size)
-                               length = req->rd_size;
-
-                       dst = sg_virt(&sg_d[i]) + dst_offset;
-                       BUG_ON(!dst);
-
-                       if (sg_d[i].length == length) {
-                               i++;
-                               dst_offset = 0;
-                       } else
-                               dst_offset = length;
-
-                       src = sg_virt(&sg_s[j++]) + src_offset;
-                       BUG_ON(!src);
-
-                       src_offset = 0;
-                       page_end = 1;
-               }
+               u32 len;
+               void *rd_addr;
 
-               memcpy(dst, src, length);
+               sg_miter_next(&m);
+               len = min((u32)m.length, src_len);
+               m.consumed = len;
 
-               pr_debug("page: %u, remaining size: %u, length: %u,"
-                       " i: %u, j: %u\n", req->rd_page,
-                       (req->rd_size - length), length, i, j);
+               rd_addr = sg_virt(rd_sg) + rd_offset;
 
-               req->rd_size -= length;
-               if (!req->rd_size)
-                       return 0;
+               if (read_rd)
+                       memcpy(m.addr, rd_addr, len);
+               else
+                       memcpy(rd_addr, m.addr, len);
 
-               if (!page_end)
+               req->rd_size -= len;
+               if (!req->rd_size)
                        continue;
 
-               if (++req->rd_page <= table->page_end_offset) {
-                       pr_debug("page: %u in same page table\n",
-                               req->rd_page);
+               src_len -= len;
+               if (src_len) {
+                       rd_offset += len;
                        continue;
                }
 
-               pr_debug("getting new page table for page: %u\n",
-                               req->rd_page);
-
-               table = rd_get_sg_table(dev, req->rd_page);
-               if (!table)
-                       return -EINVAL;
-
-               sg_s = &table->sg_table[j = 0];
-       }
-
-       return 0;
-}
-
-/*     rd_MEMCPY_write():
- *
- *
- */
-static int rd_MEMCPY_write(struct rd_request *req)
-{
-       struct se_task *task = &req->rd_task;
-       struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
-       struct rd_dev_sg_table *table;
-       struct scatterlist *sg_d, *sg_s;
-       void *dst, *src;
-       u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
-       u32 length, page_end = 0, table_sg_end;
-       u32 rd_offset = req->rd_offset;
-
-       table = rd_get_sg_table(dev, req->rd_page);
-       if (!table)
-               return -EINVAL;
-
-       table_sg_end = (table->page_end_offset - req->rd_page);
-       sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
-       sg_s = task->task_sg;
-
-       pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
-               " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
-               req->rd_page, req->rd_offset);
-
-       dst_offset = rd_offset;
-
-       while (req->rd_size) {
-               if ((sg_s[i].length - src_offset) <
-                   (sg_d[j].length - dst_offset)) {
-                       length = (sg_s[i].length - src_offset);
-
-                       pr_debug("Step 1 - sg_s[%d]: %p length: %d"
-                               " offset: %d sg_d[%d].length: %u\n", i,
-                               &sg_s[i], sg_s[i].length, sg_s[i].offset,
-                               j, sg_d[j].length);
-                       pr_debug("Step 1 - length: %u src_offset: %u"
-                               " dst_offset: %u\n", length, src_offset,
-                               dst_offset);
-
-                       if (length > req->rd_size)
-                               length = req->rd_size;
-
-                       src = sg_virt(&sg_s[i++]) + src_offset;
-                       BUG_ON(!src);
-
-                       dst = sg_virt(&sg_d[j]) + dst_offset;
-                       BUG_ON(!dst);
-
-                       src_offset = 0;
-                       dst_offset = length;
-                       page_end = 0;
-               } else {
-                       length = (sg_d[j].length - dst_offset);
-
-                       pr_debug("Step 2 - sg_s[%d]: %p length: %d"
-                               " offset: %d sg_d[%d].length: %u\n", i,
-                               &sg_s[i], sg_s[i].length, sg_s[i].offset,
-                               j, sg_d[j].length);
-                       pr_debug("Step 2 - length: %u src_offset: %u"
-                               " dst_offset: %u\n", length, src_offset,
-                               dst_offset);
-
-                       if (length > req->rd_size)
-                               length = req->rd_size;
-
-                       src = sg_virt(&sg_s[i]) + src_offset;
-                       BUG_ON(!src);
-
-                       if (sg_s[i].length == length) {
-                               i++;
-                               src_offset = 0;
-                       } else
-                               src_offset = length;
-
-                       dst = sg_virt(&sg_d[j++]) + dst_offset;
-                       BUG_ON(!dst);
-
-                       dst_offset = 0;
-                       page_end = 1;
-               }
-
-               memcpy(dst, src, length);
-
-               pr_debug("page: %u, remaining size: %u, length: %u,"
-                       " i: %u, j: %u\n", req->rd_page,
-                       (req->rd_size - length), length, i, j);
-
-               req->rd_size -= length;
-               if (!req->rd_size)
-                       return 0;
-
-               if (!page_end)
-                       continue;
-
-               if (++req->rd_page <= table->page_end_offset) {
-                       pr_debug("page: %u in same page table\n",
-                               req->rd_page);
+               /* rd page completed, next one please */
+               req->rd_page++;
+               rd_offset = 0;
+               src_len = PAGE_SIZE;
+               if (req->rd_page <= table->page_end_offset) {
+                       rd_sg++;
                        continue;
                }
 
-               pr_debug("getting new page table for page: %u\n",
-                               req->rd_page);
-
                table = rd_get_sg_table(dev, req->rd_page);
-               if (!table)
+               if (!table) {
+                       sg_miter_stop(&m);
                        return -EINVAL;
+               }
 
-               sg_d = &table->sg_table[j = 0];
+               /* since we increment, the first sg entry is correct */
+               rd_sg = table->sg_table;
        }
-
+       sg_miter_stop(&m);
        return 0;
 }
 
@@ -583,28 +422,21 @@ static int rd_MEMCPY_do_task(struct se_task *task)
 {
        struct se_device *dev = task->task_se_cmd->se_dev;
        struct rd_request *req = RD_REQ(task);
-       unsigned long long lba;
+       u64 tmp;
        int ret;
 
-       req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE;
-       lba = task->task_lba;
-       req->rd_offset = (do_div(lba,
-                         (PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) *
-                          dev->se_sub_dev->se_dev_attrib.block_size;
+       tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
+       req->rd_offset = do_div(tmp, PAGE_SIZE);
+       req->rd_page = tmp;
        req->rd_size = task->task_size;
 
-       if (task->task_data_direction == DMA_FROM_DEVICE)
-               ret = rd_MEMCPY_read(req);
-       else
-               ret = rd_MEMCPY_write(req);
-
+       ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE);
        if (ret != 0)
                return ret;
 
        task->task_scsi_status = GOOD;
        transport_complete_task(task, 1);
-
-       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+       return 0;
 }
 
 /*     rd_free_task(): (Part of se_subsystem_api_t template)
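
The rd_MEMCPY() rewrite above replaces the two hand-rolled copy loops (separate read and write variants with paired index/offset bookkeeping) by a single routine that walks the task scatterlist with the kernel's sg_mapping_iter helpers, and rd_MEMCPY_do_task() now derives the starting page/offset with one do_div() (for example, with a 512-byte block size and 4 KiB pages, LBA 9 is byte offset 4608, i.e. rd_page 1 and rd_offset 512). Below is a stripped-down, kernel-context sketch of the iterator pattern only; copy_from_sg() is an illustrative name and the snippet is not a standalone program:

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/*
 * Illustrative helper: copy up to 'len' bytes out of a scatterlist into a
 * flat buffer using the same sg_miter_start/next/stop calls rd_MEMCPY() uses.
 */
static size_t copy_from_sg(void *dst, struct scatterlist *sgl,
			   unsigned int nents, size_t len)
{
	struct sg_mapping_iter m;
	size_t copied = 0;

	sg_miter_start(&m, sgl, nents, SG_MITER_FROM_SG);
	while (copied < len && sg_miter_next(&m)) {
		size_t chunk = min_t(size_t, m.length, len - copied);

		memcpy(dst + copied, m.addr, chunk);
		m.consumed = chunk;	/* only consume what was copied */
		copied += chunk;
	}
	sg_miter_stop(&m);

	return copied;
}
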
index 217e29d..6845228 100644 (file)
@@ -345,10 +345,6 @@ static void core_tmr_drain_cmd_list(
                        " %d t_fe_count: %d\n", (preempt_and_abort_list) ?
                        "Preempt" : "", cmd, cmd->t_state,
                        atomic_read(&cmd->t_fe_count));
-               /*
-                * Signal that the command has failed via cmd->se_cmd_flags,
-                */
-               transport_new_cmd_failure(cmd);
 
                core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
                                atomic_read(&cmd->t_fe_count));
index 3400ae6..0257658 100644 (file)
@@ -61,7 +61,6 @@
 static int sub_api_initialized;
 
 static struct workqueue_struct *target_completion_wq;
-static struct kmem_cache *se_cmd_cache;
 static struct kmem_cache *se_sess_cache;
 struct kmem_cache *se_tmr_req_cache;
 struct kmem_cache *se_ua_cache;
@@ -82,24 +81,18 @@ static int transport_generic_get_mem(struct se_cmd *cmd);
 static void transport_put_cmd(struct se_cmd *cmd);
 static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
 static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
-static void transport_generic_request_failure(struct se_cmd *, int, int);
+static void transport_generic_request_failure(struct se_cmd *);
 static void target_complete_ok_work(struct work_struct *work);
 
 int init_se_kmem_caches(void)
 {
-       se_cmd_cache = kmem_cache_create("se_cmd_cache",
-                       sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
-       if (!se_cmd_cache) {
-               pr_err("kmem_cache_create for struct se_cmd failed\n");
-               goto out;
-       }
        se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
                        sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
                        0, NULL);
        if (!se_tmr_req_cache) {
                pr_err("kmem_cache_create() for struct se_tmr_req"
                                " failed\n");
-               goto out_free_cmd_cache;
+               goto out;
        }
        se_sess_cache = kmem_cache_create("se_sess_cache",
                        sizeof(struct se_session), __alignof__(struct se_session),
@@ -182,8 +175,6 @@ out_free_sess_cache:
        kmem_cache_destroy(se_sess_cache);
 out_free_tmr_req_cache:
        kmem_cache_destroy(se_tmr_req_cache);
-out_free_cmd_cache:
-       kmem_cache_destroy(se_cmd_cache);
 out:
        return -ENOMEM;
 }
@@ -191,7 +182,6 @@ out:
 void release_se_kmem_caches(void)
 {
        destroy_workqueue(target_completion_wq);
-       kmem_cache_destroy(se_cmd_cache);
        kmem_cache_destroy(se_tmr_req_cache);
        kmem_cache_destroy(se_sess_cache);
        kmem_cache_destroy(se_ua_cache);
@@ -680,9 +670,9 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good)
                task->task_scsi_status = GOOD;
        } else {
                task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
-               task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
-               task->task_se_cmd->transport_error_status =
-                                       PYX_TRANSPORT_ILLEGAL_REQUEST;
+               task->task_se_cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
        }
 
        transport_complete_task(task, good);
@@ -693,7 +683,7 @@ static void target_complete_failure_work(struct work_struct *work)
 {
        struct se_cmd *cmd = container_of(work, struct se_cmd, work);
 
-       transport_generic_request_failure(cmd, 1, 1);
+       transport_generic_request_failure(cmd);
 }
 
 /*     transport_complete_task():
@@ -755,10 +745,11 @@ void transport_complete_task(struct se_task *task, int success)
        if (cmd->t_tasks_failed) {
                if (!task->task_error_status) {
                        task->task_error_status =
-                               PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
-                       cmd->transport_error_status =
-                               PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                }
+
                INIT_WORK(&cmd->work, target_complete_failure_work);
        } else {
                atomic_set(&cmd->t_transport_complete, 1);
@@ -1335,23 +1326,17 @@ struct se_device *transport_add_device_to_core_hba(
        dev->se_hba             = hba;
        dev->se_sub_dev         = se_dev;
        dev->transport          = transport;
-       atomic_set(&dev->active_cmds, 0);
        INIT_LIST_HEAD(&dev->dev_list);
        INIT_LIST_HEAD(&dev->dev_sep_list);
        INIT_LIST_HEAD(&dev->dev_tmr_list);
        INIT_LIST_HEAD(&dev->execute_task_list);
        INIT_LIST_HEAD(&dev->delayed_cmd_list);
-       INIT_LIST_HEAD(&dev->ordered_cmd_list);
        INIT_LIST_HEAD(&dev->state_task_list);
        INIT_LIST_HEAD(&dev->qf_cmd_list);
        spin_lock_init(&dev->execute_task_lock);
        spin_lock_init(&dev->delayed_cmd_lock);
-       spin_lock_init(&dev->ordered_cmd_lock);
-       spin_lock_init(&dev->state_task_lock);
-       spin_lock_init(&dev->dev_alua_lock);
        spin_lock_init(&dev->dev_reservation_lock);
        spin_lock_init(&dev->dev_status_lock);
-       spin_lock_init(&dev->dev_status_thr_lock);
        spin_lock_init(&dev->se_port_lock);
        spin_lock_init(&dev->se_tmr_lock);
        spin_lock_init(&dev->qf_cmd_lock);
@@ -1507,7 +1492,6 @@ void transport_init_se_cmd(
 {
        INIT_LIST_HEAD(&cmd->se_lun_node);
        INIT_LIST_HEAD(&cmd->se_delayed_node);
-       INIT_LIST_HEAD(&cmd->se_ordered_node);
        INIT_LIST_HEAD(&cmd->se_qf_node);
        INIT_LIST_HEAD(&cmd->se_queue_node);
        INIT_LIST_HEAD(&cmd->se_cmd_list);
@@ -1573,6 +1557,8 @@ int transport_generic_allocate_tasks(
                pr_err("Received SCSI CDB with command_size: %d that"
                        " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
                        scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
+               cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
                return -EINVAL;
        }
        /*
@@ -1588,6 +1574,9 @@ int transport_generic_allocate_tasks(
                                " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
                                scsi_command_size(cdb),
                                (unsigned long)sizeof(cmd->__t_task_cdb));
+                       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+                       cmd->scsi_sense_reason =
+                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                        return -ENOMEM;
                }
        } else
@@ -1658,11 +1647,9 @@ int transport_handle_cdb_direct(
         * and call transport_generic_request_failure() if necessary..
         */
        ret = transport_generic_new_cmd(cmd);
-       if (ret < 0) {
-               cmd->transport_error_status = ret;
-               transport_generic_request_failure(cmd, 0,
-                               (cmd->data_direction != DMA_TO_DEVICE));
-       }
+       if (ret < 0)
+               transport_generic_request_failure(cmd);
+
        return 0;
 }
 EXPORT_SYMBOL(transport_handle_cdb_direct);
@@ -1798,20 +1785,16 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 /*
  * Handle SAM-esque emulation for generic transport request failures.
  */
-static void transport_generic_request_failure(
-       struct se_cmd *cmd,
-       int complete,
-       int sc)
+static void transport_generic_request_failure(struct se_cmd *cmd)
 {
        int ret = 0;
 
        pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
                " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
                cmd->t_task_cdb[0]);
-       pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n",
+       pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
                cmd->se_tfo->get_cmd_state(cmd),
-               cmd->t_state,
-               cmd->transport_error_status);
+               cmd->t_state, cmd->scsi_sense_reason);
        pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
                " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
                " t_transport_active: %d t_transport_stop: %d"
@@ -1829,46 +1812,19 @@ static void transport_generic_request_failure(
        if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
                transport_complete_task_attr(cmd);
 
-       if (complete) {
-               cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
-       }
-
-       switch (cmd->transport_error_status) {
-       case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
-               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
-               break;
-       case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
-               cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
-               break;
-       case PYX_TRANSPORT_INVALID_CDB_FIELD:
-               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-               break;
-       case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
-               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-               break;
-       case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
-               if (!sc)
-                       transport_new_cmd_failure(cmd);
-               /*
-                * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
-                * we force this session to fall back to session
-                * recovery.
-                */
-               cmd->se_tfo->fall_back_to_erl0(cmd->se_sess);
-               cmd->se_tfo->stop_session(cmd->se_sess, 0, 0);
-
-               goto check_stop;
-       case PYX_TRANSPORT_LU_COMM_FAILURE:
-       case PYX_TRANSPORT_ILLEGAL_REQUEST:
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               break;
-       case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
-               cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
-               break;
-       case PYX_TRANSPORT_WRITE_PROTECTED:
-               cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+       switch (cmd->scsi_sense_reason) {
+       case TCM_NON_EXISTENT_LUN:
+       case TCM_UNSUPPORTED_SCSI_OPCODE:
+       case TCM_INVALID_CDB_FIELD:
+       case TCM_INVALID_PARAMETER_LIST:
+       case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
+       case TCM_UNKNOWN_MODE_PAGE:
+       case TCM_WRITE_PROTECTED:
+       case TCM_CHECK_CONDITION_ABORT_CMD:
+       case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+       case TCM_CHECK_CONDITION_NOT_READY:
                break;
-       case PYX_TRANSPORT_RESERVATION_CONFLICT:
+       case TCM_RESERVATION_CONFLICT:
                /*
                 * No SENSE Data payload for this case, set SCSI Status
                 * and queue the response to $FABRIC_MOD.
@@ -1893,15 +1849,9 @@ static void transport_generic_request_failure(
                if (ret == -EAGAIN || ret == -ENOMEM)
                        goto queue_full;
                goto check_stop;
-       case PYX_TRANSPORT_USE_SENSE_REASON:
-               /*
-                * struct se_cmd->scsi_sense_reason already set
-                */
-               break;
        default:
                pr_err("Unknown transport error for CDB 0x%02x: %d\n",
-                       cmd->t_task_cdb[0],
-                       cmd->transport_error_status);
+                       cmd->t_task_cdb[0], cmd->scsi_sense_reason);
                cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
                break;
        }
@@ -1912,14 +1862,10 @@ static void transport_generic_request_failure(
         * transport_send_check_condition_and_sense() after handling
         * possible unsoliticied write data payloads.
         */
-       if (!sc && !cmd->se_tfo->new_cmd_map)
-               transport_new_cmd_failure(cmd);
-       else {
-               ret = transport_send_check_condition_and_sense(cmd,
-                               cmd->scsi_sense_reason, 0);
-               if (ret == -EAGAIN || ret == -ENOMEM)
-                       goto queue_full;
-       }
+       ret = transport_send_check_condition_and_sense(cmd,
+                       cmd->scsi_sense_reason, 0);
+       if (ret == -EAGAIN || ret == -ENOMEM)
+               goto queue_full;
 
 check_stop:
        transport_lun_remove_cmd(cmd);
@@ -2002,19 +1948,12 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
         * to allow the passed struct se_cmd list of tasks to the front of the list.
         */
         if (cmd->sam_task_attr == MSG_HEAD_TAG) {
-               atomic_inc(&cmd->se_dev->dev_hoq_count);
-               smp_mb__after_atomic_inc();
                pr_debug("Added HEAD_OF_QUEUE for CDB:"
                        " 0x%02x, se_ordered_id: %u\n",
                        cmd->t_task_cdb[0],
                        cmd->se_ordered_id);
                return 1;
        } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
-               spin_lock(&cmd->se_dev->ordered_cmd_lock);
-               list_add_tail(&cmd->se_ordered_node,
-                               &cmd->se_dev->ordered_cmd_list);
-               spin_unlock(&cmd->se_dev->ordered_cmd_lock);
-
                atomic_inc(&cmd->se_dev->dev_ordered_sync);
                smp_mb__after_atomic_inc();
 
@@ -2076,9 +2015,9 @@ static int transport_execute_tasks(struct se_cmd *cmd)
 {
        int add_tasks;
 
-       if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
-               cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
-               transport_generic_request_failure(cmd, 0, 1);
+       if (se_dev_check_online(cmd->se_dev) != 0) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               transport_generic_request_failure(cmd);
                return 0;
        }
 
@@ -2163,14 +2102,13 @@ check_depth:
        else
                error = dev->transport->do_task(task);
        if (error != 0) {
-               cmd->transport_error_status = error;
                spin_lock_irqsave(&cmd->t_state_lock, flags);
                task->task_flags &= ~TF_ACTIVE;
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                atomic_set(&cmd->t_transport_sent, 0);
                transport_stop_tasks_for_cmd(cmd);
                atomic_inc(&dev->depth_left);
-               transport_generic_request_failure(cmd, 0, 1);
+               transport_generic_request_failure(cmd);
        }
 
        goto check_depth;
@@ -2178,19 +2116,6 @@ check_depth:
        return 0;
 }
 
-void transport_new_cmd_failure(struct se_cmd *se_cmd)
-{
-       unsigned long flags;
-       /*
-        * Any unsolicited data will get dumped for failed command inside of
-        * the fabric plugin
-        */
-       spin_lock_irqsave(&se_cmd->t_state_lock, flags);
-       se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
-       se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-       spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
-}
-
 static inline u32 transport_get_sectors_6(
        unsigned char *cdb,
        struct se_cmd *cmd,
@@ -2213,10 +2138,15 @@ static inline u32 transport_get_sectors_6(
 
        /*
         * Everything else assume TYPE_DISK Sector CDB location.
-        * Use 8-bit sector value.
+        * Use 8-bit sector value.  SBC-3 says:
+        *
+        *   A TRANSFER LENGTH field set to zero specifies that 256
+        *   logical blocks shall be written.  Any other value
+        *   specifies the number of logical blocks that shall be
+        *   written.
         */
 type_disk:
-       return (u32)cdb[4];
+       return cdb[4] ? : 256;
 }
 
 static inline u32 transport_get_sectors_10(
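
The transport_get_sectors_6() hunk above encodes the SBC-3 rule quoted in its comment: in the 6-byte READ/WRITE CDBs a TRANSFER LENGTH byte of zero means 256 logical blocks, hence cdb[4] ? : 256 instead of the old plain cast, which silently produced a zero-sector request. A small standalone illustration of the decoding, not the driver code itself:

#include <stdio.h>

/* 6-byte CDB TRANSFER LENGTH: zero is defined by SBC-3 to mean 256 blocks. */
static unsigned int cdb6_transfer_length(unsigned char byte4)
{
	return byte4 ? byte4 : 256;
}

int main(void)
{
	printf("%u %u %u\n",
	       cdb6_transfer_length(0),		/* 256 */
	       cdb6_transfer_length(1),		/* 1 */
	       cdb6_transfer_length(255));	/* 255 */
	return 0;
}
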
@@ -2460,27 +2390,6 @@ static int transport_get_sense_data(struct se_cmd *cmd)
        return -1;
 }
 
-static int
-transport_handle_reservation_conflict(struct se_cmd *cmd)
-{
-       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-       cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
-       cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
-       /*
-        * For UA Interlock Code 11b, a RESERVATION CONFLICT will
-        * establish a UNIT ATTENTION with PREVIOUS RESERVATION
-        * CONFLICT STATUS.
-        *
-        * See spc4r17, section 7.4.6 Control Mode Page, Table 349
-        */
-       if (cmd->se_sess &&
-           cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
-               core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
-                       cmd->orig_fe_lun, 0x2C,
-                       ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
-       return -EINVAL;
-}
-
 static inline long long transport_dev_end_lba(struct se_device *dev)
 {
        return dev->transport->get_blocks(dev) + 1;
@@ -2595,8 +2504,12 @@ static int transport_generic_cmd_sequencer(
         */
        if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
                if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
-                                       cmd, cdb, pr_reg_type) != 0)
-                       return transport_handle_reservation_conflict(cmd);
+                                       cmd, cdb, pr_reg_type) != 0) {
+                       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+                       cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
+                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                       return -EBUSY;
+               }
                /*
                 * This means the CDB is allowed for the SCSI Initiator port
                 * when said port is *NOT* holding the legacy SPC-2 or
@@ -2658,7 +2571,8 @@ static int transport_generic_cmd_sequencer(
                        goto out_unsupported_cdb;
                size = transport_get_size(sectors, cdb, cmd);
                cmd->t_task_lba = transport_lba_32(cdb);
-               cmd->t_tasks_fua = (cdb[1] & 0x8);
+               if (cdb[1] & 0x8)
+                       cmd->se_cmd_flags |= SCF_FUA;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
                break;
        case WRITE_12:
@@ -2667,7 +2581,8 @@ static int transport_generic_cmd_sequencer(
                        goto out_unsupported_cdb;
                size = transport_get_size(sectors, cdb, cmd);
                cmd->t_task_lba = transport_lba_32(cdb);
-               cmd->t_tasks_fua = (cdb[1] & 0x8);
+               if (cdb[1] & 0x8)
+                       cmd->se_cmd_flags |= SCF_FUA;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
                break;
        case WRITE_16:
@@ -2676,12 +2591,13 @@ static int transport_generic_cmd_sequencer(
                        goto out_unsupported_cdb;
                size = transport_get_size(sectors, cdb, cmd);
                cmd->t_task_lba = transport_lba_64(cdb);
-               cmd->t_tasks_fua = (cdb[1] & 0x8);
+               if (cdb[1] & 0x8)
+                       cmd->se_cmd_flags |= SCF_FUA;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
                break;
        case XDWRITEREAD_10:
                if ((cmd->data_direction != DMA_TO_DEVICE) ||
-                   !(cmd->t_tasks_bidi))
+                   !(cmd->se_cmd_flags & SCF_BIDI))
                        goto out_invalid_cdb_field;
                sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
                if (sector_ret)
@@ -2700,7 +2616,8 @@ static int transport_generic_cmd_sequencer(
                 * Setup BIDI XOR callback to be run after I/O completion.
                 */
                cmd->transport_complete_callback = &transport_xor_callback;
-               cmd->t_tasks_fua = (cdb[1] & 0x8);
+               if (cdb[1] & 0x8)
+                       cmd->se_cmd_flags |= SCF_FUA;
                break;
        case VARIABLE_LENGTH_CMD:
                service_action = get_unaligned_be16(&cdb[8]);
@@ -2728,7 +2645,8 @@ static int transport_generic_cmd_sequencer(
                         * completion.
                         */
                        cmd->transport_complete_callback = &transport_xor_callback;
-                       cmd->t_tasks_fua = (cdb[10] & 0x8);
+                       if (cdb[10] & 0x8)
+                               cmd->se_cmd_flags |= SCF_FUA;
                        break;
                case WRITE_SAME_32:
                        sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
@@ -3171,18 +3089,13 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
                        " SIMPLE: %u\n", dev->dev_cur_ordered_id,
                        cmd->se_ordered_id);
        } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
-               atomic_dec(&dev->dev_hoq_count);
-               smp_mb__after_atomic_dec();
                dev->dev_cur_ordered_id++;
                pr_debug("Incremented dev_cur_ordered_id: %u for"
                        " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
                        cmd->se_ordered_id);
        } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
-               spin_lock(&dev->ordered_cmd_lock);
-               list_del(&cmd->se_ordered_node);
                atomic_dec(&dev->dev_ordered_sync);
                smp_mb__after_atomic_dec();
-               spin_unlock(&dev->ordered_cmd_lock);
 
                dev->dev_cur_ordered_id++;
                pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
@@ -3495,6 +3408,18 @@ int transport_generic_map_mem_to_cmd(
 
        if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
            (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
+               /*
+                * Reject SCSI data overflow with map_mem_to_cmd() as incoming
+                * scatterlists already have been set to follow what the fabric
+                * passes for the original expected data transfer length.
+                */
+               if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+                       pr_warn("Rejecting SCSI DATA overflow for fabric using"
+                               " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
+                       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+                       cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+                       return -EINVAL;
+               }
 
                cmd->t_data_sg = sgl;
                cmd->t_data_nents = sgl_count;
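
Editor's note: the new check above rejects overflow commands when the fabric hands in pre-mapped scatterlists, since those lists were sized to the fabric's expected transfer length and cannot hold a larger CDB-specified transfer. A small sketch of that length comparison, assuming the usual SCSI residual convention that "overflow" means the CDB asks for more than the initiator allocated; names and the -EINVAL mapping are illustrative.

    #include <stdio.h>

    enum residual { RESIDUAL_NONE, RESIDUAL_UNDERFLOW, RESIDUAL_OVERFLOW };

    static enum residual classify(unsigned int cdb_len, unsigned int fabric_len)
    {
        if (cdb_len == fabric_len)
            return RESIDUAL_NONE;
        return cdb_len > fabric_len ? RESIDUAL_OVERFLOW : RESIDUAL_UNDERFLOW;
    }

    /* a pre-mapped buffer can absorb underflow but not overflow */
    static int accept_premapped_sgl(unsigned int cdb_len, unsigned int fabric_len)
    {
        if (classify(cdb_len, fabric_len) == RESIDUAL_OVERFLOW) {
            fprintf(stderr, "rejecting overflow: CDB wants %u, buffer holds %u\n",
                    cdb_len, fabric_len);
            return -22;  /* -EINVAL */
        }
        return 0;
    }

    int main(void)
    {
        printf("%d\n", accept_premapped_sgl(8192, 4096));  /* rejected */
        printf("%d\n", accept_premapped_sgl(4096, 4096));  /* accepted */
        return 0;
    }
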
@@ -3813,7 +3738,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
            cmd->data_length) {
                ret = transport_generic_get_mem(cmd);
                if (ret < 0)
-                       return ret;
+                       goto out_fail;
        }
 
        /*
@@ -3842,8 +3767,15 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
                task_cdbs = transport_allocate_control_task(cmd);
        }
 
-       if (task_cdbs <= 0)
+       if (task_cdbs < 0)
                goto out_fail;
+       else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
+               cmd->t_state = TRANSPORT_COMPLETE;
+               atomic_set(&cmd->t_transport_active, 1);
+               INIT_WORK(&cmd->work, target_complete_ok_work);
+               queue_work(target_completion_wq, &cmd->work);
+               return 0;
+       }
 
        if (set_counts) {
                atomic_inc(&cmd->t_fe_count);
@@ -3929,7 +3861,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
        else if (ret < 0)
                return ret;
 
-       return PYX_TRANSPORT_WRITE_PENDING;
+       return 1;
 
 queue_full:
        pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
@@ -4602,9 +4534,6 @@ void transport_send_task_abort(struct se_cmd *cmd)
                if (cmd->se_tfo->write_pending_status(cmd) != 0) {
                        atomic_inc(&cmd->t_transport_aborted);
                        smp_mb__after_atomic_inc();
-                       cmd->scsi_status = SAM_STAT_TASK_ABORTED;
-                       transport_new_cmd_failure(cmd);
-                       return;
                }
        }
        cmd->scsi_status = SAM_STAT_TASK_ABORTED;
@@ -4670,8 +4599,6 @@ static int transport_processing_thread(void *param)
        struct se_cmd *cmd;
        struct se_device *dev = (struct se_device *) param;
 
-       set_user_nice(current, -20);
-
        while (!kthread_should_stop()) {
                ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
                                atomic_read(&dev->dev_queue_obj.queue_cnt) ||
@@ -4698,18 +4625,13 @@ get_cmd:
                        }
                        ret = cmd->se_tfo->new_cmd_map(cmd);
                        if (ret < 0) {
-                               cmd->transport_error_status = ret;
-                               transport_generic_request_failure(cmd,
-                                               0, (cmd->data_direction !=
-                                                   DMA_TO_DEVICE));
+                               transport_generic_request_failure(cmd);
                                break;
                        }
                        ret = transport_generic_new_cmd(cmd);
                        if (ret < 0) {
-                               cmd->transport_error_status = ret;
-                               transport_generic_request_failure(cmd,
-                                       0, (cmd->data_direction !=
-                                        DMA_TO_DEVICE));
+                               transport_generic_request_failure(cmd);
+                               break;
                        }
                        break;
                case TRANSPORT_PROCESS_WRITE:
index 4fac37c..71fc9ce 100644 (file)
@@ -200,7 +200,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
        lport = ep->lp;
        fp = fc_frame_alloc(lport, sizeof(*txrdy));
        if (!fp)
-               return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+               return -ENOMEM; /* Signal QUEUE_FULL */
 
        txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
        memset(txrdy, 0, sizeof(*txrdy));
index 5f77041..9402b73 100644 (file)
@@ -436,8 +436,7 @@ static void ft_del_lport(struct se_wwn *wwn)
        struct ft_lport_acl *lacl = container_of(wwn,
                                struct ft_lport_acl, fc_lport_wwn);
 
-       pr_debug("del lport %s\n",
-                       config_item_name(&wwn->wwn_group.cg_item));
+       pr_debug("del lport %s\n", lacl->name);
        mutex_lock(&ft_lport_lock);
        list_del(&lacl->list);
        mutex_unlock(&ft_lport_lock);
index e8c564a..a8078d0 100644 (file)
@@ -1458,6 +1458,16 @@ static const struct usb_device_id acm_ids[] = {
        },
        { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
        },
+       /* Motorola H24 HSPA module: */
+       { USB_DEVICE(0x22b8, 0x2d91) }, /* modem                                */
+       { USB_DEVICE(0x22b8, 0x2d92) }, /* modem           + diagnostics        */
+       { USB_DEVICE(0x22b8, 0x2d93) }, /* modem + AT port                      */
+       { USB_DEVICE(0x22b8, 0x2d95) }, /* modem + AT port + diagnostics        */
+       { USB_DEVICE(0x22b8, 0x2d96) }, /* modem                         + NMEA */
+       { USB_DEVICE(0x22b8, 0x2d97) }, /* modem           + diagnostics + NMEA */
+       { USB_DEVICE(0x22b8, 0x2d99) }, /* modem + AT port               + NMEA */
+       { USB_DEVICE(0x22b8, 0x2d9a) }, /* modem + AT port + diagnostics + NMEA */
+
        { USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
        .driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on
                                           data interface instead of
index 717ebc9..600d823 100644 (file)
@@ -264,7 +264,7 @@ static int __devinit dwc3_core_init(struct dwc3 *dwc)
                ret = -ENODEV;
                goto err0;
        }
-       dwc->revision = reg & DWC3_GSNPSREV_MASK;
+       dwc->revision = reg;
 
        dwc3_core_soft_reset(dwc);
 
index 4730016..45f422a 100644 (file)
@@ -1959,7 +1959,7 @@ static int amd5536_start(struct usb_gadget_driver *driver,
        u32 tmp;
 
        if (!driver || !bind || !driver->setup
-                       || driver->speed != USB_SPEED_HIGH)
+                       || driver->speed < USB_SPEED_HIGH)
                return -EINVAL;
        if (!dev)
                return -ENODEV;
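
Editor's note: this and several later gadget hunks replace an exact speed match with "driver->speed < USB_SPEED_HIGH" (or < USB_SPEED_FULL). That works because enum usb_device_speed is ordered from slowest to fastest, so the comparison reads as "cannot do at least this speed" and a super-speed-capable driver is no longer rejected by a high-speed controller. A sketch under that ordering assumption; the exact enumerator set in any given kernel may differ.

    #include <stdio.h>

    /* ordered slowest to fastest, mirroring enum usb_device_speed */
    enum usb_speed {
        SPEED_UNKNOWN = 0,
        SPEED_LOW,
        SPEED_FULL,
        SPEED_HIGH,
        SPEED_SUPER,
    };

    /* "at least this fast" check instead of an exact match */
    static int driver_speed_ok(enum usb_speed driver_max, enum usb_speed required)
    {
        return driver_max >= required;
    }

    int main(void)
    {
        printf("super on high-speed UDC: %d\n",
               driver_speed_ok(SPEED_SUPER, SPEED_HIGH));  /* 1: accepted */
        printf("full  on high-speed UDC: %d\n",
               driver_speed_ok(SPEED_FULL, SPEED_HIGH));   /* 0: rejected */
        return 0;
    }
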
index 596a0b4..4dff83d 100644 (file)
@@ -130,9 +130,6 @@ ep_matches (
                        num_req_streams = ep_comp->bmAttributes & 0x1f;
                        if (num_req_streams > ep->max_streams)
                                return 0;
-                       /* Update the ep_comp descriptor if needed */
-                       if (num_req_streams != ep->max_streams)
-                               ep_comp->bmAttributes = ep->max_streams;
                }
 
        }
index c39d588..1a6f415 100644 (file)
@@ -2975,6 +2975,7 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
        fsg_common_put(common);
        usb_free_descriptors(fsg->function.descriptors);
        usb_free_descriptors(fsg->function.hs_descriptors);
+       usb_free_descriptors(fsg->function.ss_descriptors);
        kfree(fsg);
 }
 
index 91fdf79..cf33a8d 100644 (file)
@@ -131,8 +131,8 @@ static int gser_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
        }
        if (!gser->port.in->desc || !gser->port.out->desc) {
                DBG(cdev, "activate generic ttyGS%d\n", gser->port_num);
-               if (!config_ep_by_speed(cdev->gadget, f, gser->port.in) ||
-                   !config_ep_by_speed(cdev->gadget, f, gser->port.out)) {
+               if (config_ep_by_speed(cdev->gadget, f, gser->port.in) ||
+                   config_ep_by_speed(cdev->gadget, f, gser->port.out)) {
                        gser->port.in->desc = NULL;
                        gser->port.out->desc = NULL;
                        return -EINVAL;
index 43a49ec..dcbc0a2 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/err.h>
 #include <linux/fsl_devices.h>
 #include <linux/platform_device.h>
+#include <linux/io.h>
 
 #include <mach/hardware.h>
 
@@ -88,7 +89,6 @@ eenahb:
 void fsl_udc_clk_finalize(struct platform_device *pdev)
 {
        struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
-#if defined(CONFIG_SOC_IMX35)
        if (cpu_is_mx35()) {
                unsigned int v;
 
@@ -101,7 +101,6 @@ void fsl_udc_clk_finalize(struct platform_device *pdev)
                                        USBPHYCTRL_OTGBASE_OFFSET));
                }
        }
-#endif
 
        /* ULPI transceivers don't need usbpll */
        if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
index 2a03e4d..e00cf92 100644 (file)
@@ -2336,8 +2336,7 @@ static int fsl_qe_start(struct usb_gadget_driver *driver,
        if (!udc_controller)
                return -ENODEV;
 
-       if (!driver || (driver->speed != USB_SPEED_FULL
-                       && driver->speed != USB_SPEED_HIGH)
+       if (!driver || driver->speed < USB_SPEED_FULL
                        || !bind || !driver->disconnect || !driver->setup)
                return -EINVAL;
 
index b3b3d83..dd28ef3 100644 (file)
@@ -696,12 +696,31 @@ static void fsl_free_request(struct usb_ep *_ep, struct usb_request *_req)
                kfree(req);
 }
 
-/*-------------------------------------------------------------------------*/
+/* Actually add a dTD chain to an empty dQH and let go */
+static void fsl_prime_ep(struct fsl_ep *ep, struct ep_td_struct *td)
+{
+       struct ep_queue_head *qh = get_qh_by_ep(ep);
+
+       /* Write dQH next pointer and terminate bit to 0 */
+       qh->next_dtd_ptr = cpu_to_hc32(td->td_dma
+                       & EP_QUEUE_HEAD_NEXT_POINTER_MASK);
+
+       /* Clear active and halt bit */
+       qh->size_ioc_int_sts &= cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
+                                       | EP_QUEUE_HEAD_STATUS_HALT));
+
+       /* Ensure that updates to the QH will occur before priming. */
+       wmb();
+
+       /* Prime endpoint by writing correct bit to ENDPTPRIME */
+       fsl_writel(ep_is_in(ep) ? (1 << (ep_index(ep) + 16))
+                       : (1 << (ep_index(ep))), &dr_regs->endpointprime);
+}
+
+/* Add dTD chain to the dQH of an EP */
 static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
 {
-       int i = ep_index(ep) * 2 + ep_is_in(ep);
        u32 temp, bitmask, tmp_stat;
-       struct ep_queue_head *dQH = &ep->udc->ep_qh[i];
 
        /* VDBG("QH addr Register 0x%8x", dr_regs->endpointlistaddr);
        VDBG("ep_qh[%d] addr is 0x%8x", i, (u32)&(ep->udc->ep_qh[i])); */
@@ -719,7 +738,7 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
                        cpu_to_hc32(req->head->td_dma & DTD_ADDR_MASK);
                /* Read prime bit, if 1 goto done */
                if (fsl_readl(&dr_regs->endpointprime) & bitmask)
-                       goto out;
+                       return;
 
                do {
                        /* Set ATDTW bit in USBCMD */
@@ -736,28 +755,10 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
                fsl_writel(temp & ~USB_CMD_ATDTW, &dr_regs->usbcmd);
 
                if (tmp_stat)
-                       goto out;
+                       return;
        }
 
-       /* Write dQH next pointer and terminate bit to 0 */
-       temp = req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
-       dQH->next_dtd_ptr = cpu_to_hc32(temp);
-
-       /* Clear active and halt bit */
-       temp = cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
-                       | EP_QUEUE_HEAD_STATUS_HALT));
-       dQH->size_ioc_int_sts &= temp;
-
-       /* Ensure that updates to the QH will occur before priming. */
-       wmb();
-
-       /* Prime endpoint by writing 1 to ENDPTPRIME */
-       temp = ep_is_in(ep)
-               ? (1 << (ep_index(ep) + 16))
-               : (1 << (ep_index(ep)));
-       fsl_writel(temp, &dr_regs->endpointprime);
-out:
-       return;
+       fsl_prime_ep(ep, req->head);
 }
 
 /* Fill in the dTD structure
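
Editor's note: the fsl_prime_ep() helper added above collects the hand-off sequence in one place: link the first dTD into the queue head, clear stale active/halt status, issue a write barrier so the controller sees a consistent queue head, then set the endpoint's bit in ENDPTPRIME. A simplified userspace model of that ordering; the register, field names and bit positions are placeholders, and __sync_synchronize() merely stands in for the kernel's wmb().

    #include <stdint.h>
    #include <stdio.h>

    struct queue_head {          /* placeholder for the dQH layout */
        uint32_t next_dtd_ptr;
        uint32_t status;
    };

    #define STATUS_ACTIVE  (1u << 7)   /* placeholder bit positions */
    #define STATUS_HALT    (1u << 6)

    static volatile uint32_t endptprime;   /* stands in for ENDPTPRIME */

    static void prime_ep(struct queue_head *qh, uint32_t first_dtd,
                         int ep_index, int is_in)
    {
        qh->next_dtd_ptr = first_dtd;                   /* 1: link in the dTD chain */
        qh->status &= ~(STATUS_ACTIVE | STATUS_HALT);   /* 2: clear stale status */
        __sync_synchronize();                           /* 3: order QH writes before priming */
        endptprime = 1u << (is_in ? ep_index + 16 : ep_index);  /* 4: kick the endpoint */
    }

    int main(void)
    {
        struct queue_head qh = { 0, STATUS_HALT };

        prime_ep(&qh, 0x1000, 2, 1);    /* IN endpoint 2 */
        printf("ENDPTPRIME = 0x%08x\n", (unsigned)endptprime);  /* bit 18 set */
        return 0;
    }
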
@@ -877,7 +878,7 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
                VDBG("%s, bad ep", __func__);
                return -EINVAL;
        }
-       if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+       if (usb_endpoint_xfer_isoc(ep->desc)) {
                if (req->req.length > ep->ep.maxpacket)
                        return -EMSGSIZE;
        }
@@ -973,25 +974,20 @@ static int fsl_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
 
                /* The request isn't the last request in this ep queue */
                if (req->queue.next != &ep->queue) {
-                       struct ep_queue_head *qh;
                        struct fsl_req *next_req;
 
-                       qh = ep->qh;
                        next_req = list_entry(req->queue.next, struct fsl_req,
                                        queue);
 
-                       /* Point the QH to the first TD of next request */
-                       fsl_writel((u32) next_req->head, &qh->curr_dtd_ptr);
+                       /* prime with dTD of next request */
+                       fsl_prime_ep(ep, next_req->head);
                }
-
-               /* The request hasn't been processed, patch up the TD chain */
+       /* The request hasn't been processed, patch up the TD chain */
        } else {
                struct fsl_req *prev_req;
 
                prev_req = list_entry(req->queue.prev, struct fsl_req, queue);
-               fsl_writel(fsl_readl(&req->tail->next_td_ptr),
-                               &prev_req->tail->next_td_ptr);
-
+               prev_req->tail->next_td_ptr = req->tail->next_td_ptr;
        }
 
        done(ep, req, -ECONNRESET);
@@ -1032,7 +1028,7 @@ static int fsl_ep_set_halt(struct usb_ep *_ep, int value)
                goto out;
        }
 
-       if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+       if (usb_endpoint_xfer_isoc(ep->desc)) {
                status = -EOPNOTSUPP;
                goto out;
        }
@@ -1068,7 +1064,7 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
        struct fsl_udc *udc;
        int size = 0;
        u32 bitmask;
-       struct ep_queue_head *d_qh;
+       struct ep_queue_head *qh;
 
        ep = container_of(_ep, struct fsl_ep, ep);
        if (!_ep || (!ep->desc && ep_index(ep) != 0))
@@ -1079,13 +1075,13 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
        if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
                return -ESHUTDOWN;
 
-       d_qh = &ep->udc->ep_qh[ep_index(ep) * 2 + ep_is_in(ep)];
+       qh = get_qh_by_ep(ep);
 
        bitmask = (ep_is_in(ep)) ? (1 << (ep_index(ep) + 16)) :
            (1 << (ep_index(ep)));
 
        if (fsl_readl(&dr_regs->endptstatus) & bitmask)
-               size = (d_qh->size_ioc_int_sts & DTD_PACKET_SIZE)
+               size = (qh->size_ioc_int_sts & DTD_PACKET_SIZE)
                    >> DTD_LENGTH_BIT_POS;
 
        pr_debug("%s %u\n", __func__, size);
@@ -1938,8 +1934,7 @@ static int fsl_start(struct usb_gadget_driver *driver,
        if (!udc_controller)
                return -ENODEV;
 
-       if (!driver || (driver->speed != USB_SPEED_FULL
-                               && driver->speed != USB_SPEED_HIGH)
+       if (!driver || driver->speed < USB_SPEED_FULL
                        || !bind || !driver->disconnect || !driver->setup)
                return -EINVAL;
 
index 1d51be8..f781f5d 100644 (file)
@@ -569,6 +569,16 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length)
                                        * 2 + ((windex & USB_DIR_IN) ? 1 : 0))
 #define get_pipe_by_ep(EP)     (ep_index(EP) * 2 + ep_is_in(EP))
 
+static inline struct ep_queue_head *get_qh_by_ep(struct fsl_ep *ep)
+{
+       /* we only have one ep0 structure but two queue heads */
+       if (ep_index(ep) != 0)
+               return ep->qh;
+       else
+               return &ep->udc->ep_qh[(ep->udc->ep0_dir ==
+                               USB_DIR_IN) ? 1 : 0];
+}
+
 struct platform_device;
 #ifdef CONFIG_ARCH_MXC
 int fsl_udc_clk_init(struct platform_device *pdev);
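
Editor's note: get_qh_by_ep() above encodes the queue-head layout that fsl_ep_fifo_status() and fsl_queue_td() previously open-coded: each endpoint owns an OUT and an IN queue head at index ep*2 and ep*2+1, except endpoint 0, which has one software ep structure and picks its queue head from the current control-transfer direction. A standalone sketch of that index math; the names are illustrative.

    #include <stdio.h>

    #define DIR_OUT 0
    #define DIR_IN  1

    /* queue heads laid out as {ep0-out, ep0-in, ep1-out, ep1-in, ...};
     * for ep0 the direction comes from the current control stage */
    static int qh_index(int ep_num, int ep_dir, int ep0_dir)
    {
        if (ep_num == 0)
            return ep0_dir == DIR_IN ? 1 : 0;
        return ep_num * 2 + (ep_dir == DIR_IN ? 1 : 0);
    }

    int main(void)
    {
        printf("ep0, IN control stage: qh[%d]\n", qh_index(0, DIR_OUT, DIR_IN));
        printf("ep3 OUT:               qh[%d]\n", qh_index(3, DIR_OUT, DIR_IN));
        printf("ep3 IN:                qh[%d]\n", qh_index(3, DIR_IN, DIR_OUT));
        return 0;
    }
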
index 91d0af2..9aa1cbb 100644 (file)
@@ -1472,7 +1472,7 @@ static int m66592_start(struct usb_gadget_driver *driver,
        int retval;
 
        if (!driver
-                       || driver->speed != USB_SPEED_HIGH
+                       || driver->speed < USB_SPEED_HIGH
                        || !bind
                        || !driver->setup)
                return -EINVAL;
index 7f1bc9a..da2b9d0 100644 (file)
@@ -1881,7 +1881,7 @@ static int net2280_start(struct usb_gadget *_gadget,
         * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
         * "must not be used in normal operation"
         */
-       if (!driver || driver->speed != USB_SPEED_HIGH
+       if (!driver || driver->speed < USB_SPEED_HIGH
                        || !driver->setup)
                return -EINVAL;
 
index 24f84b2..fc719a3 100644 (file)
@@ -1746,7 +1746,7 @@ static int r8a66597_start(struct usb_gadget *gadget,
        struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
 
        if (!driver
-                       || driver->speed != USB_SPEED_HIGH
+                       || driver->speed < USB_SPEED_HIGH
                        || !driver->setup)
                return -EINVAL;
        if (!r8a66597)
index a552453..b314482 100644 (file)
@@ -2586,10 +2586,8 @@ static int s3c_hsotg_start(struct usb_gadget_driver *driver,
                return -EINVAL;
        }
 
-       if (driver->speed != USB_SPEED_HIGH &&
-           driver->speed != USB_SPEED_FULL) {
+       if (driver->speed < USB_SPEED_FULL)
                dev_err(hsotg->dev, "%s: bad speed\n", __func__);
-       }
 
        if (!bind || !driver->setup) {
                dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
index 8d54f89..20a553b 100644 (file)
@@ -1142,8 +1142,7 @@ static int s3c_hsudc_start(struct usb_gadget_driver *driver,
        int ret;
 
        if (!driver
-               || (driver->speed != USB_SPEED_FULL &&
-                       driver->speed != USB_SPEED_HIGH)
+               || driver->speed < USB_SPEED_FULL
                || !bind
                || !driver->unbind || !driver->disconnect || !driver->setup)
                return -EINVAL;
index 56a3203..a60679c 100644 (file)
@@ -1475,6 +1475,7 @@ iso_stream_schedule (
         * jump until after the queue is primed.
         */
        else {
+               int done = 0;
                start = SCHEDULE_SLOP + (now & ~0x07);
 
                /* NOTE:  assumes URB_ISO_ASAP, to limit complexity/bugs */
@@ -1492,18 +1493,18 @@ iso_stream_schedule (
                        if (stream->highspeed) {
                                if (itd_slot_ok(ehci, mod, start,
                                                stream->usecs, period))
-                                       break;
+                                       done = 1;
                        } else {
                                if ((start % 8) >= 6)
                                        continue;
                                if (sitd_slot_ok(ehci, mod, stream,
                                                start, sched, period))
-                                       break;
+                                       done = 1;
                        }
-               } while (start > next);
+               } while (start > next && !done);
 
                /* no room in the schedule */
-               if (start == next) {
+               if (!done) {
                        ehci_dbg(ehci, "iso resched full %p (now %d max %d)\n",
                                urb, now, now + mod);
                        status = -ENOSPC;
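
Editor's note: the scheduler loop above used break to leave early and then tested "start == next" to detect that no slot was found, which misreads the case where the very last candidate is the one that fits. Tracking success in an explicit done flag keeps the search and the failure test independent. A generic sketch of the pattern, detached from the EHCI specifics:

    #include <stdio.h>

    static int slot_ok(int slot)
    {
        return slot == 11;   /* pretend only slot 11 has room */
    }

    /* report -1 only when the range is exhausted, not merely because the
     * loop stopped on its last candidate */
    static int find_slot(int start, int limit)
    {
        int done = 0;

        do {
            start--;
            if (slot_ok(start))
                done = 1;
        } while (start > limit && !done);

        return done ? start : -1;
    }

    int main(void)
    {
        printf("found: %d\n", find_slot(16, 11));  /* 11, the last candidate */
        printf("found: %d\n", find_slot(16, 12));  /* -1, nothing fits */
        return 0;
    }
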
index a7dc1e1..2ac4ac2 100644 (file)
@@ -18,7 +18,7 @@
 
 #include "isp1760-hcd.h"
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
@@ -31,7 +31,7 @@
 #include <linux/pci.h>
 #endif
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
 struct isp1760 {
        struct usb_hcd *hcd;
        int rst_gpio;
@@ -437,7 +437,7 @@ static int __init isp1760_init(void)
        ret = platform_driver_register(&isp1760_plat_driver);
        if (!ret)
                any_ret = 0;
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
        ret = platform_driver_register(&isp1760_of_driver);
        if (!ret)
                any_ret = 0;
@@ -457,7 +457,7 @@ module_init(isp1760_init);
 static void __exit isp1760_exit(void)
 {
        platform_driver_unregister(&isp1760_plat_driver);
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
        platform_driver_unregister(&isp1760_of_driver);
 #endif
 #ifdef CONFIG_PCI
index d6e1754..a403b53 100644 (file)
@@ -124,7 +124,7 @@ void qset_clear(struct whc *whc, struct whc_qset *qset)
 {
        qset->td_start = qset->td_end = qset->ntds = 0;
 
-       qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
+       qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
        qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
        qset->qh.err_count = 0;
        qset->qh.scratch[0] = 0;
index aa94c01..a1afb7c 100644 (file)
@@ -711,7 +711,10 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
        ring = xhci->cmd_ring;
        seg = ring->deq_seg;
        do {
-               memset(seg->trbs, 0, SEGMENT_SIZE);
+               memset(seg->trbs, 0,
+                       sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
+               seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
+                       cpu_to_le32(~TRB_CYCLE);
                seg = seg->next;
        } while (seg != ring->deq_seg);
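
Editor's note: the memset above now stops one entry short of the end of each segment, because that last TRB is the link TRB chaining segments together; instead of wiping it, only its cycle bit is cleared so the ring's producer/consumer state stays consistent. A small sketch of "zero everything except the trailing link entry, then clear one bit in it"; the TRB layout, segment size and cycle-bit position are simplified assumptions here.

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define TRBS_PER_SEGMENT 16          /* real segments are larger */
    #define TRB_CYCLE        (1u << 0)

    struct trb {                         /* simplified: a real TRB is 4 x 32-bit words */
        uint64_t params;
        uint32_t status;
        uint32_t control;
    };

    static void clear_segment(struct trb *trbs)
    {
        /* wipe every TRB except the last one, which links to the next segment */
        memset(trbs, 0, sizeof(struct trb) * (TRBS_PER_SEGMENT - 1));
        /* keep the link TRB but clear its cycle bit so it reads as stale */
        trbs[TRBS_PER_SEGMENT - 1].control &= ~TRB_CYCLE;
    }

    int main(void)
    {
        struct trb seg[TRBS_PER_SEGMENT];

        memset(seg, 0xff, sizeof(seg));
        clear_segment(seg);
        printf("first TRB control 0x%08x, link TRB control 0x%08x\n",
               (unsigned)seg[0].control,
               (unsigned)seg[TRBS_PER_SEGMENT - 1].control);
        return 0;
    }
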
 
index c1fa12e..b63ab15 100644 (file)
@@ -2301,18 +2301,12 @@ static int musb_suspend(struct device *dev)
                 */
        }
 
-       musb_save_context(musb);
-
        spin_unlock_irqrestore(&musb->lock, flags);
        return 0;
 }
 
 static int musb_resume_noirq(struct device *dev)
 {
-       struct musb     *musb = dev_to_musb(dev);
-
-       musb_restore_context(musb);
-
        /* for static cmos like DaVinci, register values were preserved
         * unless for some reason the whole soc powered down or the USB
         * module got reset through the PSC (vs just being disabled).
index d51043a..922148f 100644 (file)
@@ -1903,7 +1903,7 @@ static int musb_gadget_start(struct usb_gadget *g,
        unsigned long           flags;
        int                     retval = -EINVAL;
 
-       if (driver->speed != USB_SPEED_HIGH)
+       if (driver->speed < USB_SPEED_HIGH)
                goto err0;
 
        pm_runtime_get_sync(musb->controller);
index 60ddba8..79cb0af 100644 (file)
@@ -774,6 +774,10 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
                        if (musb->double_buffer_not_ok)
                                musb_writew(epio, MUSB_TXMAXP,
                                                hw_ep->max_packet_sz_tx);
+                       else if (can_bulk_split(musb, qh->type))
+                               musb_writew(epio, MUSB_TXMAXP, packet_sz
+                                       | ((hw_ep->max_packet_sz_tx /
+                                               packet_sz) - 1) << 11);
                        else
                                musb_writew(epio, MUSB_TXMAXP,
                                                qh->maxpacket |
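
Editor's note: the bulk-split branch added above programs TXMAXP with the USB packet size in the low bits and, per the register layout the driver assumes, "packets per FIFO load minus one" starting at bit 11, so a large TX FIFO can be drained in multiple packets. A quick arithmetic sketch of the value being written:

    #include <stdio.h>

    /* packet size in the low bits, (fifo/packet - 1) multiplier from bit 11 */
    static unsigned int txmaxp(unsigned int packet_sz, unsigned int fifo_sz)
    {
        return packet_sz | ((fifo_sz / packet_sz - 1) << 11);
    }

    int main(void)
    {
        /* 512-byte bulk packets in a 4096-byte FIFO: 8 packets per load */
        printf("TXMAXP = 0x%04x\n", txmaxp(512, 4096));   /* 0x3a00 */
        return 0;
    }
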
index 053f86d..ad96a38 100644 (file)
@@ -349,7 +349,7 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod)
                if (mod->irq_attch)
                        intenb1 |= ATTCHE;
 
-               if (mod->irq_attch)
+               if (mod->irq_dtch)
                        intenb1 |= DTCHE;
 
                if (mod->irq_sign)
index d9717e0..7f4e803 100644 (file)
@@ -751,53 +751,32 @@ static int usbhsg_gadget_start(struct usb_gadget *gadget,
                struct usb_gadget_driver *driver)
 {
        struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
-       struct usbhs_priv *priv;
-       struct device *dev;
-       int ret;
+       struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
 
        if (!driver             ||
            !driver->setup      ||
-           driver->speed != USB_SPEED_HIGH)
+           driver->speed < USB_SPEED_FULL)
                return -EINVAL;
 
-       dev  = usbhsg_gpriv_to_dev(gpriv);
-       priv = usbhsg_gpriv_to_priv(gpriv);
-
        /* first hook up the driver ... */
        gpriv->driver = driver;
        gpriv->gadget.dev.driver = &driver->driver;
 
-       ret = device_add(&gpriv->gadget.dev);
-       if (ret) {
-               dev_err(dev, "device_add error %d\n", ret);
-               goto add_fail;
-       }
-
        return usbhsg_try_start(priv, USBHSG_STATUS_REGISTERD);
-
-add_fail:
-       gpriv->driver = NULL;
-       gpriv->gadget.dev.driver = NULL;
-
-       return ret;
 }
 
 static int usbhsg_gadget_stop(struct usb_gadget *gadget,
                struct usb_gadget_driver *driver)
 {
        struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
-       struct usbhs_priv *priv;
-       struct device *dev;
+       struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
 
        if (!driver             ||
            !driver->unbind)
                return -EINVAL;
 
-       dev  = usbhsg_gpriv_to_dev(gpriv);
-       priv = usbhsg_gpriv_to_priv(gpriv);
-
        usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD);
-       device_del(&gpriv->gadget.dev);
+       gpriv->gadget.dev.driver = NULL;
        gpriv->driver = NULL;
 
        return 0;
@@ -827,6 +806,13 @@ static int usbhsg_start(struct usbhs_priv *priv)
 
 static int usbhsg_stop(struct usbhs_priv *priv)
 {
+       struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
+
+       /* cable disconnect */
+       if (gpriv->driver &&
+           gpriv->driver->disconnect)
+               gpriv->driver->disconnect(&gpriv->gadget);
+
        return usbhsg_try_stop(priv, USBHSG_STATUS_STARTED);
 }
 
@@ -876,12 +862,14 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
        /*
         * init gadget
         */
-       device_initialize(&gpriv->gadget.dev);
        dev_set_name(&gpriv->gadget.dev, "gadget");
        gpriv->gadget.dev.parent        = dev;
        gpriv->gadget.name              = "renesas_usbhs_udc";
        gpriv->gadget.ops               = &usbhsg_gadget_ops;
        gpriv->gadget.is_dualspeed      = 1;
+       ret = device_register(&gpriv->gadget.dev);
+       if (ret < 0)
+               goto err_add_udc;
 
        INIT_LIST_HEAD(&gpriv->gadget.ep_list);
 
@@ -912,12 +900,15 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
 
        ret = usb_add_gadget_udc(dev, &gpriv->gadget);
        if (ret)
-               goto err_add_udc;
+               goto err_register;
 
 
        dev_info(dev, "gadget probed\n");
 
        return 0;
+
+err_register:
+       device_unregister(&gpriv->gadget.dev);
 err_add_udc:
        kfree(gpriv->uep);
 
@@ -933,6 +924,8 @@ void usbhs_mod_gadget_remove(struct usbhs_priv *priv)
 
        usb_del_gadget_udc(&gpriv->gadget);
 
+       device_unregister(&gpriv->gadget.dev);
+
        usbhsg_controller_unregister(gpriv);
 
        kfree(gpriv->uep);
index bade761..7955de5 100644 (file)
@@ -1267,6 +1267,7 @@ int usbhs_mod_host_probe(struct usbhs_priv *priv)
                dev_err(dev, "Failed to create hcd\n");
                return -ENOMEM;
        }
+       hcd->has_tt = 1; /* for low/full speed */
 
        pipe_info = kzalloc(sizeof(*pipe_info) * pipe_size, GFP_KERNEL);
        if (!pipe_info) {
index bd4298b..ff3db5d 100644 (file)
@@ -736,6 +736,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
        { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
index 571fa96..055b64e 100644 (file)
 
 /* Propox devices */
 #define FTDI_PROPOX_JTAGCABLEII_PID    0xD738
+#define FTDI_PROPOX_ISPCABLEIII_PID    0xD739
 
 /* Lenz LI-USB Computer Interface. */
 #define FTDI_LENZ_LIUSB_PID    0xD780
index d865878..6dd6453 100644 (file)
@@ -661,6 +661,14 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) },  /* E398 3G Modem */
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) },  /* E398 3G PC UI Interface */
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) },  /* E398 3G Application Interface */
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
@@ -747,6 +755,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
+       { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
index 3041a97..24caba7 100644 (file)
@@ -1854,6 +1854,13 @@ UNUSUAL_DEV(  0x1370, 0x6828, 0x0110, 0x0110,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_IGNORE_RESIDUE ),
 
+/* Reported by Qinglin Ye <yestyle@gmail.com> */
+UNUSUAL_DEV(  0x13fe, 0x3600, 0x0100, 0x0100,
+               "Kingston",
+               "DT 101 G2",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_BULK_IGNORE_TAG ),
+
 /* Reported by Francesco Foresti <frafore@tiscali.it> */
 UNUSUAL_DEV(  0x14cd, 0x6600, 0x0201, 0x0201,
                "Super Top",
index 8e964b9..284798a 100644 (file)
@@ -166,7 +166,7 @@ retry:
        /*
         * Get IO TLB memory from any location.
         */
-       xen_io_tlb_start = alloc_bootmem(bytes);
+       xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
        if (!xen_io_tlb_start) {
                m = "Cannot allocate Xen-SWIOTLB buffer!\n";
                goto error;
@@ -179,7 +179,7 @@ retry:
                               bytes,
                               xen_io_tlb_nslabs);
        if (rc) {
-               free_bootmem(__pa(xen_io_tlb_start), bytes);
+               free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
                m = "Failed to get contiguous memory for DMA from Xen!\n"\
                    "You either: don't have the permissions, do not have"\
                    " enough free memory under 4GB, or the hypervisor memory"\
index b3b8f2f..ede860f 100644 (file)
@@ -621,15 +621,6 @@ static struct xenbus_watch *find_watch(const char *token)
        return NULL;
 }
 
-static void xs_reset_watches(void)
-{
-       int err;
-
-       err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL));
-       if (err && err != -EEXIST)
-               printk(KERN_WARNING "xs_reset_watches failed: %d\n", err);
-}
-
 /* Register callback to watch this node. */
 int register_xenbus_watch(struct xenbus_watch *watch)
 {
@@ -906,9 +897,5 @@ int xs_init(void)
        if (IS_ERR(task))
                return PTR_ERR(task);
 
-       /* shutdown watches for kexec boot */
-       if (xen_hvm_domain())
-               xs_reset_watches();
-
        return 0;
 }
index e24cd89..ea78c3a 100644 (file)
@@ -12,7 +12,7 @@ here.
 This directory is _NOT_ for adding arbitrary new firmware images. The
 place to add those is the separate linux-firmware repository:
 
-    git://git.kernel.org/pub/scm/linux/kernel/git/dwmw2/linux-firmware.git
+    git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git
 
 That repository contains all these firmware images which have been
 extracted from older drivers, as well various new firmware images which
@@ -22,6 +22,7 @@ been permitted to redistribute under separate cover.
 To submit firmware to that repository, please send either a git binary
 diff or preferably a git pull request to:
       David Woodhouse <dwmw2@infradead.org>
+      Ben Hutchings <ben@decadent.org.uk>
 
 Your commit should include an update to the WHENCE file clearly
 identifying the licence under which the firmware is available, and
index 7ec1409..cb97174 100644 (file)
@@ -64,6 +64,8 @@ struct btrfs_worker_thread {
        int idle;
 };
 
+static int __btrfs_start_workers(struct btrfs_workers *workers);
+
 /*
  * btrfs_start_workers uses kthread_run, which can block waiting for memory
  * for a very long time.  It will actually throttle on page writeback,
@@ -88,27 +90,10 @@ static void start_new_worker_func(struct btrfs_work *work)
 {
        struct worker_start *start;
        start = container_of(work, struct worker_start, work);
-       btrfs_start_workers(start->queue, 1);
+       __btrfs_start_workers(start->queue);
        kfree(start);
 }
 
-static int start_new_worker(struct btrfs_workers *queue)
-{
-       struct worker_start *start;
-       int ret;
-
-       start = kzalloc(sizeof(*start), GFP_NOFS);
-       if (!start)
-               return -ENOMEM;
-
-       start->work.func = start_new_worker_func;
-       start->queue = queue;
-       ret = btrfs_queue_worker(queue->atomic_worker_start, &start->work);
-       if (ret)
-               kfree(start);
-       return ret;
-}
-
 /*
  * helper function to move a thread onto the idle list after it
  * has finished some requests.
@@ -153,12 +138,20 @@ static void check_busy_worker(struct btrfs_worker_thread *worker)
 static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
 {
        struct btrfs_workers *workers = worker->workers;
+       struct worker_start *start;
        unsigned long flags;
 
        rmb();
        if (!workers->atomic_start_pending)
                return;
 
+       start = kzalloc(sizeof(*start), GFP_NOFS);
+       if (!start)
+               return;
+
+       start->work.func = start_new_worker_func;
+       start->queue = workers;
+
        spin_lock_irqsave(&workers->lock, flags);
        if (!workers->atomic_start_pending)
                goto out;
@@ -170,10 +163,11 @@ static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
 
        workers->num_workers_starting += 1;
        spin_unlock_irqrestore(&workers->lock, flags);
-       start_new_worker(workers);
+       btrfs_queue_worker(workers->atomic_worker_start, &start->work);
        return;
 
 out:
+       kfree(start);
        spin_unlock_irqrestore(&workers->lock, flags);
 }
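
Editor's note: check_pending_worker_creates() now allocates the worker_start record before taking workers->lock, so nothing is allocated while holding the spinlock; if the pending flag turns out to be already handled, the record is simply freed on the out path. A generic userspace sketch of the allocate-outside-the-lock pattern, with a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct work { int payload; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int start_pending = 1;

    /* allocate first; under the lock only check the flag and either hand
     * the record off or throw it away */
    static void maybe_queue_start(void)
    {
        struct work *w = calloc(1, sizeof(*w));

        if (!w)
            return;              /* nothing to undo, retry later */

        pthread_mutex_lock(&lock);
        if (!start_pending) {
            pthread_mutex_unlock(&lock);
            free(w);             /* lost the race, drop the record */
            return;
        }
        start_pending = 0;
        pthread_mutex_unlock(&lock);

        printf("queued worker-start record %p\n", (void *)w);
        free(w);                 /* a real queue would own it from here */
    }

    int main(void)
    {
        maybe_queue_start();     /* queues */
        maybe_queue_start();     /* flag already cleared, record freed */
        return 0;
    }
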
 
@@ -331,7 +325,7 @@ again:
                        run_ordered_completions(worker->workers, work);
 
                        check_pending_worker_creates(worker);
-
+                       cond_resched();
                }
 
                spin_lock_irq(&worker->lock);
@@ -462,56 +456,55 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
  * starts new worker threads.  This does not enforce the max worker
  * count in case you need to temporarily go past it.
  */
-static int __btrfs_start_workers(struct btrfs_workers *workers,
-                                int num_workers)
+static int __btrfs_start_workers(struct btrfs_workers *workers)
 {
        struct btrfs_worker_thread *worker;
        int ret = 0;
-       int i;
 
-       for (i = 0; i < num_workers; i++) {
-               worker = kzalloc(sizeof(*worker), GFP_NOFS);
-               if (!worker) {
-                       ret = -ENOMEM;
-                       goto fail;
-               }
+       worker = kzalloc(sizeof(*worker), GFP_NOFS);
+       if (!worker) {
+               ret = -ENOMEM;
+               goto fail;
+       }
 
-               INIT_LIST_HEAD(&worker->pending);
-               INIT_LIST_HEAD(&worker->prio_pending);
-               INIT_LIST_HEAD(&worker->worker_list);
-               spin_lock_init(&worker->lock);
-
-               atomic_set(&worker->num_pending, 0);
-               atomic_set(&worker->refs, 1);
-               worker->workers = workers;
-               worker->task = kthread_run(worker_loop, worker,
-                                          "btrfs-%s-%d", workers->name,
-                                          workers->num_workers + i);
-               if (IS_ERR(worker->task)) {
-                       ret = PTR_ERR(worker->task);
-                       kfree(worker);
-                       goto fail;
-               }
-               spin_lock_irq(&workers->lock);
-               list_add_tail(&worker->worker_list, &workers->idle_list);
-               worker->idle = 1;
-               workers->num_workers++;
-               workers->num_workers_starting--;
-               WARN_ON(workers->num_workers_starting < 0);
-               spin_unlock_irq(&workers->lock);
+       INIT_LIST_HEAD(&worker->pending);
+       INIT_LIST_HEAD(&worker->prio_pending);
+       INIT_LIST_HEAD(&worker->worker_list);
+       spin_lock_init(&worker->lock);
+
+       atomic_set(&worker->num_pending, 0);
+       atomic_set(&worker->refs, 1);
+       worker->workers = workers;
+       worker->task = kthread_run(worker_loop, worker,
+                                  "btrfs-%s-%d", workers->name,
+                                  workers->num_workers + 1);
+       if (IS_ERR(worker->task)) {
+               ret = PTR_ERR(worker->task);
+               kfree(worker);
+               goto fail;
        }
+       spin_lock_irq(&workers->lock);
+       list_add_tail(&worker->worker_list, &workers->idle_list);
+       worker->idle = 1;
+       workers->num_workers++;
+       workers->num_workers_starting--;
+       WARN_ON(workers->num_workers_starting < 0);
+       spin_unlock_irq(&workers->lock);
+
        return 0;
 fail:
-       btrfs_stop_workers(workers);
+       spin_lock_irq(&workers->lock);
+       workers->num_workers_starting--;
+       spin_unlock_irq(&workers->lock);
        return ret;
 }
 
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
+int btrfs_start_workers(struct btrfs_workers *workers)
 {
        spin_lock_irq(&workers->lock);
-       workers->num_workers_starting += num_workers;
+       workers->num_workers_starting++;
        spin_unlock_irq(&workers->lock);
-       return __btrfs_start_workers(workers, num_workers);
+       return __btrfs_start_workers(workers);
 }
 
 /*
@@ -568,6 +561,7 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
        struct btrfs_worker_thread *worker;
        unsigned long flags;
        struct list_head *fallback;
+       int ret;
 
 again:
        spin_lock_irqsave(&workers->lock, flags);
@@ -584,7 +578,9 @@ again:
                        workers->num_workers_starting++;
                        spin_unlock_irqrestore(&workers->lock, flags);
                        /* we're below the limit, start another worker */
-                       __btrfs_start_workers(workers, 1);
+                       ret = __btrfs_start_workers(workers);
+                       if (ret)
+                               goto fallback;
                        goto again;
                }
        }
@@ -665,7 +661,7 @@ void btrfs_set_work_high_prio(struct btrfs_work *work)
 /*
  * places a struct btrfs_work into the pending queue of one of the kthreads
  */
-int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
+void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 {
        struct btrfs_worker_thread *worker;
        unsigned long flags;
@@ -673,7 +669,7 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 
        /* don't requeue something already on a list */
        if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
-               goto out;
+               return;
 
        worker = find_worker(workers);
        if (workers->ordered) {
@@ -712,7 +708,4 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
        if (wake)
                wake_up_process(worker->task);
        spin_unlock_irqrestore(&worker->lock, flags);
-
-out:
-       return 0;
 }
index 5077746..f34cc31 100644 (file)
@@ -109,8 +109,8 @@ struct btrfs_workers {
        char *name;
 };
 
-int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers);
+void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
+int btrfs_start_workers(struct btrfs_workers *workers);
 int btrfs_stop_workers(struct btrfs_workers *workers);
 void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
                        struct btrfs_workers *async_starter);
index 50634ab..6738503 100644 (file)
@@ -2692,7 +2692,8 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 int btrfs_readpage(struct file *file, struct page *page);
 void btrfs_evict_inode(struct inode *inode);
 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
-void btrfs_dirty_inode(struct inode *inode, int flags);
+int btrfs_dirty_inode(struct inode *inode);
+int btrfs_update_time(struct file *file);
 struct inode *btrfs_alloc_inode(struct super_block *sb);
 void btrfs_destroy_inode(struct inode *inode);
 int btrfs_drop_inode(struct inode *inode);
index 5b16357..9c1eccc 100644 (file)
@@ -640,8 +640,8 @@ static int btrfs_delayed_inode_reserve_metadata(
         * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
         * we're accounted for.
         */
-       if (!trans->bytes_reserved &&
-           src_rsv != &root->fs_info->delalloc_block_rsv) {
+       if (!src_rsv || (!trans->bytes_reserved &&
+           src_rsv != &root->fs_info->delalloc_block_rsv)) {
                ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
                /*
                 * Since we're under a transaction reserve_metadata_bytes could
index 632f8f3..f44b392 100644 (file)
@@ -2194,19 +2194,27 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        fs_info->endio_meta_write_workers.idle_thresh = 2;
        fs_info->readahead_workers.idle_thresh = 2;
 
-       btrfs_start_workers(&fs_info->workers, 1);
-       btrfs_start_workers(&fs_info->generic_worker, 1);
-       btrfs_start_workers(&fs_info->submit_workers, 1);
-       btrfs_start_workers(&fs_info->delalloc_workers, 1);
-       btrfs_start_workers(&fs_info->fixup_workers, 1);
-       btrfs_start_workers(&fs_info->endio_workers, 1);
-       btrfs_start_workers(&fs_info->endio_meta_workers, 1);
-       btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
-       btrfs_start_workers(&fs_info->endio_write_workers, 1);
-       btrfs_start_workers(&fs_info->endio_freespace_worker, 1);
-       btrfs_start_workers(&fs_info->delayed_workers, 1);
-       btrfs_start_workers(&fs_info->caching_workers, 1);
-       btrfs_start_workers(&fs_info->readahead_workers, 1);
+       /*
+        * btrfs_start_workers can really only fail because of ENOMEM so just
+        * return -ENOMEM if any of these fail.
+        */
+       ret = btrfs_start_workers(&fs_info->workers);
+       ret |= btrfs_start_workers(&fs_info->generic_worker);
+       ret |= btrfs_start_workers(&fs_info->submit_workers);
+       ret |= btrfs_start_workers(&fs_info->delalloc_workers);
+       ret |= btrfs_start_workers(&fs_info->fixup_workers);
+       ret |= btrfs_start_workers(&fs_info->endio_workers);
+       ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
+       ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
+       ret |= btrfs_start_workers(&fs_info->endio_write_workers);
+       ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
+       ret |= btrfs_start_workers(&fs_info->delayed_workers);
+       ret |= btrfs_start_workers(&fs_info->caching_workers);
+       ret |= btrfs_start_workers(&fs_info->readahead_workers);
+       if (ret) {
+               ret = -ENOMEM;
+               goto fail_sb_buffer;
+       }
 
        fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
        fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
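
Editor's note: open_ctree() now ORs together the return values of all the btrfs_start_workers() calls and, as the added comment says, converts any failure to -ENOMEM. The OR of several negative errnos is not itself a meaningful errno, which is why the block picks one explicitly. A sketch of the aggregation idiom:

    #include <stdio.h>

    #define ENOMEM 12

    static int start_pool(int should_fail)
    {
        return should_fail ? -ENOMEM : 0;
    }

    int main(void)
    {
        int ret;

        /* accumulate failures; any nonzero result survives the ORs */
        ret  = start_pool(0);
        ret |= start_pool(0);
        ret |= start_pool(1);    /* one pool fails */
        ret |= start_pool(0);

        if (ret)
            ret = -ENOMEM;       /* ORed errnos are not valid, pick one */
        printf("ret = %d\n", ret);
        return 0;
    }
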
index f0d5718..f5fbe57 100644 (file)
@@ -2822,7 +2822,7 @@ out_free:
        btrfs_release_path(path);
 out:
        spin_lock(&block_group->lock);
-       if (!ret)
+       if (!ret && dcs == BTRFS_DC_SETUP)
                block_group->cache_generation = trans->transid;
        block_group->disk_cache_state = dcs;
        spin_unlock(&block_group->lock);
@@ -4204,12 +4204,17 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
        u64 to_reserve = 0;
+       u64 csum_bytes;
        unsigned nr_extents = 0;
+       int extra_reserve = 0;
        int flush = 1;
        int ret;
 
+       /* Need to be holding the i_mutex here if we aren't free space cache */
        if (btrfs_is_free_space_inode(root, inode))
                flush = 0;
+       else
+               WARN_ON(!mutex_is_locked(&inode->i_mutex));
 
        if (flush && btrfs_transaction_in_commit(root->fs_info))
                schedule_timeout(1);
@@ -4220,11 +4225,9 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
        BTRFS_I(inode)->outstanding_extents++;
 
        if (BTRFS_I(inode)->outstanding_extents >
-           BTRFS_I(inode)->reserved_extents) {
+           BTRFS_I(inode)->reserved_extents)
                nr_extents = BTRFS_I(inode)->outstanding_extents -
                        BTRFS_I(inode)->reserved_extents;
-               BTRFS_I(inode)->reserved_extents += nr_extents;
-       }
 
        /*
         * Add an item to reserve for updating the inode when we complete the
@@ -4232,11 +4235,12 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
         */
        if (!BTRFS_I(inode)->delalloc_meta_reserved) {
                nr_extents++;
-               BTRFS_I(inode)->delalloc_meta_reserved = 1;
+               extra_reserve = 1;
        }
 
        to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
        to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
+       csum_bytes = BTRFS_I(inode)->csum_bytes;
        spin_unlock(&BTRFS_I(inode)->lock);
 
        ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
@@ -4246,22 +4250,35 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 
                spin_lock(&BTRFS_I(inode)->lock);
                dropped = drop_outstanding_extent(inode);
-               to_free = calc_csum_metadata_size(inode, num_bytes, 0);
-               spin_unlock(&BTRFS_I(inode)->lock);
-               to_free += btrfs_calc_trans_metadata_size(root, dropped);
-
                /*
-                * Somebody could have come in and twiddled with the
-                * reservation, so if we have to free more than we would have
-                * reserved from this reservation go ahead and release those
-                * bytes.
+                * If the inodes csum_bytes is the same as the original
+                * csum_bytes then we know we haven't raced with any free()ers
+                * so we can just reduce our inodes csum bytes and carry on.
+                * Otherwise we have to do the normal free thing to account for
+                * the case that the free side didn't free up its reserve
+                * because of this outstanding reservation.
                 */
-               to_free -= to_reserve;
+               if (BTRFS_I(inode)->csum_bytes == csum_bytes)
+                       calc_csum_metadata_size(inode, num_bytes, 0);
+               else
+                       to_free = calc_csum_metadata_size(inode, num_bytes, 0);
+               spin_unlock(&BTRFS_I(inode)->lock);
+               if (dropped)
+                       to_free += btrfs_calc_trans_metadata_size(root, dropped);
+
                if (to_free)
                        btrfs_block_rsv_release(root, block_rsv, to_free);
                return ret;
        }
 
+       spin_lock(&BTRFS_I(inode)->lock);
+       if (extra_reserve) {
+               BTRFS_I(inode)->delalloc_meta_reserved = 1;
+               nr_extents--;
+       }
+       BTRFS_I(inode)->reserved_extents += nr_extents;
+       spin_unlock(&BTRFS_I(inode)->lock);
+
        block_rsv_add_bytes(block_rsv, to_reserve, 1);
 
        return 0;
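
Editor's note: the reservation error path above snapshots csum_bytes before dropping the inode lock; after a failed reservation it re-takes the lock and, if the counter is unchanged, knows no free() raced in and can just back out its own accounting, otherwise it falls back to the full release calculation. A generic sketch of the snapshot-and-compare pattern, with a pthread mutex standing in for the inode spinlock and made-up counter values:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long csum_bytes = 1000;   /* stands in for the per-inode counter */

    /* on failure, choose between a cheap local undo and the full release
     * path by checking whether the counter moved while the lock was dropped */
    static void back_out(unsigned long snapshot, unsigned long my_delta)
    {
        pthread_mutex_lock(&lock);
        if (csum_bytes == snapshot)
            csum_bytes -= my_delta;   /* nobody raced: just undo our add */
        else
            printf("raced (%lu != %lu): take the full release path\n",
                   csum_bytes, snapshot);
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        unsigned long snapshot;

        pthread_mutex_lock(&lock);
        csum_bytes += 100;            /* optimistic accounting */
        snapshot = csum_bytes;
        pthread_mutex_unlock(&lock);

        /* pretend the reservation failed and nothing raced meanwhile */
        back_out(snapshot, 100);
        printf("csum_bytes = %lu\n", csum_bytes);   /* back to 1000 */
        return 0;
    }
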
@@ -5107,11 +5124,11 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
        struct btrfs_root *root = orig_root->fs_info->extent_root;
        struct btrfs_free_cluster *last_ptr = NULL;
        struct btrfs_block_group_cache *block_group = NULL;
+       struct btrfs_block_group_cache *used_block_group;
        int empty_cluster = 2 * 1024 * 1024;
        int allowed_chunk_alloc = 0;
        int done_chunk_alloc = 0;
        struct btrfs_space_info *space_info;
-       int last_ptr_loop = 0;
        int loop = 0;
        int index = 0;
        int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
@@ -5173,6 +5190,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 ideal_cache:
                block_group = btrfs_lookup_block_group(root->fs_info,
                                                       search_start);
+               used_block_group = block_group;
                /*
                 * we don't want to use the block group if it doesn't match our
                 * allocation bits, or if its not cached.
@@ -5210,6 +5228,7 @@ search:
                u64 offset;
                int cached;
 
+               used_block_group = block_group;
                btrfs_get_block_group(block_group);
                search_start = block_group->key.objectid;
 
@@ -5286,71 +5305,62 @@ alloc:
                spin_unlock(&block_group->free_space_ctl->tree_lock);
 
                /*
-                * Ok we want to try and use the cluster allocator, so lets look
-                * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
-                * have tried the cluster allocator plenty of times at this
-                * point and not have found anything, so we are likely way too
-                * fragmented for the clustering stuff to find anything, so lets
-                * just skip it and let the allocator find whatever block it can
-                * find
+                * Ok we want to try and use the cluster allocator, so
+                * lets look there
                 */
-               if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
+               if (last_ptr) {
                        /*
                         * the refill lock keeps out other
                         * people trying to start a new cluster
                         */
                        spin_lock(&last_ptr->refill_lock);
-                       if (!last_ptr->block_group ||
-                           last_ptr->block_group->ro ||
-                           !block_group_bits(last_ptr->block_group, data))
+                       used_block_group = last_ptr->block_group;
+                       if (used_block_group != block_group &&
+                           (!used_block_group ||
+                            used_block_group->ro ||
+                            !block_group_bits(used_block_group, data))) {
+                               used_block_group = block_group;
                                goto refill_cluster;
+                       }
+
+                       if (used_block_group != block_group)
+                               btrfs_get_block_group(used_block_group);
 
-                       offset = btrfs_alloc_from_cluster(block_group, last_ptr,
-                                                num_bytes, search_start);
+                       offset = btrfs_alloc_from_cluster(used_block_group,
+                         last_ptr, num_bytes, used_block_group->key.objectid);
                        if (offset) {
                                /* we have a block, we're done */
                                spin_unlock(&last_ptr->refill_lock);
                                goto checks;
                        }
 
-                       spin_lock(&last_ptr->lock);
-                       /*
-                        * whoops, this cluster doesn't actually point to
-                        * this block group.  Get a ref on the block
-                        * group is does point to and try again
-                        */
-                       if (!last_ptr_loop && last_ptr->block_group &&
-                           last_ptr->block_group != block_group &&
-                           index <=
-                                get_block_group_index(last_ptr->block_group)) {
-
-                               btrfs_put_block_group(block_group);
-                               block_group = last_ptr->block_group;
-                               btrfs_get_block_group(block_group);
-                               spin_unlock(&last_ptr->lock);
-                               spin_unlock(&last_ptr->refill_lock);
-
-                               last_ptr_loop = 1;
-                               search_start = block_group->key.objectid;
-                               /*
-                                * we know this block group is properly
-                                * in the list because
-                                * btrfs_remove_block_group, drops the
-                                * cluster before it removes the block
-                                * group from the list
-                                */
-                               goto have_block_group;
+                       WARN_ON(last_ptr->block_group != used_block_group);
+                       if (used_block_group != block_group) {
+                               btrfs_put_block_group(used_block_group);
+                               used_block_group = block_group;
                        }
-                       spin_unlock(&last_ptr->lock);
 refill_cluster:
+                       BUG_ON(used_block_group != block_group);
+                       /* If we are on LOOP_NO_EMPTY_SIZE, we can't
+                        * set up a new clusters, so lets just skip it
+                        * and let the allocator find whatever block
+                        * it can find.  If we reach this point, we
+                        * will have tried the cluster allocator
+                        * plenty of times and not have found
+                        * anything, so we are likely way too
+                        * fragmented for the clustering stuff to find
+                        * anything.  */
+                       if (loop >= LOOP_NO_EMPTY_SIZE) {
+                               spin_unlock(&last_ptr->refill_lock);
+                               goto unclustered_alloc;
+                       }
+
                        /*
                         * this cluster didn't work out, free it and
                         * start over
                         */
                        btrfs_return_cluster_to_free_space(NULL, last_ptr);
 
-                       last_ptr_loop = 0;
-
                        /* allocate a cluster in this block group */
                        ret = btrfs_find_space_cluster(trans, root,
                                               block_group, last_ptr,
@@ -5390,6 +5400,7 @@ refill_cluster:
                        goto loop;
                }
 
+unclustered_alloc:
                offset = btrfs_find_space_for_alloc(block_group, search_start,
                                                    num_bytes, empty_size);
                /*
@@ -5416,14 +5427,14 @@ checks:
                search_start = stripe_align(root, offset);
                /* move on to the next group */
                if (search_start + num_bytes >= search_end) {
-                       btrfs_add_free_space(block_group, offset, num_bytes);
+                       btrfs_add_free_space(used_block_group, offset, num_bytes);
                        goto loop;
                }
 
                /* move on to the next group */
                if (search_start + num_bytes >
-                   block_group->key.objectid + block_group->key.offset) {
-                       btrfs_add_free_space(block_group, offset, num_bytes);
+                   used_block_group->key.objectid + used_block_group->key.offset) {
+                       btrfs_add_free_space(used_block_group, offset, num_bytes);
                        goto loop;
                }
 
@@ -5431,14 +5442,14 @@ checks:
                ins->offset = num_bytes;
 
                if (offset < search_start)
-                       btrfs_add_free_space(block_group, offset,
+                       btrfs_add_free_space(used_block_group, offset,
                                             search_start - offset);
                BUG_ON(offset > search_start);
 
-               ret = btrfs_update_reserved_bytes(block_group, num_bytes,
+               ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
                                                  alloc_type);
                if (ret == -EAGAIN) {
-                       btrfs_add_free_space(block_group, offset, num_bytes);
+                       btrfs_add_free_space(used_block_group, offset, num_bytes);
                        goto loop;
                }
 
@@ -5447,15 +5458,19 @@ checks:
                ins->offset = num_bytes;
 
                if (offset < search_start)
-                       btrfs_add_free_space(block_group, offset,
+                       btrfs_add_free_space(used_block_group, offset,
                                             search_start - offset);
                BUG_ON(offset > search_start);
+               if (used_block_group != block_group)
+                       btrfs_put_block_group(used_block_group);
                btrfs_put_block_group(block_group);
                break;
 loop:
                failed_cluster_refill = false;
                failed_alloc = false;
                BUG_ON(index != get_block_group_index(block_group));
+               if (used_block_group != block_group)
+                       btrfs_put_block_group(used_block_group);
                btrfs_put_block_group(block_group);
        }
        up_read(&space_info->groups_sem);
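
The extent-tree.c hunks above replace the old last_ptr_loop retry with an explicit used_block_group pointer: the cluster may hand out space from a different block group than the one the search loop currently holds, so an extra reference is taken when the two diverge and every exit path drops it only while they still differ. The added WARN_ON/BUG_ON assert that the two pointers have been folded back together before a cluster refill. A minimal user-space sketch of the get/put pairing (hypothetical refcounting helpers, not the btrfs API):

#include <assert.h>
#include <stdio.h>

struct group { const char *name; int refs; };

static void get_group(struct group *g) { g->refs++; }
static void put_group(struct group *g) { assert(g->refs > 0); g->refs--; }

static void alloc_done(struct group *iter, struct group *used)
{
        if (used != iter)       /* drop the extra ref taken when we switched */
                put_group(used);
        put_group(iter);        /* ref held by the search loop itself */
}

int main(void)
{
        struct group searched = { "searched", 1 };      /* loop's reference */
        struct group cluster  = { "cluster",  0 };
        struct group *used = &searched;

        /* the cluster points at a different group: pin it before using it */
        if (&cluster != used) {
                used = &cluster;
                get_group(used);
        }

        alloc_done(&searched, used);
        printf("refs after exit: %s=%d %s=%d\n",
               searched.name, searched.refs, cluster.name, cluster.refs);
        return 0;
}
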
index be1bf62..49f3c9d 100644 (file)
@@ -935,8 +935,10 @@ again:
        node = tree_search(tree, start);
        if (!node) {
                prealloc = alloc_extent_state_atomic(prealloc);
-               if (!prealloc)
-                       return -ENOMEM;
+               if (!prealloc) {
+                       err = -ENOMEM;
+                       goto out;
+               }
                err = insert_state(tree, prealloc, start, end, &bits);
                prealloc = NULL;
                BUG_ON(err == -EEXIST);
@@ -992,8 +994,10 @@ hit_next:
         */
        if (state->start < start) {
                prealloc = alloc_extent_state_atomic(prealloc);
-               if (!prealloc)
-                       return -ENOMEM;
+               if (!prealloc) {
+                       err = -ENOMEM;
+                       goto out;
+               }
                err = split_state(tree, state, prealloc, start);
                BUG_ON(err == -EEXIST);
                prealloc = NULL;
@@ -1024,8 +1028,10 @@ hit_next:
                        this_end = last_start - 1;
 
                prealloc = alloc_extent_state_atomic(prealloc);
-               if (!prealloc)
-                       return -ENOMEM;
+               if (!prealloc) {
+                       err = -ENOMEM;
+                       goto out;
+               }
 
                /*
                 * Avoid to free 'prealloc' if it can be merged with
@@ -1051,8 +1057,10 @@ hit_next:
         */
        if (state->start <= end && state->end > end) {
                prealloc = alloc_extent_state_atomic(prealloc);
-               if (!prealloc)
-                       return -ENOMEM;
+               if (!prealloc) {
+                       err = -ENOMEM;
+                       goto out;
+               }
 
                err = split_state(tree, state, prealloc, end + 1);
                BUG_ON(err == -EEXIST);
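
The extent_io.c hunks convert the bare `return -ENOMEM` statements in the extent-state manipulation paths into `err = -ENOMEM; goto out`, so an allocation failure leaves through the function's common exit path, which drops the tree lock and can release leftover preallocated state instead of leaking it. A rough user-space sketch of the pattern (stand-in names, not the btrfs code):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

/* Illustrative only: allocate under a lock, fail through one exit path. */
static int set_bits(int simulate_oom)
{
        void *prealloc = NULL;
        int err = 0;

        pthread_mutex_lock(&tree_lock);

        prealloc = simulate_oom ? NULL : malloc(64);
        if (!prealloc) {
                err = -ENOMEM;          /* a bare return would skip the unlock below */
                goto out;
        }
        /* ... insert/split states, possibly consuming prealloc ... */
out:
        pthread_mutex_unlock(&tree_lock);       /* always released */
        free(prealloc);                         /* NULL-safe, frees leftovers */
        return err;
}

int main(void)
{
        printf("%d %d\n", set_bits(1), set_bits(0));
        return 0;
}
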
index dafdfa0..97fbe93 100644 (file)
@@ -1167,6 +1167,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
        nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
                     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
                     (sizeof(struct page *)));
+       nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
+       nrptrs = max(nrptrs, 8);
        pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;
@@ -1387,7 +1389,11 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
                goto out;
        }
 
-       file_update_time(file);
+       err = btrfs_update_time(file);
+       if (err) {
+               mutex_unlock(&inode->i_mutex);
+               goto out;
+       }
        BTRFS_I(inode)->sequence++;
 
        start_pos = round_down(pos, root->sectorsize);
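
The first file.c hunk above caps the number of page pointers grabbed per pass at the writer's remaining dirty-throttling headroom (nr_dirtied_pause - nr_dirtied) while keeping a floor of 8, so a throttled writer copies data in small batches instead of dirtying a large batch and then stalling. The second hunk swaps file_update_time() for btrfs_update_time() so a failure while updating timestamps is propagated instead of ignored. A toy sketch of the clamp (illustrative numbers only):

#include <stdio.h>

/* Clamp a write batch to the caller's remaining dirty-throttle budget. */
static long clamp_batch(long batch, long nr_dirtied_pause, long nr_dirtied)
{
        long budget = nr_dirtied_pause - nr_dirtied;

        if (batch > budget)
                batch = budget;
        if (batch < 8)          /* but always allow some forward progress */
                batch = 8;
        return batch;
}

int main(void)
{
        printf("%ld\n", clamp_batch(128, 32, 30));      /* budget 2   -> 8   */
        printf("%ld\n", clamp_batch(128, 512, 0));      /* budget 512 -> 128 */
        return 0;
}
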
index 2c984f7..0a6b928 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/falloc.h>
 #include <linux/slab.h>
 #include <linux/ratelimit.h>
+#include <linux/mount.h>
 #include "compat.h"
 #include "ctree.h"
 #include "disk-io.h"
@@ -2031,7 +2032,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
        /* insert an orphan item to track this unlinked/truncated file */
        if (insert >= 1) {
                ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
-               BUG_ON(ret);
+               BUG_ON(ret && ret != -EEXIST);
        }
 
        /* insert an orphan item to track subvolume contains orphan files */
@@ -2158,6 +2159,38 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                if (ret && ret != -ESTALE)
                        goto out;
 
+               if (ret == -ESTALE && root == root->fs_info->tree_root) {
+                       struct btrfs_root *dead_root;
+                       struct btrfs_fs_info *fs_info = root->fs_info;
+                       int is_dead_root = 0;
+
+                       /*
+                        * this is an orphan in the tree root. Currently these
+                        * could come from 2 sources:
+                        *  a) a snapshot deletion in progress
+                        *  b) a free space cache inode
+                        * We need to distinguish those two, as the snapshot
+                        * orphan must not get deleted.
+                        * find_dead_roots already ran before us, so if this
+                        * is a snapshot deletion, we should find the root
+                        * in the dead_roots list
+                        */
+                       spin_lock(&fs_info->trans_lock);
+                       list_for_each_entry(dead_root, &fs_info->dead_roots,
+                                           root_list) {
+                               if (dead_root->root_key.objectid ==
+                                   found_key.objectid) {
+                                       is_dead_root = 1;
+                                       break;
+                               }
+                       }
+                       spin_unlock(&fs_info->trans_lock);
+                       if (is_dead_root) {
+                               /* prevent this orphan from being found again */
+                               key.offset = found_key.objectid - 1;
+                               continue;
+                       }
+               }
                /*
                 * Inode is already gone but the orphan item is still there,
                 * kill the orphan item.
@@ -2191,7 +2224,14 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                                continue;
                        }
                        nr_truncate++;
+                       /*
+                        * Need to hold the i_mutex for reservation purposes; not
+                        * a huge deal here, but I have a WARN_ON in
+                        * btrfs_delalloc_reserve_space to catch offenders.
+                        */
+                       mutex_lock(&inode->i_mutex);
                        ret = btrfs_truncate(inode);
+                       mutex_unlock(&inode->i_mutex);
                } else {
                        nr_unlink++;
                }
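
The orphan-cleanup hunks above handle two distinct cases: tree-root orphans that belong to a snapshot deletion in progress (their roots sit on fs_info->dead_roots and must be left alone, so the scan simply steps its search key past them) versus genuinely stale items that still get removed; truncation now also runs under i_mutex so the delalloc reservation accounting stays serialized. A simplified sketch of the dead-root lookup (hypothetical types, not the btrfs structures):

#include <stdio.h>

struct dead_root {
        unsigned long long objectid;
        struct dead_root *next;
};

/* Return 1 if this orphan's objectid matches a root queued for deletion. */
static int is_dead_root(const struct dead_root *list, unsigned long long objectid)
{
        for (; list; list = list->next)
                if (list->objectid == objectid)
                        return 1;
        return 0;
}

int main(void)
{
        struct dead_root snap2 = { 260, NULL };
        struct dead_root snap1 = { 257, &snap2 };
        unsigned long long found_objectid = 260;

        if (is_dead_root(&snap1, found_objectid)) {
                /* snapshot still being deleted: keep its orphan, search below it */
                unsigned long long next_key_offset = found_objectid - 1;
                printf("skipping, resume scan at %llu\n", next_key_offset);
        }
        return 0;
}
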
@@ -3327,7 +3367,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
                        u64 hint_byte = 0;
                        hole_size = last_byte - cur_offset;
 
-                       trans = btrfs_start_transaction(root, 2);
+                       trans = btrfs_start_transaction(root, 3);
                        if (IS_ERR(trans)) {
                                err = PTR_ERR(trans);
                                break;
@@ -3337,6 +3377,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
                                                 cur_offset + hole_size,
                                                 &hint_byte, 1);
                        if (err) {
+                               btrfs_update_inode(trans, root, inode);
                                btrfs_end_transaction(trans, root);
                                break;
                        }
@@ -3346,6 +3387,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
                                        0, hole_size, 0, hole_size,
                                        0, 0, 0);
                        if (err) {
+                               btrfs_update_inode(trans, root, inode);
                                btrfs_end_transaction(trans, root);
                                break;
                        }
@@ -3353,6 +3395,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
                        btrfs_drop_extent_cache(inode, hole_start,
                                        last_byte - 1, 0);
 
+                       btrfs_update_inode(trans, root, inode);
                        btrfs_end_transaction(trans, root);
                }
                free_extent_map(em);
@@ -3370,6 +3413,8 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
 
 static int btrfs_setsize(struct inode *inode, loff_t newsize)
 {
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+       struct btrfs_trans_handle *trans;
        loff_t oldsize = i_size_read(inode);
        int ret;
 
@@ -3377,16 +3422,19 @@ static int btrfs_setsize(struct inode *inode, loff_t newsize)
                return 0;
 
        if (newsize > oldsize) {
-               i_size_write(inode, newsize);
-               btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
                truncate_pagecache(inode, oldsize, newsize);
                ret = btrfs_cont_expand(inode, oldsize, newsize);
-               if (ret) {
-                       btrfs_setsize(inode, oldsize);
+               if (ret)
                        return ret;
-               }
 
-               mark_inode_dirty(inode);
+               trans = btrfs_start_transaction(root, 1);
+               if (IS_ERR(trans))
+                       return PTR_ERR(trans);
+
+               i_size_write(inode, newsize);
+               btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
+               ret = btrfs_update_inode(trans, root, inode);
+               btrfs_end_transaction_throttle(trans, root);
        } else {
 
                /*
@@ -3426,9 +3474,9 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
 
        if (attr->ia_valid) {
                setattr_copy(inode, attr);
-               mark_inode_dirty(inode);
+               err = btrfs_dirty_inode(inode);
 
-               if (attr->ia_valid & ATTR_MODE)
+               if (!err && attr->ia_valid & ATTR_MODE)
                        err = btrfs_acl_chmod(inode);
        }
 
@@ -4204,42 +4252,80 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
  * FIXME, needs more benchmarking...there are no reasons other than performance
  * to keep or drop this code.
  */
-void btrfs_dirty_inode(struct inode *inode, int flags)
+int btrfs_dirty_inode(struct inode *inode)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        int ret;
 
        if (BTRFS_I(inode)->dummy_inode)
-               return;
+               return 0;
 
        trans = btrfs_join_transaction(root);
-       BUG_ON(IS_ERR(trans));
+       if (IS_ERR(trans))
+               return PTR_ERR(trans);
 
        ret = btrfs_update_inode(trans, root, inode);
        if (ret && ret == -ENOSPC) {
                /* whoops, lets try again with the full transaction */
                btrfs_end_transaction(trans, root);
                trans = btrfs_start_transaction(root, 1);
-               if (IS_ERR(trans)) {
-                       printk_ratelimited(KERN_ERR "btrfs: fail to "
-                                      "dirty  inode %llu error %ld\n",
-                                      (unsigned long long)btrfs_ino(inode),
-                                      PTR_ERR(trans));
-                       return;
-               }
+               if (IS_ERR(trans))
+                       return PTR_ERR(trans);
 
                ret = btrfs_update_inode(trans, root, inode);
-               if (ret) {
-                       printk_ratelimited(KERN_ERR "btrfs: fail to "
-                                      "dirty  inode %llu error %d\n",
-                                      (unsigned long long)btrfs_ino(inode),
-                                      ret);
-               }
        }
        btrfs_end_transaction(trans, root);
        if (BTRFS_I(inode)->delayed_node)
                btrfs_balance_delayed_items(root);
+
+       return ret;
+}
+
+/*
+ * This is a copy of file_update_time.  We need it so that we can return an
+ * error (such as ENOSPC) from the inode update in the file write and mmap paths.
+ */
+int btrfs_update_time(struct file *file)
+{
+       struct inode *inode = file->f_path.dentry->d_inode;
+       struct timespec now;
+       int ret;
+       enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;
+
+       /* First try to exhaust all avenues to not sync */
+       if (IS_NOCMTIME(inode))
+               return 0;
+
+       now = current_fs_time(inode->i_sb);
+       if (!timespec_equal(&inode->i_mtime, &now))
+               sync_it = S_MTIME;
+
+       if (!timespec_equal(&inode->i_ctime, &now))
+               sync_it |= S_CTIME;
+
+       if (IS_I_VERSION(inode))
+               sync_it |= S_VERSION;
+
+       if (!sync_it)
+               return 0;
+
+       /* Finally allowed to write? Takes lock. */
+       if (mnt_want_write_file(file))
+               return 0;
+
+       /* Only change inode inside the lock region */
+       if (sync_it & S_VERSION)
+               inode_inc_iversion(inode);
+       if (sync_it & S_CTIME)
+               inode->i_ctime = now;
+       if (sync_it & S_MTIME)
+               inode->i_mtime = now;
+       ret = btrfs_dirty_inode(inode);
+       if (!ret)
+               mark_inode_dirty_sync(inode);
+       mnt_drop_write(file->f_path.mnt);
+       return ret;
 }
 
 /*
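
btrfs_dirty_inode now reports failures to its callers, and btrfs_update_time reimplements file_update_time's decision logic so those failures propagate: build a bitmask of what actually changed (mtime, ctime, i_version), bail out early when it is zero, and only then dirty the inode. A stripped-down user-space sketch of that decision step (flag names are illustrative):

#include <stdio.h>
#include <time.h>

enum { SYNC_MTIME = 1, SYNC_CTIME = 2, SYNC_VERSION = 4 };

static int ts_equal(const struct timespec *a, const struct timespec *b)
{
        return a->tv_sec == b->tv_sec && a->tv_nsec == b->tv_nsec;
}

static int what_to_sync(const struct timespec *mtim,
                        const struct timespec *ctim,
                        const struct timespec *now, int track_version)
{
        int sync_it = 0;

        if (!ts_equal(mtim, now))
                sync_it |= SYNC_MTIME;
        if (!ts_equal(ctim, now))
                sync_it |= SYNC_CTIME;
        if (track_version)
                sync_it |= SYNC_VERSION;
        return sync_it;         /* 0 means nothing to do: skip the transaction */
}

int main(void)
{
        struct timespec now = { 100, 0 }, stale = { 50, 0 };

        printf("flags=%d\n", what_to_sync(&stale, &now, &now, 0));  /* 1: mtime only */
        printf("flags=%d\n", what_to_sync(&now, &now, &now, 0));    /* 0: nothing    */
        return 0;
}
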
@@ -4555,11 +4641,18 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
                goto out_unlock;
        }
 
+       /*
+        * If the active LSM wants to access the inode during
+        * d_instantiate, it needs these. Smack checks to see
+        * if the filesystem supports xattrs by looking at the
+        * ops vector.
+        */
+
+       inode->i_op = &btrfs_special_inode_operations;
        err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
        if (err)
                drop_inode = 1;
        else {
-               inode->i_op = &btrfs_special_inode_operations;
                init_special_inode(inode, inode->i_mode, rdev);
                btrfs_update_inode(trans, root, inode);
        }
@@ -4613,14 +4706,21 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
                goto out_unlock;
        }
 
+       /*
+        * If the active LSM wants to access the inode during
+        * d_instantiate, it needs these. Smack checks to see
+        * if the filesystem supports xattrs by looking at the
+        * ops vector.
+        */
+       inode->i_fop = &btrfs_file_operations;
+       inode->i_op = &btrfs_file_inode_operations;
+
        err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
        if (err)
                drop_inode = 1;
        else {
                inode->i_mapping->a_ops = &btrfs_aops;
                inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
-               inode->i_fop = &btrfs_file_operations;
-               inode->i_op = &btrfs_file_inode_operations;
                BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
        }
 out_unlock:
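
The mknod and create hunks (and the symlink hunk further down) move the i_op/i_fop assignments ahead of btrfs_add_nondir, i.e. ahead of d_instantiate, because a security module such as Smack probes the ops vector from its d_instantiate hook to decide whether the filesystem supports xattrs. A contrived user-space sketch of why the ordering matters (the hook and structures are stand-ins, not the LSM API):

#include <stdio.h>

struct inode_ops { int has_xattr; };
struct inode { const struct inode_ops *i_op; };

/* Stand-in for a security hook invoked from d_instantiate(). */
static int security_d_instantiate(const struct inode *inode)
{
        if (!inode->i_op || !inode->i_op->has_xattr)
                return -1;      /* looks like a filesystem without xattrs */
        return 0;
}

int main(void)
{
        static const struct inode_ops file_ops = { 1 };
        struct inode inode = { 0 };

        inode.i_op = &file_ops;                                 /* set ops first ...   */
        printf("hook: %d\n", security_d_instantiate(&inode));   /* ... then instantiate */
        return 0;
}
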
@@ -6303,7 +6403,12 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        u64 page_start;
        u64 page_end;
 
+       /* Need this to keep space reservations serialized */
+       mutex_lock(&inode->i_mutex);
        ret  = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
+       mutex_unlock(&inode->i_mutex);
+       if (!ret)
+               ret = btrfs_update_time(vma->vm_file);
        if (ret) {
                if (ret == -ENOMEM)
                        ret = VM_FAULT_OOM;
@@ -6515,8 +6620,9 @@ static int btrfs_truncate(struct inode *inode)
                        /* Just need the 1 for updating the inode */
                        trans = btrfs_start_transaction(root, 1);
                        if (IS_ERR(trans)) {
-                               err = PTR_ERR(trans);
-                               goto out;
+                               ret = err = PTR_ERR(trans);
+                               trans = NULL;
+                               break;
                        }
                }
 
@@ -7076,14 +7182,21 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
                goto out_unlock;
        }
 
+       /*
+        * If the active LSM wants to access the inode during
+        * d_instantiate, it needs these. Smack checks to see
+        * if the filesystem supports xattrs by looking at the
+        * ops vector.
+        */
+       inode->i_fop = &btrfs_file_operations;
+       inode->i_op = &btrfs_file_inode_operations;
+
        err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
        if (err)
                drop_inode = 1;
        else {
                inode->i_mapping->a_ops = &btrfs_aops;
                inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
-               inode->i_fop = &btrfs_file_operations;
-               inode->i_op = &btrfs_file_inode_operations;
                BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
        }
        if (drop_inode)
@@ -7353,6 +7466,7 @@ static const struct inode_operations btrfs_symlink_inode_operations = {
        .follow_link    = page_follow_link_light,
        .put_link       = page_put_link,
        .getattr        = btrfs_getattr,
+       .setattr        = btrfs_setattr,
        .permission     = btrfs_permission,
        .setxattr       = btrfs_setxattr,
        .getxattr       = btrfs_getxattr,
index 72d4616..c04f02c 100644 (file)
@@ -252,11 +252,11 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
        trans = btrfs_join_transaction(root);
        BUG_ON(IS_ERR(trans));
 
+       btrfs_update_iflags(inode);
+       inode->i_ctime = CURRENT_TIME;
        ret = btrfs_update_inode(trans, root, inode);
        BUG_ON(ret);
 
-       btrfs_update_iflags(inode);
-       inode->i_ctime = CURRENT_TIME;
        btrfs_end_transaction(trans, root);
 
        mnt_drop_write(file->f_path.mnt);
@@ -858,8 +858,10 @@ static int cluster_pages_for_defrag(struct inode *inode,
                return 0;
        file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
 
+       mutex_lock(&inode->i_mutex);
        ret = btrfs_delalloc_reserve_space(inode,
                                           num_pages << PAGE_CACHE_SHIFT);
+       mutex_unlock(&inode->i_mutex);
        if (ret)
                return ret;
 again:
index dff29d5..cfb5543 100644 (file)
@@ -2947,7 +2947,9 @@ static int relocate_file_extent_cluster(struct inode *inode,
        index = (cluster->start - offset) >> PAGE_CACHE_SHIFT;
        last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT;
        while (index <= last_index) {
+               mutex_lock(&inode->i_mutex);
                ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE);
+               mutex_unlock(&inode->i_mutex);
                if (ret)
                        goto out;
 
index c27bcb6..ddf2c90 100644 (file)
@@ -1535,18 +1535,22 @@ static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
 static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
 {
        struct btrfs_fs_info *fs_info = root->fs_info;
+       int ret = 0;
 
        mutex_lock(&fs_info->scrub_lock);
        if (fs_info->scrub_workers_refcnt == 0) {
                btrfs_init_workers(&fs_info->scrub_workers, "scrub",
                           fs_info->thread_pool_size, &fs_info->generic_worker);
                fs_info->scrub_workers.idle_thresh = 4;
-               btrfs_start_workers(&fs_info->scrub_workers, 1);
+               ret = btrfs_start_workers(&fs_info->scrub_workers);
+               if (ret)
+                       goto out;
        }
        ++fs_info->scrub_workers_refcnt;
+out:
        mutex_unlock(&fs_info->scrub_lock);
 
-       return 0;
+       return ret;
 }
 
 static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
index e28ad4b..200f63b 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/slab.h>
 #include <linux/cleancache.h>
 #include <linux/mnt_namespace.h>
+#include <linux/ratelimit.h>
 #include "compat.h"
 #include "delayed-inode.h"
 #include "ctree.h"
@@ -1053,7 +1054,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
        u64 avail_space;
        u64 used_space;
        u64 min_stripe_size;
-       int min_stripes = 1;
+       int min_stripes = 1, num_stripes = 1;
        int i = 0, nr_devices;
        int ret;
 
@@ -1067,12 +1068,16 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
 
         /* calc min stripe number for data space allocation */
        type = btrfs_get_alloc_profile(root, 1);
-       if (type & BTRFS_BLOCK_GROUP_RAID0)
+       if (type & BTRFS_BLOCK_GROUP_RAID0) {
                min_stripes = 2;
-       else if (type & BTRFS_BLOCK_GROUP_RAID1)
+               num_stripes = nr_devices;
+       } else if (type & BTRFS_BLOCK_GROUP_RAID1) {
                min_stripes = 2;
-       else if (type & BTRFS_BLOCK_GROUP_RAID10)
+               num_stripes = 2;
+       } else if (type & BTRFS_BLOCK_GROUP_RAID10) {
                min_stripes = 4;
+               num_stripes = 4;
+       }
 
        if (type & BTRFS_BLOCK_GROUP_DUP)
                min_stripe_size = 2 * BTRFS_STRIPE_LEN;
@@ -1141,13 +1146,16 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
        i = nr_devices - 1;
        avail_space = 0;
        while (nr_devices >= min_stripes) {
+               if (num_stripes > nr_devices)
+                       num_stripes = nr_devices;
+
                if (devices_info[i].max_avail >= min_stripe_size) {
                        int j;
                        u64 alloc_size;
 
-                       avail_space += devices_info[i].max_avail * min_stripes;
+                       avail_space += devices_info[i].max_avail * num_stripes;
                        alloc_size = devices_info[i].max_avail;
-                       for (j = i + 1 - min_stripes; j <= i; j++)
+                       for (j = i + 1 - num_stripes; j <= i; j++)
                                devices_info[j].max_avail -= alloc_size;
                }
                i--;
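
The two super.c hunks change the statfs estimate to multiply each device's contribution by num_stripes, the stripe width the RAID profile would actually use (capped at the devices still in play), instead of the profile's minimum. A toy model of the resulting accumulation, assuming the per-device free-space array is sorted largest-first; it ignores the minimum-stripe-size check and the raw-to-data conversion for redundant profiles:

#include <stdio.h>

/*
 * Walk from the smallest device upwards; each pass stripes across
 * num_stripes devices (never more than remain), contributing the
 * smallest member's free space times the stripe count and consuming
 * that much from every member it used.
 */
static unsigned long long estimate(unsigned long long *free_bytes,
                                   int nr_devices, int min_stripes,
                                   int num_stripes)
{
        unsigned long long avail = 0;
        int i = nr_devices - 1;                 /* smallest free space */

        while (nr_devices >= min_stripes) {
                unsigned long long chunk = free_bytes[i];
                int j;

                if (num_stripes > nr_devices)
                        num_stripes = nr_devices;

                if (chunk) {
                        avail += chunk * num_stripes;
                        for (j = i + 1 - num_stripes; j <= i; j++)
                                free_bytes[j] -= chunk;
                }
                i--;
                nr_devices--;
        }
        return avail;
}

int main(void)
{
        unsigned long long devs[] = { 100, 100, 40 };   /* GiB, largest first */

        /* RAID0-style: 40*3 + 60*2 = 240, i.e. all raw space is usable */
        printf("%llu GiB\n", estimate(devs, 3, 2, 3));
        return 0;
}
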
@@ -1264,6 +1272,16 @@ static int btrfs_unfreeze(struct super_block *sb)
        return 0;
 }
 
+static void btrfs_fs_dirty_inode(struct inode *inode, int flags)
+{
+       int ret;
+
+       ret = btrfs_dirty_inode(inode);
+       if (ret)
+               printk_ratelimited(KERN_ERR "btrfs: fail to dirty inode %Lu "
+                                  "error %d\n", btrfs_ino(inode), ret);
+}
+
 static const struct super_operations btrfs_super_ops = {
        .drop_inode     = btrfs_drop_inode,
        .evict_inode    = btrfs_evict_inode,
@@ -1271,7 +1289,7 @@ static const struct super_operations btrfs_super_ops = {
        .sync_fs        = btrfs_sync_fs,
        .show_options   = btrfs_show_options,
        .write_inode    = btrfs_write_inode,
-       .dirty_inode    = btrfs_dirty_inode,
+       .dirty_inode    = btrfs_fs_dirty_inode,
        .alloc_inode    = btrfs_alloc_inode,
        .destroy_inode  = btrfs_destroy_inode,
        .statfs         = btrfs_statfs,
index c37433d..f4b839f 100644 (file)
@@ -295,6 +295,12 @@ loop_lock:
                        btrfs_requeue_work(&device->work);
                        goto done;
                }
+               /* unplug every 64 requests just for good measure */
+               if (batch_run % 64 == 0) {
+                       blk_finish_plug(&plug);
+                       blk_start_plug(&plug);
+                       sync_pending = 0;
+               }
        }
 
        cond_resched();
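
The added check in the worker loop finishes and restarts the block plug every 64 submitted requests, so a long submission run periodically lets its queued I/O reach the device instead of sitting in the plug until the loop ends. A user-space sketch of that periodic flush (start_batch/finish_batch are stand-ins for blk_start_plug/blk_finish_plug):

#include <stdio.h>

#define FLUSH_EVERY 64

static void start_batch(void) { }
static void finish_batch(void) { printf("flushing queued requests\n"); }

int main(void)
{
        int batch_run = 0;
        int i;

        start_batch();
        for (i = 0; i < 200; i++) {
                batch_run++;                            /* one request submitted */
                if (batch_run % FLUSH_EVERY == 0) {
                        finish_batch();                 /* let the device start */
                        start_batch();                  /* keep batching */
                }
        }
        finish_batch();
        return 0;
}
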
@@ -1611,7 +1617,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
                return -EINVAL;
 
-       bdev = blkdev_get_by_path(device_path, FMODE_EXCL,
+       bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
                                  root->fs_info->bdev_holder);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);
@@ -3258,7 +3264,7 @@ static void btrfs_end_bio(struct bio *bio, int err)
                 */
                if (atomic_read(&bbio->error) > bbio->max_errors) {
                        err = -EIO;
-               } else if (err) {
+               } else {
                        /*
                         * this bio is actually up to date, we didn't
                         * go over the max number of errors
index 4144caf..173b1d2 100644 (file)
@@ -87,7 +87,7 @@ static int ceph_set_page_dirty(struct page *page)
        snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context);
 
        /* dirty the head */
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (ci->i_head_snapc == NULL)
                ci->i_head_snapc = ceph_get_snap_context(snapc);
        ++ci->i_wrbuffer_ref_head;
@@ -100,7 +100,7 @@ static int ceph_set_page_dirty(struct page *page)
             ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
             ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
             snapc, snapc->seq, snapc->num_snaps);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        /* now adjust page */
        spin_lock_irq(&mapping->tree_lock);
@@ -391,7 +391,7 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
        struct ceph_snap_context *snapc = NULL;
        struct ceph_cap_snap *capsnap = NULL;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
                dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
                     capsnap->context, capsnap->dirty_pages);
@@ -407,7 +407,7 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
                dout(" head snapc %p has %d dirty pages\n",
                     snapc, ci->i_wrbuffer_ref_head);
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return snapc;
 }
 
index 0f327c6..8b53193 100644 (file)
@@ -309,7 +309,7 @@ void ceph_reservation_status(struct ceph_fs_client *fsc,
 /*
  * Find ceph_cap for given mds, if any.
  *
- * Called with i_lock held.
+ * Called with i_ceph_lock held.
  */
 static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
 {
@@ -332,9 +332,9 @@ struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
 {
        struct ceph_cap *cap;
 
-       spin_lock(&ci->vfs_inode.i_lock);
+       spin_lock(&ci->i_ceph_lock);
        cap = __get_cap_for_mds(ci, mds);
-       spin_unlock(&ci->vfs_inode.i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return cap;
 }
 
@@ -361,15 +361,16 @@ static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
 
 int ceph_get_cap_mds(struct inode *inode)
 {
+       struct ceph_inode_info *ci = ceph_inode(inode);
        int mds;
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        mds = __ceph_get_cap_mds(ceph_inode(inode));
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return mds;
 }
 
 /*
- * Called under i_lock.
+ * Called under i_ceph_lock.
  */
 static void __insert_cap_node(struct ceph_inode_info *ci,
                              struct ceph_cap *new)
@@ -415,7 +416,7 @@ static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
  *
  * If I_FLUSH is set, leave the inode at the front of the list.
  *
- * Caller holds i_lock
+ * Caller holds i_ceph_lock
  *    -> we take mdsc->cap_delay_lock
  */
 static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
@@ -457,7 +458,7 @@ static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
 /*
  * Cancel delayed work on cap.
  *
- * Caller must hold i_lock.
+ * Caller must hold i_ceph_lock.
  */
 static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
                               struct ceph_inode_info *ci)
@@ -532,14 +533,14 @@ int ceph_add_cap(struct inode *inode,
                wanted |= ceph_caps_for_mode(fmode);
 
 retry:
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        cap = __get_cap_for_mds(ci, mds);
        if (!cap) {
                if (new_cap) {
                        cap = new_cap;
                        new_cap = NULL;
                } else {
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                        new_cap = get_cap(mdsc, caps_reservation);
                        if (new_cap == NULL)
                                return -ENOMEM;
@@ -625,7 +626,7 @@ retry:
 
        if (fmode >= 0)
                __ceph_get_fmode(ci, fmode);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        wake_up_all(&ci->i_cap_wq);
        return 0;
 }
@@ -792,7 +793,7 @@ int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
        struct rb_node *p;
        int ret = 0;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (__cap_is_valid(cap) &&
@@ -801,7 +802,7 @@ int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
                        break;
                }
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        dout("ceph_caps_revoking %p %s = %d\n", inode,
             ceph_cap_string(mask), ret);
        return ret;
@@ -855,7 +856,7 @@ int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
 }
 
 /*
- * called under i_lock
+ * called under i_ceph_lock
  */
 static int __ceph_is_any_caps(struct ceph_inode_info *ci)
 {
@@ -865,7 +866,7 @@ static int __ceph_is_any_caps(struct ceph_inode_info *ci)
 /*
  * Remove a cap.  Take steps to deal with a racing iterate_session_caps.
  *
- * caller should hold i_lock.
+ * caller should hold i_ceph_lock.
  * caller will not hold session s_mutex if called from destroy_inode.
  */
 void __ceph_remove_cap(struct ceph_cap *cap)
@@ -1028,7 +1029,7 @@ static void __queue_cap_release(struct ceph_mds_session *session,
 
 /*
  * Queue cap releases when an inode is dropped from our cache.  Since
- * inode is about to be destroyed, there is no need for i_lock.
+ * inode is about to be destroyed, there is no need for i_ceph_lock.
  */
 void ceph_queue_caps_release(struct inode *inode)
 {
@@ -1049,7 +1050,7 @@ void ceph_queue_caps_release(struct inode *inode)
 
 /*
  * Send a cap msg on the given inode.  Update our caps state, then
- * drop i_lock and send the message.
+ * drop i_ceph_lock and send the message.
  *
  * Make note of max_size reported/requested from mds, revoked caps
  * that have now been implemented.
@@ -1061,13 +1062,13 @@ void ceph_queue_caps_release(struct inode *inode)
  * Return non-zero if delayed release, or we experienced an error
  * such that the caller should requeue + retry later.
  *
- * called with i_lock, then drops it.
+ * called with i_ceph_lock, then drops it.
  * caller should hold snap_rwsem (read), s_mutex.
  */
 static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
                      int op, int used, int want, int retain, int flushing,
                      unsigned *pflush_tid)
-       __releases(cap->ci->vfs_inode->i_lock)
+       __releases(cap->ci->i_ceph_lock)
 {
        struct ceph_inode_info *ci = cap->ci;
        struct inode *inode = &ci->vfs_inode;
@@ -1170,7 +1171,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
                xattr_version = ci->i_xattrs.version;
        }
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
                op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
@@ -1198,13 +1199,13 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
  * Unless @again is true, skip cap_snaps that were already sent to
  * the MDS (i.e., during this session).
  *
- * Called under i_lock.  Takes s_mutex as needed.
+ * Called under i_ceph_lock.  Takes s_mutex as needed.
  */
 void __ceph_flush_snaps(struct ceph_inode_info *ci,
                        struct ceph_mds_session **psession,
                        int again)
-               __releases(ci->vfs_inode->i_lock)
-               __acquires(ci->vfs_inode->i_lock)
+               __releases(ci->i_ceph_lock)
+               __acquires(ci->i_ceph_lock)
 {
        struct inode *inode = &ci->vfs_inode;
        int mds;
@@ -1261,7 +1262,7 @@ retry:
                        session = NULL;
                }
                if (!session) {
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                        mutex_lock(&mdsc->mutex);
                        session = __ceph_lookup_mds_session(mdsc, mds);
                        mutex_unlock(&mdsc->mutex);
@@ -1275,7 +1276,7 @@ retry:
                         * deletion or migration.  retry, and we'll
                         * get a better @mds value next time.
                         */
-                       spin_lock(&inode->i_lock);
+                       spin_lock(&ci->i_ceph_lock);
                        goto retry;
                }
 
@@ -1285,7 +1286,7 @@ retry:
                        list_del_init(&capsnap->flushing_item);
                list_add_tail(&capsnap->flushing_item,
                              &session->s_cap_snaps_flushing);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
 
                dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
                     inode, capsnap, capsnap->follows, capsnap->flush_tid);
@@ -1302,7 +1303,7 @@ retry:
                next_follows = capsnap->follows + 1;
                ceph_put_cap_snap(capsnap);
 
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                goto retry;
        }
 
@@ -1322,11 +1323,9 @@ out:
 
 static void ceph_flush_snaps(struct ceph_inode_info *ci)
 {
-       struct inode *inode = &ci->vfs_inode;
-
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        __ceph_flush_snaps(ci, NULL, 0);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 /*
@@ -1373,7 +1372,7 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
  * Add dirty inode to the flushing list.  Assigned a seq number so we
  * can wait for caps to flush without starving.
  *
- * Called under i_lock.
+ * Called under i_ceph_lock.
  */
 static int __mark_caps_flushing(struct inode *inode,
                                 struct ceph_mds_session *session)
@@ -1421,9 +1420,9 @@ static int try_nonblocking_invalidate(struct inode *inode)
        struct ceph_inode_info *ci = ceph_inode(inode);
        u32 invalidating_gen = ci->i_rdcache_gen;
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        invalidate_mapping_pages(&inode->i_data, 0, -1);
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 
        if (inode->i_data.nrpages == 0 &&
            invalidating_gen == ci->i_rdcache_gen) {
@@ -1470,7 +1469,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
        if (mdsc->stopping)
                is_delayed = 1;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 
        if (ci->i_ceph_flags & CEPH_I_FLUSH)
                flags |= CHECK_CAPS_FLUSH;
@@ -1480,7 +1479,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
                __ceph_flush_snaps(ci, &session, 0);
        goto retry_locked;
 retry:
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 retry_locked:
        file_wanted = __ceph_caps_file_wanted(ci);
        used = __ceph_caps_used(ci);
@@ -1634,7 +1633,7 @@ ack:
                        if (mutex_trylock(&session->s_mutex) == 0) {
                                dout("inverting session/ino locks on %p\n",
                                     session);
-                               spin_unlock(&inode->i_lock);
+                               spin_unlock(&ci->i_ceph_lock);
                                if (took_snap_rwsem) {
                                        up_read(&mdsc->snap_rwsem);
                                        took_snap_rwsem = 0;
@@ -1648,7 +1647,7 @@ ack:
                        if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
                                dout("inverting snap/in locks on %p\n",
                                     inode);
-                               spin_unlock(&inode->i_lock);
+                               spin_unlock(&ci->i_ceph_lock);
                                down_read(&mdsc->snap_rwsem);
                                took_snap_rwsem = 1;
                                goto retry;
@@ -1664,10 +1663,10 @@ ack:
                mds = cap->mds;  /* remember mds, so we don't repeat */
                sent++;
 
-               /* __send_cap drops i_lock */
+               /* __send_cap drops i_ceph_lock */
                delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used, want,
                                      retain, flushing, NULL);
-               goto retry; /* retake i_lock and restart our cap scan. */
+               goto retry; /* retake i_ceph_lock and restart our cap scan. */
        }
 
        /*
@@ -1681,7 +1680,7 @@ ack:
        else if (!is_delayed || force_requeue)
                __cap_delay_requeue(mdsc, ci);
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (queue_invalidate)
                ceph_queue_invalidate(inode);
@@ -1704,7 +1703,7 @@ static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session,
        int flushing = 0;
 
 retry:
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
                dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
                goto out;
@@ -1716,7 +1715,7 @@ retry:
                int delayed;
 
                if (!session) {
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                        session = cap->session;
                        mutex_lock(&session->s_mutex);
                        goto retry;
@@ -1727,18 +1726,18 @@ retry:
 
                flushing = __mark_caps_flushing(inode, session);
 
-               /* __send_cap drops i_lock */
+               /* __send_cap drops i_ceph_lock */
                delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
                                     cap->issued | cap->implemented, flushing,
                                     flush_tid);
                if (!delayed)
                        goto out_unlocked;
 
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                __cap_delay_requeue(mdsc, ci);
        }
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 out_unlocked:
        if (session && unlock_session)
                mutex_unlock(&session->s_mutex);
@@ -1753,7 +1752,7 @@ static int caps_are_flushed(struct inode *inode, unsigned tid)
        struct ceph_inode_info *ci = ceph_inode(inode);
        int i, ret = 1;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        for (i = 0; i < CEPH_CAP_BITS; i++)
                if ((ci->i_flushing_caps & (1 << i)) &&
                    ci->i_cap_flush_tid[i] <= tid) {
@@ -1761,7 +1760,7 @@ static int caps_are_flushed(struct inode *inode, unsigned tid)
                        ret = 0;
                        break;
                }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return ret;
 }
 
@@ -1868,10 +1867,10 @@ int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
                struct ceph_mds_client *mdsc =
                        ceph_sb_to_client(inode->i_sb)->mdsc;
 
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                if (__ceph_caps_dirty(ci))
                        __cap_delay_requeue_front(mdsc, ci);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
        }
        return err;
 }
@@ -1894,7 +1893,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
                struct inode *inode = &ci->vfs_inode;
                struct ceph_cap *cap;
 
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                cap = ci->i_auth_cap;
                if (cap && cap->session == session) {
                        dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
@@ -1904,7 +1903,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
                        pr_err("%p auth cap %p not mds%d ???\n", inode,
                               cap, session->s_mds);
                }
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
        }
 }
 
@@ -1921,7 +1920,7 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
                struct ceph_cap *cap;
                int delayed = 0;
 
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                cap = ci->i_auth_cap;
                if (cap && cap->session == session) {
                        dout("kick_flushing_caps %p cap %p %s\n", inode,
@@ -1932,14 +1931,14 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
                                             cap->issued | cap->implemented,
                                             ci->i_flushing_caps, NULL);
                        if (delayed) {
-                               spin_lock(&inode->i_lock);
+                               spin_lock(&ci->i_ceph_lock);
                                __cap_delay_requeue(mdsc, ci);
-                               spin_unlock(&inode->i_lock);
+                               spin_unlock(&ci->i_ceph_lock);
                        }
                } else {
                        pr_err("%p auth cap %p not mds%d ???\n", inode,
                               cap, session->s_mds);
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                }
        }
 }
@@ -1952,7 +1951,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
        struct ceph_cap *cap;
        int delayed = 0;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        cap = ci->i_auth_cap;
        dout("kick_flushing_inode_caps %p flushing %s flush_seq %lld\n", inode,
             ceph_cap_string(ci->i_flushing_caps), ci->i_cap_flush_seq);
@@ -1964,12 +1963,12 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
                                     cap->issued | cap->implemented,
                                     ci->i_flushing_caps, NULL);
                if (delayed) {
-                       spin_lock(&inode->i_lock);
+                       spin_lock(&ci->i_ceph_lock);
                        __cap_delay_requeue(mdsc, ci);
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                }
        } else {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
        }
 }
 
@@ -1978,7 +1977,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
  * Take references to capabilities we hold, so that we don't release
  * them to the MDS prematurely.
  *
- * Protected by i_lock.
+ * Protected by i_ceph_lock.
  */
 static void __take_cap_refs(struct ceph_inode_info *ci, int got)
 {
@@ -2016,7 +2015,7 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
 
        dout("get_cap_refs %p need %s want %s\n", inode,
             ceph_cap_string(need), ceph_cap_string(want));
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 
        /* make sure file is actually open */
        file_wanted = __ceph_caps_file_wanted(ci);
@@ -2077,7 +2076,7 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
                     ceph_cap_string(have), ceph_cap_string(need));
        }
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        dout("get_cap_refs %p ret %d got %s\n", inode,
             ret, ceph_cap_string(*got));
        return ret;
@@ -2094,7 +2093,7 @@ static void check_max_size(struct inode *inode, loff_t endoff)
        int check = 0;
 
        /* do we need to explicitly request a larger max_size? */
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if ((endoff >= ci->i_max_size ||
             endoff > (inode->i_size << 1)) &&
            endoff > ci->i_wanted_max_size) {
@@ -2103,7 +2102,7 @@ static void check_max_size(struct inode *inode, loff_t endoff)
                ci->i_wanted_max_size = endoff;
                check = 1;
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        if (check)
                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
 }
@@ -2140,9 +2139,9 @@ retry:
  */
 void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
 {
-       spin_lock(&ci->vfs_inode.i_lock);
+       spin_lock(&ci->i_ceph_lock);
        __take_cap_refs(ci, caps);
-       spin_unlock(&ci->vfs_inode.i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 /*
@@ -2160,7 +2159,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
        int last = 0, put = 0, flushsnaps = 0, wake = 0;
        struct ceph_cap_snap *capsnap;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (had & CEPH_CAP_PIN)
                --ci->i_pin_ref;
        if (had & CEPH_CAP_FILE_RD)
@@ -2193,7 +2192,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
                                }
                        }
                }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
             last ? " last" : "", put ? " put" : "");
@@ -2225,7 +2224,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
        int found = 0;
        struct ceph_cap_snap *capsnap = NULL;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        ci->i_wrbuffer_ref -= nr;
        last = !ci->i_wrbuffer_ref;
 
@@ -2274,7 +2273,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
                }
        }
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (last) {
                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
@@ -2291,7 +2290,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
  * Handle a cap GRANT message from the MDS.  (Note that a GRANT may
  * actually be a revocation if it specifies a smaller cap set.)
  *
- * caller holds s_mutex and i_lock, we drop both.
+ * caller holds s_mutex and i_ceph_lock, we drop both.
  *
  * return value:
  *  0 - ok
@@ -2302,7 +2301,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
                             struct ceph_mds_session *session,
                             struct ceph_cap *cap,
                             struct ceph_buffer *xattr_buf)
-               __releases(inode->i_lock)
+               __releases(ci->i_ceph_lock)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
        int mds = session->s_mds;
@@ -2453,7 +2452,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
        }
        BUG_ON(cap->issued & ~cap->implemented);
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        if (writeback)
                /*
                 * queue inode for writeback: we can't actually call
@@ -2483,7 +2482,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
                                 struct ceph_mds_caps *m,
                                 struct ceph_mds_session *session,
                                 struct ceph_cap *cap)
-       __releases(inode->i_lock)
+       __releases(ci->i_ceph_lock)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
@@ -2539,7 +2538,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
        wake_up_all(&ci->i_cap_wq);
 
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        if (drop)
                iput(inode);
 }
@@ -2562,7 +2561,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
        dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
             inode, ci, session->s_mds, follows);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
                if (capsnap->follows == follows) {
                        if (capsnap->flush_tid != flush_tid) {
@@ -2585,7 +2584,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
                             capsnap, capsnap->follows);
                }
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        if (drop)
                iput(inode);
 }
@@ -2598,7 +2597,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
 static void handle_cap_trunc(struct inode *inode,
                             struct ceph_mds_caps *trunc,
                             struct ceph_mds_session *session)
-       __releases(inode->i_lock)
+       __releases(ci->i_ceph_lock)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
        int mds = session->s_mds;
@@ -2617,7 +2616,7 @@ static void handle_cap_trunc(struct inode *inode,
             inode, mds, seq, truncate_size, truncate_seq);
        queue_trunc = ceph_fill_file_size(inode, issued,
                                          truncate_seq, truncate_size, size);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (queue_trunc)
                ceph_queue_vmtruncate(inode);
@@ -2646,7 +2645,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
        dout("handle_cap_export inode %p ci %p mds%d mseq %d\n",
             inode, ci, mds, mseq);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 
        /* make sure we haven't seen a higher mseq */
        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
@@ -2690,7 +2689,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
        }
        /* else, we already released it */
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 /*
@@ -2745,9 +2744,9 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
        up_read(&mdsc->snap_rwsem);
 
        /* make sure we re-request max_size, if necessary */
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        ci->i_requested_max_size = 0;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 /*
@@ -2762,6 +2761,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
        struct ceph_mds_client *mdsc = session->s_mdsc;
        struct super_block *sb = mdsc->fsc->sb;
        struct inode *inode;
+       struct ceph_inode_info *ci;
        struct ceph_cap *cap;
        struct ceph_mds_caps *h;
        int mds = session->s_mds;
@@ -2815,6 +2815,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
 
        /* lookup ino */
        inode = ceph_find_inode(sb, vino);
+       ci = ceph_inode(inode);
        dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
             vino.snap, inode);
        if (!inode) {
@@ -2844,16 +2845,16 @@ void ceph_handle_caps(struct ceph_mds_session *session,
        }
 
        /* the rest require a cap */
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        cap = __get_cap_for_mds(ceph_inode(inode), mds);
        if (!cap) {
                dout(" no cap on %p ino %llx.%llx from mds%d\n",
                     inode, ceph_ino(inode), ceph_snap(inode), mds);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                goto flush_cap_releases;
        }
 
-       /* note that each of these drops i_lock for us */
+       /* note that each of these drops i_ceph_lock for us */
        switch (op) {
        case CEPH_CAP_OP_REVOKE:
        case CEPH_CAP_OP_GRANT:
@@ -2869,7 +2870,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
                break;
 
        default:
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
                       ceph_cap_op_name(op));
        }
@@ -2962,13 +2963,13 @@ void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
        struct inode *inode = &ci->vfs_inode;
        int last = 0;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
             ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
        BUG_ON(ci->i_nr_by_mode[fmode] == 0);
        if (--ci->i_nr_by_mode[fmode] == 0)
                last++;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (last && ci->i_vino.snap == CEPH_NOSNAP)
                ceph_check_caps(ci, 0, NULL);
@@ -2991,7 +2992,7 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
        int used, dirty;
        int ret = 0;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        used = __ceph_caps_used(ci);
        dirty = __ceph_caps_dirty(ci);
 
@@ -3046,7 +3047,7 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
                             inode, cap, ceph_cap_string(cap->issued));
                }
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return ret;
 }
 
@@ -3061,7 +3062,7 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
 
        /*
         * force a record for the directory caps if we have a dentry lease.
-        * this is racy (can't take i_lock and d_lock together), but it
+        * this is racy (can't take i_ceph_lock and d_lock together), but it
         * doesn't have to be perfect; the mds will revoke anything we don't
         * release.
         */
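
The "each of these drops i_ceph_lock for us" comment above describes a hand-off convention: ceph_handle_caps() takes ci->i_ceph_lock and the per-op handlers are responsible for releasing it before they return, so they can sleep afterwards. A minimal sketch of that convention, using the same sparse __releases() annotation that appears in the xattr hunk further down; the handler name is hypothetical.

static void example_handle_op(struct ceph_inode_info *ci)
        __releases(ci->i_ceph_lock)
{
        /* cap state may only be touched while the lock is held */
        /* ... update ci->i_caps / ci->i_ceph_flags here ... */

        spin_unlock(&ci->i_ceph_lock);

        /* sleepable work (MDS messages, allocations) happens unlocked */
}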
index bca3948..3eeb976 100644 (file)
@@ -281,18 +281,18 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
        }
 
        /* can we use the dcache? */
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if ((filp->f_pos == 2 || fi->dentry) &&
            !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
            ceph_snap(inode) != CEPH_SNAPDIR &&
            ceph_dir_test_complete(inode) &&
            __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                err = __dcache_readdir(filp, dirent, filldir);
                if (err != -EAGAIN)
                        return err;
        } else {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
        }
        if (fi->dentry) {
                err = note_last_dentry(fi, fi->dentry->d_name.name,
@@ -428,12 +428,12 @@ more:
         * were released during the whole readdir, and we should have
         * the complete dir contents in our cache.
         */
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (ci->i_release_count == fi->dir_release_count) {
                ceph_dir_set_complete(inode);
                ci->i_max_offset = filp->f_pos;
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        dout("readdir %p filp %p done.\n", inode, filp);
        return 0;
@@ -607,7 +607,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
                struct ceph_inode_info *ci = ceph_inode(dir);
                struct ceph_dentry_info *di = ceph_dentry(dentry);
 
-               spin_lock(&dir->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
                if (strncmp(dentry->d_name.name,
                            fsc->mount_options->snapdir_name,
@@ -615,13 +615,13 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
                    !is_root_ceph_dentry(dir, dentry) &&
                    ceph_dir_test_complete(dir) &&
                    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
-                       spin_unlock(&dir->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                        dout(" dir %p complete, -ENOENT\n", dir);
                        d_add(dentry, NULL);
                        di->lease_shared_gen = ci->i_shared_gen;
                        return NULL;
                }
-               spin_unlock(&dir->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
        }
 
        op = ceph_snap(dir) == CEPH_SNAPDIR ?
@@ -841,12 +841,12 @@ static int drop_caps_for_unlink(struct inode *inode)
        struct ceph_inode_info *ci = ceph_inode(inode);
        int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (inode->i_nlink == 1) {
                drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
                ci->i_ceph_flags |= CEPH_I_NODELAY;
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return drop;
 }
 
@@ -1015,10 +1015,10 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        int valid = 0;
 
-       spin_lock(&dir->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (ci->i_shared_gen == di->lease_shared_gen)
                valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
-       spin_unlock(&dir->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
             dir, (unsigned)ci->i_shared_gen, dentry,
             (unsigned)di->lease_shared_gen, valid);
index ce549d3..ed72428 100644 (file)
@@ -147,9 +147,9 @@ int ceph_open(struct inode *inode, struct file *file)
 
        /* trivially open snapdir */
        if (ceph_snap(inode) == CEPH_SNAPDIR) {
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                __ceph_get_fmode(ci, fmode);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }
 
@@ -158,7 +158,7 @@ int ceph_open(struct inode *inode, struct file *file)
         * write) or any MDS (for read).  Update wanted set
         * asynchronously.
         */
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (__ceph_is_any_real_caps(ci) &&
            (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
                int mds_wanted = __ceph_caps_mds_wanted(ci);
@@ -168,7 +168,7 @@ int ceph_open(struct inode *inode, struct file *file)
                     inode, fmode, ceph_cap_string(wanted),
                     ceph_cap_string(issued));
                __ceph_get_fmode(ci, fmode);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
 
                /* adjust wanted? */
                if ((issued & wanted) != wanted &&
@@ -180,10 +180,10 @@ int ceph_open(struct inode *inode, struct file *file)
        } else if (ceph_snap(inode) != CEPH_NOSNAP &&
                   (ci->i_snap_caps & wanted) == wanted) {
                __ceph_get_fmode(ci, fmode);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
        req = prepare_open_request(inode->i_sb, flags, 0);
@@ -743,9 +743,9 @@ retry_snap:
                 */
                int dirty;
 
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                ceph_put_cap_refs(ci, got);
 
                ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
@@ -764,9 +764,9 @@ retry_snap:
 
        if (ret >= 0) {
                int dirty;
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);
        }
@@ -797,7 +797,8 @@ static loff_t ceph_llseek(struct file *file, loff_t offset, int origin)
 
        mutex_lock(&inode->i_mutex);
        __ceph_do_pending_vmtruncate(inode);
-       if (origin != SEEK_CUR || origin != SEEK_SET) {
+
+       if (origin == SEEK_END || origin == SEEK_DATA || origin == SEEK_HOLE) {
                ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
                if (ret < 0) {
                        offset = ret;
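
The llseek hunk above replaces a test that was always true: for any single value of origin, "origin != SEEK_CUR || origin != SEEK_SET" cannot be false, so the size refresh ran on every seek. A small userspace sketch, compilable as-is, showing why the old disjunction is a tautology; SEEK_DATA/SEEK_HOLE are left out only to keep the example portable.

#include <stdio.h>
#include <unistd.h>

static void check(int origin)
{
        /* old test: true for every origin, even SEEK_SET and SEEK_CUR */
        int old = (origin != SEEK_CUR || origin != SEEK_SET);
        /* new style: name the cases that actually need the file size */
        int new = (origin == SEEK_END);

        printf("origin=%d old=%d new=%d\n", origin, old, new);
}

int main(void)
{
        check(SEEK_SET);   /* old=1 (wrong), new=0 */
        check(SEEK_CUR);   /* old=1 (wrong), new=0 */
        check(SEEK_END);   /* old=1, new=1 */
        return 0;
}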
index 116f365..87fb132 100644 (file)
@@ -297,6 +297,8 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
 
        dout("alloc_inode %p\n", &ci->vfs_inode);
 
+       spin_lock_init(&ci->i_ceph_lock);
+
        ci->i_version = 0;
        ci->i_time_warp_seq = 0;
        ci->i_ceph_flags = 0;
@@ -583,7 +585,7 @@ static int fill_inode(struct inode *inode,
                               iinfo->xattr_len);
        }
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 
        /*
         * provided version will be odd if inode value is projected,
@@ -680,7 +682,7 @@ static int fill_inode(struct inode *inode,
                        char *sym;
 
                        BUG_ON(symlen != inode->i_size);
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
 
                        err = -ENOMEM;
                        sym = kmalloc(symlen+1, GFP_NOFS);
@@ -689,7 +691,7 @@ static int fill_inode(struct inode *inode,
                        memcpy(sym, iinfo->symlink, symlen);
                        sym[symlen] = 0;
 
-                       spin_lock(&inode->i_lock);
+                       spin_lock(&ci->i_ceph_lock);
                        if (!ci->i_symlink)
                                ci->i_symlink = sym;
                        else
@@ -715,7 +717,7 @@ static int fill_inode(struct inode *inode,
        }
 
 no_change:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        /* queue truncate if we saw i_size decrease */
        if (queue_trunc)
@@ -750,13 +752,13 @@ no_change:
                                     info->cap.flags,
                                     caps_reservation);
                } else {
-                       spin_lock(&inode->i_lock);
+                       spin_lock(&ci->i_ceph_lock);
                        dout(" %p got snap_caps %s\n", inode,
                             ceph_cap_string(le32_to_cpu(info->cap.caps)));
                        ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
                        if (cap_fmode >= 0)
                                __ceph_get_fmode(ci, cap_fmode);
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                }
        } else if (cap_fmode >= 0) {
                pr_warning("mds issued no caps on %llx.%llx\n",
@@ -849,19 +851,20 @@ static void ceph_set_dentry_offset(struct dentry *dn)
 {
        struct dentry *dir = dn->d_parent;
        struct inode *inode = dir->d_inode;
+       struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_dentry_info *di;
 
        BUG_ON(!inode);
 
        di = ceph_dentry(dn);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (!ceph_dir_test_complete(inode)) {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                return;
        }
        di->offset = ceph_inode(inode)->i_max_offset++;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        spin_lock(&dir->d_lock);
        spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
@@ -1308,7 +1311,7 @@ int ceph_inode_set_size(struct inode *inode, loff_t size)
        struct ceph_inode_info *ci = ceph_inode(inode);
        int ret = 0;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
        inode->i_size = size;
        inode->i_blocks = (size + (1 << 9) - 1) >> 9;
@@ -1318,7 +1321,7 @@ int ceph_inode_set_size(struct inode *inode, loff_t size)
            (ci->i_reported_size << 1) < ci->i_max_size)
                ret = 1;
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return ret;
 }
 
@@ -1376,20 +1379,20 @@ static void ceph_invalidate_work(struct work_struct *work)
        u32 orig_gen;
        int check = 0;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        dout("invalidate_pages %p gen %d revoking %d\n", inode,
             ci->i_rdcache_gen, ci->i_rdcache_revoking);
        if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
                /* nevermind! */
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                goto out;
        }
        orig_gen = ci->i_rdcache_gen;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        truncate_inode_pages(&inode->i_data, 0);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (orig_gen == ci->i_rdcache_gen &&
            orig_gen == ci->i_rdcache_revoking) {
                dout("invalidate_pages %p gen %d successful\n", inode,
@@ -1401,7 +1404,7 @@ static void ceph_invalidate_work(struct work_struct *work)
                     inode, orig_gen, ci->i_rdcache_gen,
                     ci->i_rdcache_revoking);
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (check)
                ceph_check_caps(ci, 0, NULL);
@@ -1460,10 +1463,10 @@ void __ceph_do_pending_vmtruncate(struct inode *inode)
        int wrbuffer_refs, wake = 0;
 
 retry:
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (ci->i_truncate_pending == 0) {
                dout("__do_pending_vmtruncate %p none pending\n", inode);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                return;
        }
 
@@ -1474,7 +1477,7 @@ retry:
        if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
                dout("__do_pending_vmtruncate %p flushing snaps first\n",
                     inode);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                filemap_write_and_wait_range(&inode->i_data, 0,
                                             inode->i_sb->s_maxbytes);
                goto retry;
@@ -1484,15 +1487,15 @@ retry:
        wrbuffer_refs = ci->i_wrbuffer_ref;
        dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
             ci->i_truncate_pending, to);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        truncate_inode_pages(inode->i_mapping, to);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        ci->i_truncate_pending--;
        if (ci->i_truncate_pending == 0)
                wake = 1;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (wrbuffer_refs == 0)
                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
@@ -1547,7 +1550,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
        if (IS_ERR(req))
                return PTR_ERR(req);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        issued = __ceph_caps_issued(ci, NULL);
        dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
 
@@ -1695,7 +1698,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
        }
 
        release &= issued;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (inode_dirty_flags)
                __mark_inode_dirty(inode, inode_dirty_flags);
@@ -1717,7 +1720,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
        __ceph_do_pending_vmtruncate(inode);
        return err;
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        ceph_mdsc_put_request(req);
        return err;
 }
index 5a14c29..790914a 100644 (file)
@@ -241,11 +241,11 @@ static long ceph_ioctl_lazyio(struct file *file)
        struct ceph_inode_info *ci = ceph_inode(inode);
 
        if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) {
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                ci->i_nr_by_mode[fi->fmode]--;
                fi->fmode |= CEPH_FILE_MODE_LAZY;
                ci->i_nr_by_mode[fi->fmode]++;
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                dout("ioctl_layzio: file %p marked lazy\n", file);
 
                ceph_check_caps(ci, 0, NULL);
index 264ab70..6203d80 100644 (file)
@@ -732,21 +732,21 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
                }
        }
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        cap = NULL;
        if (mode == USE_AUTH_MDS)
                cap = ci->i_auth_cap;
        if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
                cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
        if (!cap) {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                goto random;
        }
        mds = cap->session->s_mds;
        dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
             inode, ceph_vinop(inode), mds,
             cap == ci->i_auth_cap ? "auth " : "", cap);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return mds;
 
 random:
@@ -951,7 +951,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
 
        dout("removing cap %p, ci is %p, inode is %p\n",
             cap, ci, &ci->vfs_inode);
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        __ceph_remove_cap(cap);
        if (!__ceph_is_any_real_caps(ci)) {
                struct ceph_mds_client *mdsc =
@@ -984,7 +984,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
                }
                spin_unlock(&mdsc->cap_dirty_lock);
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        while (drop--)
                iput(inode);
        return 0;
@@ -1015,10 +1015,10 @@ static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
 
        wake_up_all(&ci->i_cap_wq);
        if (arg) {
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                ci->i_wanted_max_size = 0;
                ci->i_requested_max_size = 0;
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
        }
        return 0;
 }
@@ -1151,7 +1151,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
        if (session->s_trim_caps <= 0)
                return -1;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        mine = cap->issued | cap->implemented;
        used = __ceph_caps_used(ci);
        oissued = __ceph_caps_issued_other(ci, cap);
@@ -1170,7 +1170,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
                __ceph_remove_cap(cap);
        } else {
                /* try to drop referring dentries */
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                d_prune_aliases(inode);
                dout("trim_caps_cb %p cap %p  pruned, count now %d\n",
                     inode, cap, atomic_read(&inode->i_count));
@@ -1178,7 +1178,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
        }
 
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return 0;
 }
 
@@ -1296,7 +1296,7 @@ static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
                                           i_flushing_item);
                        struct inode *inode = &ci->vfs_inode;
 
-                       spin_lock(&inode->i_lock);
+                       spin_lock(&ci->i_ceph_lock);
                        if (ci->i_cap_flush_seq <= want_flush_seq) {
                                dout("check_cap_flush still flushing %p "
                                     "seq %lld <= %lld to mds%d\n", inode,
@@ -1304,7 +1304,7 @@ static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
                                     session->s_mds);
                                ret = 0;
                        }
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                }
                mutex_unlock(&session->s_mutex);
                ceph_put_mds_session(session);
@@ -1495,6 +1495,7 @@ retry:
                             pos, temp);
                } else if (stop_on_nosnap && inode &&
                           ceph_snap(inode) == CEPH_NOSNAP) {
+                       spin_unlock(&temp->d_lock);
                        break;
                } else {
                        pos -= temp->d_name.len;
@@ -2011,10 +2012,10 @@ void ceph_invalidate_dir_request(struct ceph_mds_request *req)
        struct ceph_inode_info *ci = ceph_inode(inode);
 
        dout("invalidate_dir_request %p (D_COMPLETE, lease(s))\n", inode);
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        ceph_dir_clear_complete(inode);
        ci->i_release_count++;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (req->r_dentry)
                ceph_invalidate_dentry_lease(req->r_dentry);
@@ -2422,7 +2423,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
        if (err)
                goto out_free;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        cap->seq = 0;        /* reset cap seq */
        cap->issue_seq = 0;  /* and issue_seq */
 
@@ -2445,7 +2446,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
                rec.v1.pathbase = cpu_to_le64(pathbase);
                reclen = sizeof(rec.v1);
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (recon_state->flock) {
                int num_fcntl_locks, num_flock_locks;
index 4bb2399..a50ca0e 100644 (file)
@@ -20,7 +20,7 @@
  *
  *         mdsc->snap_rwsem
  *
- *         inode->i_lock
+ *         ci->i_ceph_lock
  *                 mdsc->snap_flush_lock
  *                 mdsc->cap_delay_lock
  *
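
The comment above updates the documented lock ordering now that the per-inode state moved to ci->i_ceph_lock. A rough kernel-context sketch of a caller taking the locks strictly in that order; only the lock names come from the comment, the function itself is hypothetical.

static void example_ordered_locking(struct ceph_mds_client *mdsc,
                                    struct ceph_inode_info *ci)
{
        down_read(&mdsc->snap_rwsem);           /* outermost */
        spin_lock(&ci->i_ceph_lock);            /* per-inode cap/snap state */

        spin_lock(&mdsc->snap_flush_lock);      /* innermost, kept short */
        /* ... move ci on/off the snap flush list ... */
        spin_unlock(&mdsc->snap_flush_lock);

        spin_unlock(&ci->i_ceph_lock);
        up_read(&mdsc->snap_rwsem);
}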
index e264371..a559c80 100644 (file)
@@ -446,7 +446,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
                return;
        }
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        used = __ceph_caps_used(ci);
        dirty = __ceph_caps_dirty(ci);
 
@@ -528,7 +528,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
                kfree(capsnap);
        }
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 /*
@@ -537,7 +537,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
  *
  * If capsnap can now be flushed, add to snap_flush list, and return 1.
  *
- * Caller must hold i_lock.
+ * Caller must hold i_ceph_lock.
  */
 int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
                            struct ceph_cap_snap *capsnap)
@@ -739,9 +739,9 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
                inode = &ci->vfs_inode;
                ihold(inode);
                spin_unlock(&mdsc->snap_flush_lock);
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                __ceph_flush_snaps(ci, &session, 0);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                iput(inode);
                spin_lock(&mdsc->snap_flush_lock);
        }
@@ -847,7 +847,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
                                continue;
                        ci = ceph_inode(inode);
 
-                       spin_lock(&inode->i_lock);
+                       spin_lock(&ci->i_ceph_lock);
                        if (!ci->i_snap_realm)
                                goto skip_inode;
                        /*
@@ -876,7 +876,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
                        oldrealm = ci->i_snap_realm;
                        ci->i_snap_realm = realm;
                        spin_unlock(&realm->inodes_with_caps_lock);
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
 
                        ceph_get_snap_realm(mdsc, realm);
                        ceph_put_snap_realm(mdsc, oldrealm);
@@ -885,7 +885,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
                        continue;
 
 skip_inode:
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                        iput(inode);
                }
 
index 8dc73a5..b48f15f 100644 (file)
@@ -383,7 +383,7 @@ static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
        if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
                seq_printf(m, ",rsize=%d", fsopt->rsize);
        if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
-               seq_printf(m, ",rasize=%d", fsopt->rsize);
+               seq_printf(m, ",rasize=%d", fsopt->rasize);
        if (fsopt->congestion_kb != default_congestion_kb())
                seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
        if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
index 01bf189..edcbf37 100644 (file)
@@ -220,7 +220,7 @@ struct ceph_dentry_info {
  * The locking for D_COMPLETE is a bit odd:
  *  - we can clear it at almost any time (see ceph_d_prune)
  *  - it is only meaningful if:
- *    - we hold dir inode i_lock
+ *    - we hold dir inode i_ceph_lock
  *    - we hold dir FILE_SHARED caps
  *    - the dentry D_COMPLETE is set
  */
@@ -250,6 +250,8 @@ struct ceph_inode_xattrs_info {
 struct ceph_inode_info {
        struct ceph_vino i_vino;   /* ceph ino + snap */
 
+       spinlock_t i_ceph_lock;
+
        u64 i_version;
        u32 i_time_warp_seq;
 
@@ -271,7 +273,7 @@ struct ceph_inode_info {
 
        struct ceph_inode_xattrs_info i_xattrs;
 
-       /* capabilities.  protected _both_ by i_lock and cap->session's
+       /* capabilities.  protected _both_ by i_ceph_lock and cap->session's
         * s_mutex. */
        struct rb_root i_caps;           /* cap list */
        struct ceph_cap *i_auth_cap;     /* authoritative cap, if any */
@@ -437,18 +439,18 @@ static inline void ceph_i_clear(struct inode *inode, unsigned mask)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        ci->i_ceph_flags &= ~mask;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 static inline void ceph_i_set(struct inode *inode, unsigned mask)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        ci->i_ceph_flags |= mask;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 static inline bool ceph_i_test(struct inode *inode, unsigned mask)
@@ -456,9 +458,9 @@ static inline bool ceph_i_test(struct inode *inode, unsigned mask)
        struct ceph_inode_info *ci = ceph_inode(inode);
        bool r;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        r = (ci->i_ceph_flags & mask) == mask;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return r;
 }
 
@@ -508,9 +510,9 @@ extern int __ceph_caps_issued_other(struct ceph_inode_info *ci,
 static inline int ceph_caps_issued(struct ceph_inode_info *ci)
 {
        int issued;
-       spin_lock(&ci->vfs_inode.i_lock);
+       spin_lock(&ci->i_ceph_lock);
        issued = __ceph_caps_issued(ci, NULL);
-       spin_unlock(&ci->vfs_inode.i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return issued;
 }
 
@@ -518,9 +520,9 @@ static inline int ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask,
                                        int touch)
 {
        int r;
-       spin_lock(&ci->vfs_inode.i_lock);
+       spin_lock(&ci->i_ceph_lock);
        r = __ceph_caps_issued_mask(ci, mask, touch);
-       spin_unlock(&ci->vfs_inode.i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return r;
 }
 
@@ -743,10 +745,9 @@ extern int ceph_add_cap(struct inode *inode,
 extern void __ceph_remove_cap(struct ceph_cap *cap);
 static inline void ceph_remove_cap(struct ceph_cap *cap)
 {
-       struct inode *inode = &cap->ci->vfs_inode;
-       spin_lock(&inode->i_lock);
+       spin_lock(&cap->ci->i_ceph_lock);
        __ceph_remove_cap(cap);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&cap->ci->i_ceph_lock);
 }
 extern void ceph_put_cap(struct ceph_mds_client *mdsc,
                         struct ceph_cap *cap);
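
The super.h hunks above introduce the dedicated spinlock and convert the inline helpers to it. A minimal sketch of the resulting caller pattern, modelled on ceph_i_set()/ceph_i_test(); the helper itself is hypothetical, the point is that i_ceph_flags and the cap tree are now serialized by ci->i_ceph_lock instead of the VFS inode->i_lock.

static bool example_test_and_set_flag(struct inode *inode, unsigned mask)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        bool was_set;

        spin_lock(&ci->i_ceph_lock);            /* no longer inode->i_lock */
        was_set = (ci->i_ceph_flags & mask) == mask;
        if (!was_set)
                ci->i_ceph_flags |= mask;
        spin_unlock(&ci->i_ceph_lock);

        return was_set;
}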
index 96c6739..a5e36e4 100644 (file)
@@ -343,8 +343,8 @@ void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
 }
 
 static int __build_xattrs(struct inode *inode)
-       __releases(inode->i_lock)
-       __acquires(inode->i_lock)
+       __releases(ci->i_ceph_lock)
+       __acquires(ci->i_ceph_lock)
 {
        u32 namelen;
        u32 numattr = 0;
@@ -372,7 +372,7 @@ start:
                end = p + ci->i_xattrs.blob->vec.iov_len;
                ceph_decode_32_safe(&p, end, numattr, bad);
                xattr_version = ci->i_xattrs.version;
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
 
                xattrs = kcalloc(numattr, sizeof(struct ceph_xattr *),
                                 GFP_NOFS);
@@ -387,7 +387,7 @@ start:
                                goto bad_lock;
                }
 
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                if (ci->i_xattrs.version != xattr_version) {
                        /* lost a race, retry */
                        for (i = 0; i < numattr; i++)
@@ -418,7 +418,7 @@ start:
 
        return err;
 bad_lock:
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 bad:
        if (xattrs) {
                for (i = 0; i < numattr; i++)
@@ -512,7 +512,7 @@ ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
        if (vxattrs)
                vxattr = ceph_match_vxattr(vxattrs, name);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
             ci->i_xattrs.version, ci->i_xattrs.index_version);
 
@@ -520,14 +520,14 @@ ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
            (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
                goto get_xattr;
        } else {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                /* get xattrs from mds (if we don't already have them) */
                err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
                if (err)
                        return err;
        }
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 
        if (vxattr && vxattr->readonly) {
                err = vxattr->getxattr_cb(ci, value, size);
@@ -558,7 +558,7 @@ get_xattr:
        memcpy(value, xattr->val, xattr->val_len);
 
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return err;
 }
 
@@ -573,7 +573,7 @@ ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
        u32 len;
        int i;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
             ci->i_xattrs.version, ci->i_xattrs.index_version);
 
@@ -581,13 +581,13 @@ ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
            (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
                goto list_xattr;
        } else {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
                if (err)
                        return err;
        }
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 
        err = __build_xattrs(inode);
        if (err < 0)
@@ -619,7 +619,7 @@ list_xattr:
                }
 
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return err;
 }
 
@@ -739,7 +739,7 @@ int ceph_setxattr(struct dentry *dentry, const char *name,
        if (!xattr)
                goto out;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 retry:
        issued = __ceph_caps_issued(ci, NULL);
        if (!(issued & CEPH_CAP_XATTR_EXCL))
@@ -752,12 +752,12 @@ retry:
            required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
                struct ceph_buffer *blob = NULL;
 
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                dout(" preaallocating new blob size=%d\n", required_blob_size);
                blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
                if (!blob)
                        goto out;
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                if (ci->i_xattrs.prealloc_blob)
                        ceph_buffer_put(ci->i_xattrs.prealloc_blob);
                ci->i_xattrs.prealloc_blob = blob;
@@ -770,13 +770,13 @@ retry:
        dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
        ci->i_xattrs.dirty = true;
        inode->i_ctime = CURRENT_TIME;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        if (dirty)
                __mark_inode_dirty(inode, dirty);
        return err;
 
 do_sync:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        err = ceph_sync_setxattr(dentry, name, value, size, flags);
 out:
        kfree(newname);
@@ -833,7 +833,7 @@ int ceph_removexattr(struct dentry *dentry, const char *name)
                        return -EOPNOTSUPP;
        }
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        __build_xattrs(inode);
        issued = __ceph_caps_issued(ci, NULL);
        dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));
@@ -846,12 +846,12 @@ int ceph_removexattr(struct dentry *dentry, const char *name)
        ci->i_xattrs.dirty = true;
        inode->i_ctime = CURRENT_TIME;
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        if (dirty)
                __mark_inode_dirty(inode, dirty);
        return err;
 do_sync:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        err = ceph_send_removexattr(dentry, name);
        return err;
 }
index d6a972d..8cd4b52 100644 (file)
@@ -441,6 +441,8 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig,
        smb_msg.msg_controllen = 0;
 
        for (total_read = 0; to_read; total_read += length, to_read -= length) {
+               try_to_freeze();
+
                if (server_unresponsive(server)) {
                        total_read = -EAGAIN;
                        break;
index cf0b153..4dd9283 100644 (file)
@@ -702,6 +702,13 @@ cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
                                         lock->type, lock->netfid, conf_lock);
 }
 
+/*
+ * Check if there is another lock that prevents us from setting the lock
+ * (mandatory style). If such a lock exists, update the flock structure with
+ * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
+ * brlocks, or leave it unchanged if we can't. Returns 0 if we don't need to
+ * request anything from the server, or 1 otherwise.
+ */
 static int
 cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
               __u8 type, __u16 netfid, struct file_lock *flock)
@@ -739,6 +746,12 @@ cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock)
        mutex_unlock(&cinode->lock_mutex);
 }
 
+/*
+ * Set the byte-range lock (mandatory style). Returns:
+ * 1) 0, if we set the lock and don't need to request it from the server;
+ * 2) 1, if no locks prevent us but we need to request it from the server;
+ * 3) -EACCES, if there is a lock that prevents us and wait is false.
+ */
 static int
 cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
                 bool wait)
@@ -778,6 +791,13 @@ try_again:
        return rc;
 }
 
+/*
+ * Check if there is another lock that prevents us from setting the lock
+ * (posix style). If such a lock exists, update the flock structure with
+ * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
+ * brlocks, or leave it unchanged if we can't. Returns 0 if we don't need to
+ * request anything from the server, or 1 otherwise.
+ */
 static int
 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
 {
@@ -800,6 +820,12 @@ cifs_posix_lock_test(struct file *file, struct file_lock *flock)
        return rc;
 }
 
+/*
+ * Set the byte-range lock (posix style). Returns:
+ * 1) 0, if we set the lock and don't need to request it from the server;
+ * 2) 1, if we need to request it from the server;
+ * 3) <0, if an error occurs while setting the lock.
+ */
 static int
 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
 {
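
The new comments above pin down a three-way return convention for the byte-range lock helpers. A rough sketch of how a caller on the setlk path would act on it; the wrapper name is hypothetical, only the handling of the return values follows the comments.

static int example_set_mandatory_lock(struct cifsInodeInfo *cinode,
                                      struct cifsLockInfo *lock, bool wait)
{
        int rc = cifs_lock_add_if(cinode, lock, wait);

        if (rc < 0)     /* -EACCES: conflicting lock and wait == false */
                return rc;
        if (rc == 0)    /* lock recorded locally, nothing to send */
                return 0;

        /* rc == 1: no local conflict, but the server must still be asked */
        /* ... issue the SMB lock request here ... */
        return 0;
}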
index 5de03ec..a090bbe 100644 (file)
@@ -554,7 +554,10 @@ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
                                 rc);
                        return rc;
                }
-               cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile);
+               /* FindFirst/Next set last_entry to NULL on malformed reply */
+               if (cifsFile->srch_inf.last_entry)
+                       cifs_save_resume_key(cifsFile->srch_inf.last_entry,
+                                               cifsFile);
        }
 
        while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) &&
@@ -562,7 +565,10 @@ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
                cFYI(1, "calling findnext2");
                rc = CIFSFindNext(xid, pTcon, cifsFile->netfid,
                                  &cifsFile->srch_inf);
-               cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile);
+               /* FindFirst/Next set last_entry to NULL on malformed reply */
+               if (cifsFile->srch_inf.last_entry)
+                       cifs_save_resume_key(cifsFile->srch_inf.last_entry,
+                                               cifsFile);
                if (rc)
                        return -ENOENT;
        }
index 7cacba1..80d8508 100644 (file)
@@ -209,7 +209,7 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
 {
        int rc;
        int len;
-       __u16 wpwd[129];
+       __le16 wpwd[129];
 
        /* Password cannot be longer than 128 characters */
        if (passwd) /* Password must be converted to NT unicode */
@@ -219,8 +219,8 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
                *wpwd = 0; /* Ensure string is null terminated */
        }
 
-       rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__u16));
-       memset(wpwd, 0, 129 * sizeof(__u16));
+       rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16));
+       memset(wpwd, 0, 129 * sizeof(__le16));
 
        return rc;
 }
index ca418aa..9d8715c 100644 (file)
@@ -292,7 +292,7 @@ int __init configfs_inode_init(void)
        return bdi_init(&configfs_backing_dev_info);
 }
 
-void __exit configfs_inode_exit(void)
+void configfs_inode_exit(void)
 {
        bdi_destroy(&configfs_backing_dev_info);
 }
index ecc6217..276e15c 100644 (file)
@@ -143,28 +143,26 @@ static int __init configfs_init(void)
                goto out;
 
        config_kobj = kobject_create_and_add("config", kernel_kobj);
-       if (!config_kobj) {
-               kmem_cache_destroy(configfs_dir_cachep);
-               configfs_dir_cachep = NULL;
-               goto out;
-       }
+       if (!config_kobj)
+               goto out2;
+
+       err = configfs_inode_init();
+       if (err)
+               goto out3;
 
        err = register_filesystem(&configfs_fs_type);
-       if (err) {
-               printk(KERN_ERR "configfs: Unable to register filesystem!\n");
-               kobject_put(config_kobj);
-               kmem_cache_destroy(configfs_dir_cachep);
-               configfs_dir_cachep = NULL;
-               goto out;
-       }
+       if (err)
+               goto out4;
 
-       err = configfs_inode_init();
-       if (err) {
-               unregister_filesystem(&configfs_fs_type);
-               kobject_put(config_kobj);
-               kmem_cache_destroy(configfs_dir_cachep);
-               configfs_dir_cachep = NULL;
-       }
+       return 0;
+out4:
+       printk(KERN_ERR "configfs: Unable to register filesystem!\n");
+       configfs_inode_exit();
+out3:
+       kobject_put(config_kobj);
+out2:
+       kmem_cache_destroy(configfs_dir_cachep);
+       configfs_dir_cachep = NULL;
 out:
        return err;
 }
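
The configfs_init() rewrite above trades duplicated cleanup for the usual goto-unwind idiom: each acquired resource gets a label and failures jump to the label that releases everything obtained so far, in reverse order. A compilable userspace sketch of the same shape, with malloc/free standing in for the cache, kobject and filesystem registration; all names are hypothetical.

#include <errno.h>
#include <stdlib.h>

static int example_init(void)
{
        void *cache, *kobj, *fs;
        int err = -ENOMEM;

        cache = malloc(32);             /* stands in for kmem_cache_create() */
        if (!cache)
                goto out;

        kobj = malloc(32);              /* stands in for kobject_create_and_add() */
        if (!kobj)
                goto out_cache;

        fs = malloc(32);                /* stands in for register_filesystem() */
        if (!fs)
                goto out_kobj;

        return 0;                       /* success: resources stay live */

out_kobj:                               /* unwind in reverse order of setup */
        free(kobj);
out_cache:
        free(cache);
out:
        return err;
}

int main(void)
{
        return example_init() ? 1 : 0;
}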
index 10ba92d..89509b5 100644 (file)
@@ -2439,16 +2439,14 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
 /**
  * prepend_path - Prepend path string to a buffer
  * @path: the dentry/vfsmount to report
- * @root: root vfsmnt/dentry (may be modified by this function)
+ * @root: root vfsmnt/dentry
  * @buffer: pointer to the end of the buffer
  * @buflen: pointer to buffer length
  *
  * Caller holds the rename_lock.
- *
- * If path is not reachable from the supplied root, then the value of
- * root is changed (without modifying refcounts).
  */
-static int prepend_path(const struct path *path, struct path *root,
+static int prepend_path(const struct path *path,
+                       const struct path *root,
                        char **buffer, int *buflen)
 {
        struct dentry *dentry = path->dentry;
@@ -2483,10 +2481,10 @@ static int prepend_path(const struct path *path, struct path *root,
                dentry = parent;
        }
 
-out:
        if (!error && !slash)
                error = prepend(buffer, buflen, "/", 1);
 
+out:
        br_read_unlock(vfsmount_lock);
        return error;
 
@@ -2500,15 +2498,17 @@ global_root:
                WARN(1, "Root dentry has weird name <%.*s>\n",
                     (int) dentry->d_name.len, dentry->d_name.name);
        }
-       root->mnt = vfsmnt;
-       root->dentry = dentry;
+       if (!slash)
+               error = prepend(buffer, buflen, "/", 1);
+       if (!error)
+               error = vfsmnt->mnt_ns ? 1 : 2;
        goto out;
 }
 
 /**
  * __d_path - return the path of a dentry
  * @path: the dentry/vfsmount to report
- * @root: root vfsmnt/dentry (may be modified by this function)
+ * @root: root vfsmnt/dentry
  * @buf: buffer to return value in
  * @buflen: buffer length
  *
@@ -2519,10 +2519,10 @@ global_root:
  *
  * "buflen" should be positive.
  *
- * If path is not reachable from the supplied root, then the value of
- * root is changed (without modifying refcounts).
+ * If the path is not reachable from the supplied root, return %NULL.
  */
-char *__d_path(const struct path *path, struct path *root,
+char *__d_path(const struct path *path,
+              const struct path *root,
               char *buf, int buflen)
 {
        char *res = buf + buflen;
@@ -2533,7 +2533,28 @@ char *__d_path(const struct path *path, struct path *root,
        error = prepend_path(path, root, &res, &buflen);
        write_sequnlock(&rename_lock);
 
-       if (error)
+       if (error < 0)
+               return ERR_PTR(error);
+       if (error > 0)
+               return NULL;
+       return res;
+}
+
+char *d_absolute_path(const struct path *path,
+              char *buf, int buflen)
+{
+       struct path root = {};
+       char *res = buf + buflen;
+       int error;
+
+       prepend(&res, &buflen, "\0", 1);
+       write_seqlock(&rename_lock);
+       error = prepend_path(path, &root, &res, &buflen);
+       write_sequnlock(&rename_lock);
+
+       if (error > 1)
+               error = -EINVAL;
+       if (error < 0)
                return ERR_PTR(error);
        return res;
 }
@@ -2541,8 +2562,9 @@ char *__d_path(const struct path *path, struct path *root,
 /*
  * same as __d_path but appends "(deleted)" for unlinked files.
  */
-static int path_with_deleted(const struct path *path, struct path *root,
-                                char **buf, int *buflen)
+static int path_with_deleted(const struct path *path,
+                            const struct path *root,
+                            char **buf, int *buflen)
 {
        prepend(buf, buflen, "\0", 1);
        if (d_unlinked(path->dentry)) {
@@ -2579,7 +2601,6 @@ char *d_path(const struct path *path, char *buf, int buflen)
 {
        char *res = buf + buflen;
        struct path root;
-       struct path tmp;
        int error;
 
        /*
@@ -2594,9 +2615,8 @@ char *d_path(const struct path *path, char *buf, int buflen)
 
        get_fs_root(current->fs, &root);
        write_seqlock(&rename_lock);
-       tmp = root;
-       error = path_with_deleted(path, &tmp, &res, &buflen);
-       if (error)
+       error = path_with_deleted(path, &root, &res, &buflen);
+       if (error < 0)
                res = ERR_PTR(error);
        write_sequnlock(&rename_lock);
        path_put(&root);
@@ -2617,7 +2637,6 @@ char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
 {
        char *res = buf + buflen;
        struct path root;
-       struct path tmp;
        int error;
 
        if (path->dentry->d_op && path->dentry->d_op->d_dname)
@@ -2625,9 +2644,8 @@ char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
 
        get_fs_root(current->fs, &root);
        write_seqlock(&rename_lock);
-       tmp = root;
-       error = path_with_deleted(path, &tmp, &res, &buflen);
-       if (!error && !path_equal(&tmp, &root))
+       error = path_with_deleted(path, &root, &res, &buflen);
+       if (error > 0)
                error = prepend_unreachable(&res, &buflen);
        write_sequnlock(&rename_lock);
        path_put(&root);
@@ -2758,19 +2776,18 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
        write_seqlock(&rename_lock);
        if (!d_unlinked(pwd.dentry)) {
                unsigned long len;
-               struct path tmp = root;
                char *cwd = page + PAGE_SIZE;
                int buflen = PAGE_SIZE;
 
                prepend(&cwd, &buflen, "\0", 1);
-               error = prepend_path(&pwd, &tmp, &cwd, &buflen);
+               error = prepend_path(&pwd, &root, &cwd, &buflen);
                write_sequnlock(&rename_lock);
 
-               if (error)
+               if (error < 0)
                        goto out;
 
                /* Unreachable from current root */
-               if (!path_equal(&tmp, &root)) {
+               if (error > 0) {
                        error = prepend_unreachable(&cwd, &buflen);
                        if (error)
                                goto out;
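
With this change __d_path() no longer rewrites the caller's root: it returns the string when the path is reachable, NULL when it is not, and an ERR_PTR() on real errors (e.g. a too-small buffer), while the new d_absolute_path() resolves against the global root instead. A rough kernel-context sketch of a caller adapted to that contract; the function and the printk are illustrative only.

static void example_print_path(const struct path *path, const struct path *root)
{
        char buf[256];
        char *p;

        p = __d_path(path, root, buf, sizeof(buf));
        if (IS_ERR(p))                  /* real error, e.g. name too long */
                return;
        if (!p)                         /* not reachable from @root */
                p = d_absolute_path(path, buf, sizeof(buf));
        if (!IS_ERR_OR_NULL(p))
                printk(KERN_INFO "path: %s\n", p);
}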
index 61fa9e1..607b155 100644 (file)
@@ -1095,7 +1095,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
                  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
                  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
 
-       neh->eh_depth = cpu_to_le16(neh->eh_depth + 1);
+       neh->eh_depth = cpu_to_le16(le16_to_cpu(neh->eh_depth) + 1);
        ext4_mark_inode_dirty(handle, inode);
 out:
        brelse(bh);
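
eh_depth is a little-endian on-disk field, and the old line incremented the raw __le16 bytes before re-encoding them, which corrupts the depth on big-endian hosts. A small userspace sketch of the difference; the two helpers simply model what le16_to_cpu()/cpu_to_le16() do on a big-endian machine.

#include <stdint.h>
#include <stdio.h>

static uint16_t le16_to_host(uint16_t v) { return (uint16_t)((v >> 8) | (v << 8)); }
static uint16_t host_to_le16(uint16_t v) { return le16_to_host(v); }

int main(void)
{
        uint16_t eh_depth = host_to_le16(1);            /* on-disk: depth 1 */

        /* wrong: arithmetic on the raw little-endian representation */
        uint16_t bad  = host_to_le16(eh_depth + 1);

        /* right: decode, increment in CPU order, re-encode */
        uint16_t good = host_to_le16(le16_to_host(eh_depth) + 1);

        printf("bad=0x%04x good=0x%04x\n", bad, good);  /* good decodes to 2 */
        return 0;
}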
@@ -2955,7 +2955,6 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
        /* Pre-conditions */
        BUG_ON(!ext4_ext_is_uninitialized(ex));
        BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
-       BUG_ON(map->m_lblk + map->m_len > ee_block + ee_len);
 
        /*
         * Attempt to transfer newly initialized blocks from the currently
index 848f436..92655fd 100644 (file)
@@ -1339,8 +1339,11 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
                                        clear_buffer_unwritten(bh);
                                }
 
-                               /* skip page if block allocation undone */
-                               if (buffer_delay(bh) || buffer_unwritten(bh))
+                               /*
+                                * skip page if block allocation undone and
+                                * block is dirty
+                                */
+                               if (ext4_bh_delay_or_unwritten(NULL, bh))
                                        skip_page = 1;
                                bh = bh->b_this_page;
                                block_start += bh->b_size;
@@ -2387,7 +2390,6 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
        pgoff_t index;
        struct inode *inode = mapping->host;
        handle_t *handle;
-       loff_t page_len;
 
        index = pos >> PAGE_CACHE_SHIFT;
 
@@ -2434,13 +2436,6 @@ retry:
                 */
                if (pos + len > inode->i_size)
                        ext4_truncate_failed_write(inode);
-       } else {
-               page_len = pos & (PAGE_CACHE_SIZE - 1);
-               if (page_len > 0) {
-                       ret = ext4_discard_partial_page_buffers_no_lock(handle,
-                               inode, page, pos - page_len, page_len,
-                               EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
-               }
        }
 
        if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
@@ -2483,7 +2478,6 @@ static int ext4_da_write_end(struct file *file,
        loff_t new_i_size;
        unsigned long start, end;
        int write_mode = (int)(unsigned long)fsdata;
-       loff_t page_len;
 
        if (write_mode == FALL_BACK_TO_NONDELALLOC) {
                if (ext4_should_order_data(inode)) {
@@ -2508,7 +2502,7 @@ static int ext4_da_write_end(struct file *file,
         */
 
        new_i_size = pos + copied;
-       if (new_i_size > EXT4_I(inode)->i_disksize) {
+       if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
                if (ext4_da_should_update_i_disksize(page, end)) {
                        down_write(&EXT4_I(inode)->i_data_sem);
                        if (new_i_size > EXT4_I(inode)->i_disksize) {
@@ -2532,16 +2526,6 @@ static int ext4_da_write_end(struct file *file,
        }
        ret2 = generic_write_end(file, mapping, pos, len, copied,
                                                        page, fsdata);
-
-       page_len = PAGE_CACHE_SIZE -
-                       ((pos + copied - 1) & (PAGE_CACHE_SIZE - 1));
-
-       if (page_len > 0) {
-               ret = ext4_discard_partial_page_buffers_no_lock(handle,
-                       inode, page, pos + copied - 1, page_len,
-                       EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
-       }
-
        copied = ret2;
        if (ret2 < 0)
                ret = ret2;
@@ -2781,10 +2765,11 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
                  iocb->private, io_end->inode->i_ino, iocb, offset,
                  size);
 
+       iocb->private = NULL;
+
        /* if not aio dio with unwritten extents, just free io and return */
        if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
                ext4_free_io_end(io_end);
-               iocb->private = NULL;
 out:
                if (is_async)
                        aio_complete(iocb, ret, 0);
@@ -2807,7 +2792,6 @@ out:
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
 
        /* queue the work to convert unwritten extents to written */
-       iocb->private = NULL;
        queue_work(wq, &io_end->work);
 
        /* XXX: probably should move into the real I/O completion handler */
@@ -3203,26 +3187,8 @@ int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
 
        iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
 
-       if (!page_has_buffers(page)) {
-               /*
-                * If the range to be discarded covers a partial block
-                * we need to get the page buffers.  This is because
-                * partial blocks cannot be released and the page needs
-                * to be updated with the contents of the block before
-                * we write the zeros on top of it.
-                */
-               if ((from & (blocksize - 1)) ||
-                   ((from + length) & (blocksize - 1))) {
-                       create_empty_buffers(page, blocksize, 0);
-               } else {
-                       /*
-                        * If there are no partial blocks,
-                        * there is nothing to update,
-                        * so we can return now
-                        */
-                       return 0;
-               }
-       }
+       if (!page_has_buffers(page))
+               create_empty_buffers(page, blocksize, 0);
 
        /* Find the buffer that contains "offset" */
        bh = page_buffers(page);
index 7ce1d0b..7e106c8 100644 (file)
@@ -385,6 +385,18 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 
                block_end = block_start + blocksize;
                if (block_start >= len) {
+                       /*
+                        * Comments copied from block_write_full_page_endio:
+                        *
+                        * The page straddles i_size.  It must be zeroed out on
+                        * each and every writepage invocation because it may
+                        * be mmapped.  "A file is mapped in multiples of the
+                        * page size.  For a file that is not a multiple of
+                        * the  page size, the remaining memory is zeroed when
+                        * mapped, and writes to that region are not written
+                        * out to the file."
+                        */
+                       zero_user_segment(page, block_start, block_end);
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                        continue;
index 3858767..3e1329e 100644 (file)
@@ -1155,9 +1155,9 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
                seq_puts(seq, ",block_validity");
 
        if (!test_opt(sb, INIT_INODE_TABLE))
-               seq_puts(seq, ",noinit_inode_table");
+               seq_puts(seq, ",noinit_itable");
        else if (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)
-               seq_printf(seq, ",init_inode_table=%u",
+               seq_printf(seq, ",init_itable=%u",
                           (unsigned) sbi->s_li_wait_mult);
 
        ext4_show_quota_options(seq, sb);
@@ -1333,8 +1333,7 @@ enum {
        Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
        Opt_inode_readahead_blks, Opt_journal_ioprio,
        Opt_dioread_nolock, Opt_dioread_lock,
-       Opt_discard, Opt_nodiscard,
-       Opt_init_inode_table, Opt_noinit_inode_table,
+       Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
 };
 
 static const match_table_t tokens = {
@@ -1407,9 +1406,9 @@ static const match_table_t tokens = {
        {Opt_dioread_lock, "dioread_lock"},
        {Opt_discard, "discard"},
        {Opt_nodiscard, "nodiscard"},
-       {Opt_init_inode_table, "init_itable=%u"},
-       {Opt_init_inode_table, "init_itable"},
-       {Opt_noinit_inode_table, "noinit_itable"},
+       {Opt_init_itable, "init_itable=%u"},
+       {Opt_init_itable, "init_itable"},
+       {Opt_noinit_itable, "noinit_itable"},
        {Opt_err, NULL},
 };
 
@@ -1892,7 +1891,7 @@ set_qf_format:
                case Opt_dioread_lock:
                        clear_opt(sb, DIOREAD_NOLOCK);
                        break;
-               case Opt_init_inode_table:
+               case Opt_init_itable:
                        set_opt(sb, INIT_INODE_TABLE);
                        if (args[0].from) {
                                if (match_int(&args[0], &option))
@@ -1903,7 +1902,7 @@ set_qf_format:
                                return 0;
                        sbi->s_li_wait_mult = option;
                        break;
-               case Opt_noinit_inode_table:
+               case Opt_noinit_itable:
                        clear_opt(sb, INIT_INODE_TABLE);
                        break;
                default:
index 73c3992..ac86f8b 100644 (file)
@@ -156,6 +156,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
  * bdi_start_writeback - start writeback
  * @bdi: the backing device to write from
  * @nr_pages: the number of pages to write
+ * @reason: reason why some writeback work was initiated
  *
  * Description:
  *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
@@ -1223,6 +1224,7 @@ static void wait_sb_inodes(struct super_block *sb)
  * writeback_inodes_sb_nr -    writeback dirty inodes from given super_block
  * @sb: the superblock
  * @nr: the number of pages to write
+ * @reason: reason why some writeback work was initiated
  *
  * Start writeback on some inodes on this super_block. No guarantees are made
  * on how many (if any) will be written, and this function does not wait
@@ -1251,6 +1253,7 @@ EXPORT_SYMBOL(writeback_inodes_sb_nr);
 /**
  * writeback_inodes_sb -       writeback dirty inodes from given super_block
  * @sb: the superblock
+ * @reason: reason why some writeback work was initiated
  *
  * Start writeback on some inodes on this super_block. No guarantees are made
  * on how many (if any) will be written, and this function does not wait
@@ -1265,6 +1268,7 @@ EXPORT_SYMBOL(writeback_inodes_sb);
 /**
  * writeback_inodes_sb_if_idle -       start writeback if none underway
  * @sb: the superblock
+ * @reason: reason why some writeback work was initiated
  *
  * Invoke writeback_inodes_sb if no writeback is currently underway.
  * Returns 1 if writeback was started, 0 if not.
@@ -1285,6 +1289,7 @@ EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
  * writeback_inodes_sb_nr_if_idle -    start writeback if none underway
  * @sb: the superblock
  * @nr: the number of pages to write
+ * @reason: reason why some writeback work was initiated
  *
  * Invoke writeback_inodes_sb if no writeback is currently underway.
  * Returns 1 if writeback was started, 0 if not.
index 5cb8614..2aaf3ea 100644 (file)
@@ -1512,7 +1512,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
        else if (outarg->offset + num > file_size)
                num = file_size - outarg->offset;
 
-       while (num) {
+       while (num && req->num_pages < FUSE_MAX_PAGES_PER_REQ) {
                struct page *page;
                unsigned int this_num;
 
@@ -1526,6 +1526,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
 
                num -= this_num;
                total_len += this_num;
+               index++;
        }
        req->misc.retrieve_in.offset = outarg->offset;
        req->misc.retrieve_in.size = total_len;
index 594f07a..0c84100 100644 (file)
@@ -1556,7 +1556,7 @@ static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
        struct inode *inode = file->f_path.dentry->d_inode;
 
        mutex_lock(&inode->i_mutex);
-       if (origin != SEEK_CUR || origin != SEEK_SET) {
+       if (origin != SEEK_CUR && origin != SEEK_SET) {
                retval = fuse_update_attributes(inode, NULL, file, NULL);
                if (retval)
                        goto exit;
@@ -1567,6 +1567,10 @@ static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
                offset += i_size_read(inode);
                break;
        case SEEK_CUR:
+               if (offset == 0) {
+                       retval = file->f_pos;
+                       goto exit;
+               }
                offset += file->f_pos;
                break;
        case SEEK_DATA:
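
The same boolean fix appears again in the nfs_file_llseek hunk further down: with '||' the condition is a tautology, so the expensive revalidation ran for every whence value. A small userspace illustration, using local stand-in constants rather than the kernel's SEEK_* macros:

#include <stdbool.h>

enum whence { WH_SET, WH_CUR, WH_END };

/* Buggy form: w can never equal both values, so this is always true. */
static bool revalidate_buggy(enum whence w)
{
	return w != WH_CUR || w != WH_SET;
}

/* Fixed form: true only for WH_END (and, in the kernel, SEEK_DATA/SEEK_HOLE). */
static bool revalidate_fixed(enum whence w)
{
	return w != WH_CUR && w != WH_SET;
}
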
index 3e6d727..aa83109 100644 (file)
@@ -1138,28 +1138,28 @@ static int __init fuse_fs_init(void)
 {
        int err;
 
-       err = register_filesystem(&fuse_fs_type);
-       if (err)
-               goto out;
-
-       err = register_fuseblk();
-       if (err)
-               goto out_unreg;
-
        fuse_inode_cachep = kmem_cache_create("fuse_inode",
                                              sizeof(struct fuse_inode),
                                              0, SLAB_HWCACHE_ALIGN,
                                              fuse_inode_init_once);
        err = -ENOMEM;
        if (!fuse_inode_cachep)
-               goto out_unreg2;
+               goto out;
+
+       err = register_fuseblk();
+       if (err)
+               goto out2;
+
+       err = register_filesystem(&fuse_fs_type);
+       if (err)
+               goto out3;
 
        return 0;
 
- out_unreg2:
+ out3:
        unregister_fuseblk();
- out_unreg:
-       unregister_filesystem(&fuse_fs_type);
+ out2:
+       kmem_cache_destroy(fuse_inode_cachep);
  out:
        return err;
 }
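
The reordering above follows the usual module-init rule: allocate everything the filesystem needs before register_filesystem() makes it mountable, and unwind in reverse on failure. A hedged sketch of that pattern with hypothetical example_* names:

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/slab.h>

/* All example_* identifiers are hypothetical. */
struct example_inode { struct inode vfs_inode; };

static struct kmem_cache *example_cachep;
static struct file_system_type example_fs_type;	/* assumed to be initialised elsewhere */

static int __init example_fs_init(void)
{
	int err;

	example_cachep = kmem_cache_create("example_inode",
					   sizeof(struct example_inode),
					   0, SLAB_HWCACHE_ALIGN, NULL);
	if (!example_cachep)
		return -ENOMEM;

	/* Register last: the filesystem must not be mountable before this. */
	err = register_filesystem(&example_fs_type);
	if (err)
		goto out_cache;
	return 0;

out_cache:
	kmem_cache_destroy(example_cachep);
	return err;
}
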
index 6d3a196..cfc6d44 100644 (file)
@@ -1048,15 +1048,12 @@ static int show_mountinfo(struct seq_file *m, void *v)
        if (err)
                goto out;
        seq_putc(m, ' ');
-       seq_path_root(m, &mnt_path, &root, " \t\n\\");
-       if (root.mnt != p->root.mnt || root.dentry != p->root.dentry) {
-               /*
-                * Mountpoint is outside root, discard that one.  Ugly,
-                * but less so than trying to do that in iterator in a
-                * race-free way (due to renames).
-                */
-               return SEQ_SKIP;
-       }
+
+       /* mountpoints outside of chroot jail will give SEQ_SKIP on this */
+       err = seq_path_root(m, &mnt_path, &root, " \t\n\\");
+       if (err)
+               goto out;
+
        seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw");
        show_mnt_opts(m, mnt);
 
@@ -2776,3 +2773,8 @@ void kern_unmount(struct vfsmount *mnt)
        }
 }
 EXPORT_SYMBOL(kern_unmount);
+
+bool our_mnt(struct vfsmount *mnt)
+{
+       return check_mnt(mnt);
+}
index 5b5fa33..cbd1a61 100644 (file)
@@ -548,7 +548,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
 
        error = bdi_setup_and_register(&server->bdi, "ncpfs", BDI_CAP_MAP_COPY);
        if (error)
-               goto out_bdi;
+               goto out_fput;
 
        server->ncp_filp = ncp_filp;
        server->ncp_sock = sock;
@@ -559,7 +559,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
                error = -EBADF;
                server->info_filp = fget(data.info_fd);
                if (!server->info_filp)
-                       goto out_fput;
+                       goto out_bdi;
                error = -ENOTSOCK;
                sock_inode = server->info_filp->f_path.dentry->d_inode;
                if (!S_ISSOCK(sock_inode->i_mode))
@@ -746,9 +746,9 @@ out_nls:
 out_fput2:
        if (server->info_filp)
                fput(server->info_filp);
-out_fput:
-       bdi_destroy(&server->bdi);
 out_bdi:
+       bdi_destroy(&server->bdi);
+out_fput:
        /* 23/12/1998 Marcin Dalecki <dalecki@cs.net.pl>:
         * 
         * The previously used put_filp(ncp_filp); was bogus, since
index eca56d4..606ef0f 100644 (file)
@@ -147,7 +147,7 @@ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
         * origin == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
         * the cached file length
         */
-       if (origin != SEEK_SET || origin != SEEK_CUR) {
+       if (origin != SEEK_SET && origin != SEEK_CUR) {
                struct inode *inode = filp->f_mapping->host;
 
                int retval = nfs_revalidate_file_size(inode, filp);
index be2bbac..d9f4d78 100644 (file)
@@ -39,6 +39,8 @@
 #include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/string.h>
+#include <linux/ratelimit.h>
+#include <linux/printk.h>
 #include <linux/slab.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/gss_api.h>
@@ -894,6 +896,8 @@ out:
 
 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
 {
+       if (delegation == NULL)
+               return 0;
        if ((delegation->type & fmode) != fmode)
                return 0;
        if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
@@ -1036,8 +1040,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
                }
                rcu_read_lock();
                delegation = rcu_dereference(nfsi->delegation);
-               if (delegation == NULL ||
-                   !can_open_delegated(delegation, fmode)) {
+               if (!can_open_delegated(delegation, fmode)) {
                        rcu_read_unlock();
                        break;
                }
@@ -1091,7 +1094,12 @@ static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data
                if (delegation)
                        delegation_flags = delegation->flags;
                rcu_read_unlock();
-               if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
+               if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
+                       pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
+                                       "returning a delegation for "
+                                       "OPEN(CLAIM_DELEGATE_CUR)\n",
+                                       NFS_CLIENT(inode)->cl_server);
+               } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
                        nfs_inode_set_delegation(state->inode,
                                        data->owner->so_cred,
                                        &data->o_res);
@@ -1423,11 +1431,9 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
                        goto out_no_action;
                rcu_read_lock();
                delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
-               if (delegation != NULL &&
-                   test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0) {
-                       rcu_read_unlock();
-                       goto out_no_action;
-               }
+               if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
+                   can_open_delegated(delegation, data->o_arg.fmode))
+                       goto unlock_no_action;
                rcu_read_unlock();
        }
        /* Update sequence id. */
@@ -1444,6 +1450,8 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
                return;
        rpc_call_start(task);
        return;
+unlock_no_action:
+       rcu_read_unlock();
 out_no_action:
        task->tk_action = NULL;
 
index 39914be..6a7107a 100644 (file)
@@ -1156,11 +1156,13 @@ restart:
                if (status >= 0) {
                        status = nfs4_reclaim_locks(state, ops);
                        if (status >= 0) {
+                               spin_lock(&state->state_lock);
                                list_for_each_entry(lock, &state->lock_states, ls_locks) {
                                        if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
                                                printk("%s: Lock reclaim failed!\n",
                                                        __func__);
                                }
+                               spin_unlock(&state->state_lock);
                                nfs4_put_open_state(state);
                                goto restart;
                        }
@@ -1224,10 +1226,12 @@ static void nfs4_clear_open_state(struct nfs4_state *state)
        clear_bit(NFS_O_RDONLY_STATE, &state->flags);
        clear_bit(NFS_O_WRONLY_STATE, &state->flags);
        clear_bit(NFS_O_RDWR_STATE, &state->flags);
+       spin_lock(&state->state_lock);
        list_for_each_entry(lock, &state->lock_states, ls_locks) {
                lock->ls_seqid.flags = 0;
                lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
        }
+       spin_unlock(&state->state_lock);
 }
 
 static void nfs4_reset_seqids(struct nfs_server *server,
@@ -1350,12 +1354,14 @@ static void nfs4_warn_keyexpired(const char *s)
 static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
 {
        switch (error) {
+               case 0:
+                       break;
                case -NFS4ERR_CB_PATH_DOWN:
                        nfs_handle_cb_pathdown(clp);
-                       return 0;
+                       break;
                case -NFS4ERR_NO_GRACE:
                        nfs4_state_end_reclaim_reboot(clp);
-                       return 0;
+                       break;
                case -NFS4ERR_STALE_CLIENTID:
                case -NFS4ERR_LEASE_MOVED:
                        set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
@@ -1375,13 +1381,15 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
                case -NFS4ERR_SEQ_MISORDERED:
                        set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
                        /* Zero session reset errors */
-                       return 0;
+                       break;
                case -EKEYEXPIRED:
                        /* Nothing we can do */
                        nfs4_warn_keyexpired(clp->cl_hostname);
-                       return 0;
+                       break;
+               default:
+                       return error;
        }
-       return error;
+       return 0;
 }
 
 static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
@@ -1428,7 +1436,7 @@ static int nfs4_check_lease(struct nfs_client *clp)
        struct rpc_cred *cred;
        const struct nfs4_state_maintenance_ops *ops =
                clp->cl_mvops->state_renewal_ops;
-       int status = -NFS4ERR_EXPIRED;
+       int status;
 
        /* Is the client already known to have an expired lease? */
        if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
@@ -1438,6 +1446,7 @@ static int nfs4_check_lease(struct nfs_client *clp)
        spin_unlock(&clp->cl_lock);
        if (cred == NULL) {
                cred = nfs4_get_setclientid_cred(clp);
+               status = -ENOKEY;
                if (cred == NULL)
                        goto out;
        }
@@ -1525,16 +1534,16 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
 {
        if (!flags)
                return;
-       else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
+       if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
                nfs41_handle_server_reboot(clp);
-       else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
+       if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
                            SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
                            SEQ4_STATUS_ADMIN_STATE_REVOKED |
                            SEQ4_STATUS_LEASE_MOVED))
                nfs41_handle_state_revoked(clp);
-       else if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
+       if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
                nfs41_handle_recallable_state_revoked(clp);
-       else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
+       if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
                            SEQ4_STATUS_BACKCHANNEL_FAULT |
                            SEQ4_STATUS_CB_PATH_DOWN_SESSION))
                nfs41_handle_cb_path_down(clp);
@@ -1662,10 +1671,10 @@ static void nfs4_state_manager(struct nfs_client *clp)
 
                if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
                        status = nfs4_check_lease(clp);
+                       if (status < 0)
+                               goto out_error;
                        if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
                                continue;
-                       if (status < 0 && status != -NFS4ERR_CB_PATH_DOWN)
-                               goto out_error;
                }
 
                /* Initialize or reset the session */
index 41d6743..ac258be 100644 (file)
@@ -625,6 +625,9 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
                if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
                        goto out_free;
 
+               if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
+                       goto out_free;
+
                len = argv[n].v_size * argv[n].v_nmembs;
                base = (void __user *)(unsigned long)argv[n].v_base;
                if (len == 0) {
@@ -842,6 +845,19 @@ long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        case FS_IOC32_GETVERSION:
                cmd = FS_IOC_GETVERSION;
                break;
+       case NILFS_IOCTL_CHANGE_CPMODE:
+       case NILFS_IOCTL_DELETE_CHECKPOINT:
+       case NILFS_IOCTL_GET_CPINFO:
+       case NILFS_IOCTL_GET_CPSTAT:
+       case NILFS_IOCTL_GET_SUINFO:
+       case NILFS_IOCTL_GET_SUSTAT:
+       case NILFS_IOCTL_GET_VINFO:
+       case NILFS_IOCTL_GET_BDESCS:
+       case NILFS_IOCTL_CLEAN_SEGMENTS:
+       case NILFS_IOCTL_SYNC:
+       case NILFS_IOCTL_RESIZE:
+       case NILFS_IOCTL_SET_ALLOC_RANGE:
+               break;
        default:
                return -ENOIOCTLCMD;
        }
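
The hunk above uses the common compat_ioctl pattern: translate commands whose numbers differ for 32-bit callers, pass through commands whose argument layout is already compatible, and reject the rest with -ENOIOCTLCMD. A hedged sketch, with the EXAMPLE_* command and example_ioctl() handler as hypothetical placeholders:

#include <linux/compat.h>
#include <linux/fs.h>

#define EXAMPLE_IOC_SYNC	_IO('E', 0x01)		/* hypothetical command */

long example_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);	/* hypothetical native handler */

static long example_compat_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case FS_IOC32_GETVERSION:
		cmd = FS_IOC_GETVERSION;	/* command number differs in 32-bit mode */
		break;
	case EXAMPLE_IOC_SYNC:			/* layout-compatible, pass through as-is */
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return example_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
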
index 5861741..80e4645 100644 (file)
@@ -131,12 +131,13 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                K(i.freeswap),
                K(global_page_state(NR_FILE_DIRTY)),
                K(global_page_state(NR_WRITEBACK)),
-               K(global_page_state(NR_ANON_PAGES)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+               K(global_page_state(NR_ANON_PAGES)
                  + global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
-                 HPAGE_PMD_NR
+                 HPAGE_PMD_NR),
+#else
+               K(global_page_state(NR_ANON_PAGES)),
 #endif
-                 ),
                K(global_page_state(NR_FILE_MAPPED)),
                K(global_page_state(NR_SHMEM)),
                K(global_page_state(NR_SLAB_RECLAIMABLE) +
index 9a8a2b7..03102d9 100644 (file)
@@ -91,20 +91,18 @@ static struct file_system_type proc_fs_type = {
 
 void __init proc_root_init(void)
 {
-       struct vfsmount *mnt;
        int err;
 
        proc_init_inodecache();
        err = register_filesystem(&proc_fs_type);
        if (err)
                return;
-       mnt = kern_mount_data(&proc_fs_type, &init_pid_ns);
-       if (IS_ERR(mnt)) {
+       err = pid_ns_prepare_proc(&init_pid_ns);
+       if (err) {
                unregister_filesystem(&proc_fs_type);
                return;
        }
 
-       init_pid_ns.proc_mnt = mnt;
        proc_symlink("mounts", NULL, "self/mounts");
 
        proc_net_init();
@@ -209,5 +207,5 @@ int pid_ns_prepare_proc(struct pid_namespace *ns)
 
 void pid_ns_release_proc(struct pid_namespace *ns)
 {
-       mntput(ns->proc_mnt);
+       kern_unmount(ns->proc_mnt);
 }
index 42b274d..2a30d67 100644 (file)
@@ -32,7 +32,7 @@ static cputime64_t get_idle_time(int cpu)
                idle = kstat_cpu(cpu).cpustat.idle;
                idle = cputime64_add(idle, arch_idle_time(cpu));
        } else
-               idle = usecs_to_cputime(idle_time);
+               idle = nsecs_to_jiffies64(1000 * idle_time);
 
        return idle;
 }
@@ -46,7 +46,7 @@ static cputime64_t get_iowait_time(int cpu)
                /* !NO_HZ so we can rely on cpustat.iowait */
                iowait = kstat_cpu(cpu).cpustat.iowait;
        else
-               iowait = usecs_to_cputime(iowait_time);
+               iowait = nsecs_to_jiffies64(1000 * iowait_time);
 
        return iowait;
 }
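
The two hunks above stop narrowing the 64-bit microsecond counters through usecs_to_cputime() and instead convert explicitly through nanoseconds. A hedged helper showing the same arithmetic, assuming truncation was the motivation for the change:

#include <linux/jiffies.h>
#include <linux/time.h>

/* Hypothetical helper, not in the patch: u64 microseconds to 64-bit
 * jiffies via nanoseconds, with no 32-bit intermediate.
 */
static inline u64 example_usecs_to_jiffies64(u64 usec)
{
	return nsecs_to_jiffies64(usec * NSEC_PER_USEC);
}
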
index 05d6b0e..dba43c3 100644 (file)
@@ -449,8 +449,6 @@ EXPORT_SYMBOL(seq_path);
 
 /*
  * Same as seq_path, but relative to supplied root.
- *
- * root may be changed, see __d_path().
  */
 int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
                  char *esc)
@@ -463,6 +461,8 @@ int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
                char *p;
 
                p = __d_path(path, root, buf, size);
+               if (!p)
+                       return SEQ_SKIP;
                res = PTR_ERR(p);
                if (!IS_ERR(p)) {
                        char *end = mangle_path(buf, p, esc);
@@ -474,7 +474,7 @@ int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
        }
        seq_commit(m, res);
 
-       return res < 0 ? res : 0;
+       return res < 0 && res != -ENAMETOOLONG ? res : 0;
 }
 
 /*
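
With the change above, seq_path_root() reports an out-of-root path as SEQ_SKIP, so a ->show() handler can simply propagate its return value, as show_mountinfo() now does. A hedged sketch of that calling convention (struct example_rec is hypothetical):

#include <linux/path.h>
#include <linux/seq_file.h>

struct example_rec {		/* hypothetical iterator payload */
	struct path path;
	struct path root;
};

static int example_show(struct seq_file *m, void *v)
{
	struct example_rec *r = v;
	int err;

	err = seq_path_root(m, &r->path, &r->root, " \t\n\\");
	if (err)
		return err;	/* SEQ_SKIP drops the record silently */

	seq_putc(m, '\n');
	return 0;
}
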
index 20403dc..ae0e76b 100644 (file)
@@ -2264,19 +2264,12 @@ static int __init ubifs_init(void)
                return -EINVAL;
        }
 
-       err = register_filesystem(&ubifs_fs_type);
-       if (err) {
-               ubifs_err("cannot register file system, error %d", err);
-               return err;
-       }
-
-       err = -ENOMEM;
        ubifs_inode_slab = kmem_cache_create("ubifs_inode_slab",
                                sizeof(struct ubifs_inode), 0,
                                SLAB_MEM_SPREAD | SLAB_RECLAIM_ACCOUNT,
                                &inode_slab_ctor);
        if (!ubifs_inode_slab)
-               goto out_reg;
+               return -ENOMEM;
 
        register_shrinker(&ubifs_shrinker_info);
 
@@ -2288,15 +2281,20 @@ static int __init ubifs_init(void)
        if (err)
                goto out_compr;
 
+       err = register_filesystem(&ubifs_fs_type);
+       if (err) {
+               ubifs_err("cannot register file system, error %d", err);
+               goto out_dbg;
+       }
        return 0;
 
+out_dbg:
+       dbg_debugfs_exit();
 out_compr:
        ubifs_compressors_exit();
 out_shrinker:
        unregister_shrinker(&ubifs_shrinker_info);
        kmem_cache_destroy(ubifs_inode_slab);
-out_reg:
-       unregister_filesystem(&ubifs_fs_type);
        return err;
 }
 /* late_initcall to let compressors initialize first */
index b6c4b37..76e4266 100644 (file)
@@ -42,6 +42,8 @@ xfs_acl_from_disk(struct xfs_acl *aclp)
        int count, i;
 
        count = be32_to_cpu(aclp->acl_cnt);
+       if (count > XFS_ACL_MAX_ENTRIES)
+               return ERR_PTR(-EFSCORRUPTED);
 
        acl = posix_acl_alloc(count, GFP_KERNEL);
        if (!acl)
index d4906e7..c1b55e5 100644 (file)
@@ -110,6 +110,7 @@ xfs_attr_namesp_match(int arg_flags, int ondisk_flags)
 /*
  * Query whether the requested number of additional bytes of extended
  * attribute space will be able to fit inline.
+ *
  * Returns zero if not, else the di_forkoff fork offset to be used in the
  * literal area for attribute data once the new bytes have been added.
  *
@@ -122,7 +123,7 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
        int offset;
        int minforkoff; /* lower limit on valid forkoff locations */
        int maxforkoff; /* upper limit on valid forkoff locations */
-       int dsize;      
+       int dsize;
        xfs_mount_t *mp = dp->i_mount;
 
        offset = (XFS_LITINO(mp) - bytes) >> 3; /* rounded down */
@@ -136,47 +137,60 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
                return (offset >= minforkoff) ? minforkoff : 0;
        }
 
-       if (!(mp->m_flags & XFS_MOUNT_ATTR2)) {
-               if (bytes <= XFS_IFORK_ASIZE(dp))
-                       return dp->i_d.di_forkoff;
+       /*
+        * If the requested number of bytes is smaller than or equal to the
+        * current attribute fork size we can always proceed.
+        *
+        * Note that if_bytes in the data fork might actually be larger than
+        * the current data fork size due to delalloc extents. In that
+        * case either the extent count will go down when they are converted
+        * to real extents, or the delalloc conversion will take care of the
+        * literal area rebalancing.
+        */
+       if (bytes <= XFS_IFORK_ASIZE(dp))
+               return dp->i_d.di_forkoff;
+
+       /*
+        * For attr2 we can try to move the forkoff if there is space in the
+        * literal area, but for the old format we are done if there is no
+        * space in the fixed attribute fork.
+        */
+       if (!(mp->m_flags & XFS_MOUNT_ATTR2))
                return 0;
-       }
 
        dsize = dp->i_df.if_bytes;
-       
+
        switch (dp->i_d.di_format) {
        case XFS_DINODE_FMT_EXTENTS:
-               /* 
+               /*
                 * If there is no attr fork and the data fork is extents, 
-                * determine if creating the default attr fork will result 
-                * in the extents form migrating to btree. If so, the 
-                * minimum offset only needs to be the space required for 
+                * determine if creating the default attr fork will result
+                * in the extents form migrating to btree. If so, the
+                * minimum offset only needs to be the space required for
                 * the btree root.
-                */ 
+                */
                if (!dp->i_d.di_forkoff && dp->i_df.if_bytes >
                    xfs_default_attroffset(dp))
                        dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
                break;
-               
        case XFS_DINODE_FMT_BTREE:
                /*
-                * If have data btree then keep forkoff if we have one,
-                * otherwise we are adding a new attr, so then we set 
-                * minforkoff to where the btree root can finish so we have 
+                * If we have a data btree then keep forkoff if we have one,
+                * otherwise we are adding a new attr, so then we set
+                * minforkoff to where the btree root can finish so we have
                 * plenty of room for attrs
                 */
                if (dp->i_d.di_forkoff) {
-                       if (offset < dp->i_d.di_forkoff) 
+                       if (offset < dp->i_d.di_forkoff)
                                return 0;
-                       else 
-                               return dp->i_d.di_forkoff;
-               } else
-                       dsize = XFS_BMAP_BROOT_SPACE(dp->i_df.if_broot);
+                       return dp->i_d.di_forkoff;
+               }
+               dsize = XFS_BMAP_BROOT_SPACE(dp->i_df.if_broot);
                break;
        }
-       
-       /* 
-        * A data fork btree root must have space for at least 
+
+       /*
+        * A data fork btree root must have space for at least
         * MINDBTPTRS key/ptr pairs if the data fork is small or empty.
         */
        minforkoff = MAX(dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS));
@@ -186,10 +200,10 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
        maxforkoff = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
        maxforkoff = maxforkoff >> 3;   /* rounded down */
 
-       if (offset >= minforkoff && offset < maxforkoff)
-               return offset;
        if (offset >= maxforkoff)
                return maxforkoff;
+       if (offset >= minforkoff)
+               return offset;
        return 0;
 }
 
index c68baeb..d0ab788 100644 (file)
@@ -2383,6 +2383,8 @@ xfs_bmap_btalloc(
        int             tryagain;
        int             error;
 
+       ASSERT(ap->length);
+
        mp = ap->ip->i_mount;
        align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
        if (unlikely(align)) {
@@ -4629,6 +4631,8 @@ xfs_bmapi_allocate(
        int                     error;
        int                     rt;
 
+       ASSERT(bma->length > 0);
+
        rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(bma->ip);
 
        /*
@@ -4849,6 +4853,7 @@ xfs_bmapi_write(
        ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
        ASSERT(!(flags & XFS_BMAPI_IGSTATE));
        ASSERT(tp != NULL);
+       ASSERT(len > 0);
 
        whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
                XFS_ATTR_FORK : XFS_DATA_FORK;
@@ -4918,9 +4923,22 @@ xfs_bmapi_write(
                        bma.eof = eof;
                        bma.conv = !!(flags & XFS_BMAPI_CONVERT);
                        bma.wasdel = wasdelay;
-                       bma.length = len;
                        bma.offset = bno;
 
+                       /*
+                        * There's a 32/64 bit type mismatch between the
+                        * allocation length request (which can be 64 bits in
+                        * length) and the bma length request, which is
+                        * xfs_extlen_t and therefore 32 bits. Hence we have to
+                        * check for 32-bit overflows and handle them here.
+                        */
+                       if (len > (xfs_filblks_t)MAXEXTLEN)
+                               bma.length = MAXEXTLEN;
+                       else
+                               bma.length = len;
+
+                       ASSERT(len > 0);
+                       ASSERT(bma.length > 0);
                        error = xfs_bmapi_allocate(&bma, flags);
                        if (error)
                                goto error0;
index da10897..558910f 100644 (file)
@@ -98,22 +98,22 @@ xfs_fs_encode_fh(
        switch (fileid_type) {
        case FILEID_INO32_GEN_PARENT:
                spin_lock(&dentry->d_lock);
-               fid->i32.parent_ino = dentry->d_parent->d_inode->i_ino;
+               fid->i32.parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
                fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation;
                spin_unlock(&dentry->d_lock);
                /*FALLTHRU*/
        case FILEID_INO32_GEN:
-               fid->i32.ino = inode->i_ino;
+               fid->i32.ino = XFS_I(inode)->i_ino;
                fid->i32.gen = inode->i_generation;
                break;
        case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
                spin_lock(&dentry->d_lock);
-               fid64->parent_ino = dentry->d_parent->d_inode->i_ino;
+               fid64->parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
                fid64->parent_gen = dentry->d_parent->d_inode->i_generation;
                spin_unlock(&dentry->d_lock);
                /*FALLTHRU*/
        case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG:
-               fid64->ino = inode->i_ino;
+               fid64->ino = XFS_I(inode)->i_ino;
                fid64->gen = inode->i_generation;
                break;
        }
index c0237c6..755ee81 100644 (file)
@@ -2835,6 +2835,27 @@ corrupt_out:
        return XFS_ERROR(EFSCORRUPTED);
 }
 
+void
+xfs_promote_inode(
+       struct xfs_inode        *ip)
+{
+       struct xfs_buf          *bp;
+
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
+
+       bp = xfs_incore(ip->i_mount->m_ddev_targp, ip->i_imap.im_blkno,
+                       ip->i_imap.im_len, XBF_TRYLOCK);
+       if (!bp)
+               return;
+
+       if (XFS_BUF_ISDELAYWRITE(bp)) {
+               xfs_buf_delwri_promote(bp);
+               wake_up_process(ip->i_mount->m_ddev_targp->bt_task);
+       }
+
+       xfs_buf_relse(bp);
+}
+
 /*
  * Return a pointer to the extent record at file index idx.
  */
index 760140d..b4cd473 100644 (file)
@@ -498,6 +498,7 @@ int         xfs_iunlink(struct xfs_trans *, xfs_inode_t *);
 void           xfs_iext_realloc(xfs_inode_t *, int, int);
 void           xfs_iunpin_wait(xfs_inode_t *);
 int            xfs_iflush(xfs_inode_t *, uint);
+void           xfs_promote_inode(struct xfs_inode *);
 void           xfs_lock_inodes(xfs_inode_t **, int, uint);
 void           xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
 
index a14cd89..34817ad 100644 (file)
@@ -150,6 +150,117 @@ xlog_grant_add_space(
        } while (head_val != old);
 }
 
+STATIC bool
+xlog_reserveq_wake(
+       struct log              *log,
+       int                     *free_bytes)
+{
+       struct xlog_ticket      *tic;
+       int                     need_bytes;
+
+       list_for_each_entry(tic, &log->l_reserveq, t_queue) {
+               if (tic->t_flags & XLOG_TIC_PERM_RESERV)
+                       need_bytes = tic->t_unit_res * tic->t_cnt;
+               else
+                       need_bytes = tic->t_unit_res;
+
+               if (*free_bytes < need_bytes)
+                       return false;
+               *free_bytes -= need_bytes;
+
+               trace_xfs_log_grant_wake_up(log, tic);
+               wake_up(&tic->t_wait);
+       }
+
+       return true;
+}
+
+STATIC bool
+xlog_writeq_wake(
+       struct log              *log,
+       int                     *free_bytes)
+{
+       struct xlog_ticket      *tic;
+       int                     need_bytes;
+
+       list_for_each_entry(tic, &log->l_writeq, t_queue) {
+               ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
+
+               need_bytes = tic->t_unit_res;
+
+               if (*free_bytes < need_bytes)
+                       return false;
+               *free_bytes -= need_bytes;
+
+               trace_xfs_log_regrant_write_wake_up(log, tic);
+               wake_up(&tic->t_wait);
+       }
+
+       return true;
+}
+
+STATIC int
+xlog_reserveq_wait(
+       struct log              *log,
+       struct xlog_ticket      *tic,
+       int                     need_bytes)
+{
+       list_add_tail(&tic->t_queue, &log->l_reserveq);
+
+       do {
+               if (XLOG_FORCED_SHUTDOWN(log))
+                       goto shutdown;
+               xlog_grant_push_ail(log, need_bytes);
+
+               XFS_STATS_INC(xs_sleep_logspace);
+               trace_xfs_log_grant_sleep(log, tic);
+
+               xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
+               trace_xfs_log_grant_wake(log, tic);
+
+               spin_lock(&log->l_grant_reserve_lock);
+               if (XLOG_FORCED_SHUTDOWN(log))
+                       goto shutdown;
+       } while (xlog_space_left(log, &log->l_grant_reserve_head) < need_bytes);
+
+       list_del_init(&tic->t_queue);
+       return 0;
+shutdown:
+       list_del_init(&tic->t_queue);
+       return XFS_ERROR(EIO);
+}
+
+STATIC int
+xlog_writeq_wait(
+       struct log              *log,
+       struct xlog_ticket      *tic,
+       int                     need_bytes)
+{
+       list_add_tail(&tic->t_queue, &log->l_writeq);
+
+       do {
+               if (XLOG_FORCED_SHUTDOWN(log))
+                       goto shutdown;
+               xlog_grant_push_ail(log, need_bytes);
+
+               XFS_STATS_INC(xs_sleep_logspace);
+               trace_xfs_log_regrant_write_sleep(log, tic);
+
+               xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
+               trace_xfs_log_regrant_write_wake(log, tic);
+
+               spin_lock(&log->l_grant_write_lock);
+               if (XLOG_FORCED_SHUTDOWN(log))
+                       goto shutdown;
+       } while (xlog_space_left(log, &log->l_grant_write_head) < need_bytes);
+
+       list_del_init(&tic->t_queue);
+       return 0;
+shutdown:
+       list_del_init(&tic->t_queue);
+       return XFS_ERROR(EIO);
+}
+
 static void
 xlog_tic_reset_res(xlog_ticket_t *tic)
 {
@@ -350,8 +461,19 @@ xfs_log_reserve(
                retval = xlog_grant_log_space(log, internal_ticket);
        }
 
+       if (unlikely(retval)) {
+               /*
+                * If we are failing, make sure the ticket doesn't have any
+                * current reservations.  We don't want to add this back
+                * when the ticket/transaction gets cancelled.
+                */
+               internal_ticket->t_curr_res = 0;
+               /* ungrant will give back unit_res * t_cnt. */
+               internal_ticket->t_cnt = 0;
+       }
+
        return retval;
-}      /* xfs_log_reserve */
+}
 
 
 /*
@@ -2481,8 +2603,8 @@ restart:
 /*
  * Atomically get the log space required for a log ticket.
  *
- * Once a ticket gets put onto the reserveq, it will only return after
- * the needed reservation is satisfied.
+ * Once a ticket gets put onto the reserveq, it will only return after the
+ * needed reservation is satisfied.
  *
  * This function is structured so that it has a lock free fast path. This is
  * necessary because every new transaction reservation will come through this
@@ -2490,113 +2612,53 @@ restart:
  * every pass.
  *
  * As tickets are only ever moved on and off the reserveq under the
- * l_grant_reserve_lock, we only need to take that lock if we are going
- * to add the ticket to the queue and sleep. We can avoid taking the lock if the
- * ticket was never added to the reserveq because the t_queue list head will be
- * empty and we hold the only reference to it so it can safely be checked
- * unlocked.
+ * l_grant_reserve_lock, we only need to take that lock if we are going to add
+ * the ticket to the queue and sleep. We can avoid taking the lock if the ticket
+ * was never added to the reserveq because the t_queue list head will be empty
+ * and we hold the only reference to it so it can safely be checked unlocked.
  */
 STATIC int
-xlog_grant_log_space(xlog_t       *log,
-                    xlog_ticket_t *tic)
+xlog_grant_log_space(
+       struct log              *log,
+       struct xlog_ticket      *tic)
 {
-       int              free_bytes;
-       int              need_bytes;
+       int                     free_bytes, need_bytes;
+       int                     error = 0;
 
-#ifdef DEBUG
-       if (log->l_flags & XLOG_ACTIVE_RECOVERY)
-               panic("grant Recovery problem");
-#endif
+       ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
 
        trace_xfs_log_grant_enter(log, tic);
 
+       /*
+        * If there are other waiters on the queue then give them a chance at
+        * logspace before us.  Wake up the first waiters; if we do not wake
+        * up all the waiters, go to sleep waiting for more free space,
+        * otherwise try to get some space for this transaction.
+        */
        need_bytes = tic->t_unit_res;
        if (tic->t_flags & XFS_LOG_PERM_RESERV)
                need_bytes *= tic->t_ocnt;
-
-       /* something is already sleeping; insert new transaction at end */
-       if (!list_empty_careful(&log->l_reserveq)) {
-               spin_lock(&log->l_grant_reserve_lock);
-               /* recheck the queue now we are locked */
-               if (list_empty(&log->l_reserveq)) {
-                       spin_unlock(&log->l_grant_reserve_lock);
-                       goto redo;
-               }
-               list_add_tail(&tic->t_queue, &log->l_reserveq);
-
-               trace_xfs_log_grant_sleep1(log, tic);
-
-               /*
-                * Gotta check this before going to sleep, while we're
-                * holding the grant lock.
-                */
-               if (XLOG_FORCED_SHUTDOWN(log))
-                       goto error_return;
-
-               XFS_STATS_INC(xs_sleep_logspace);
-               xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
-
-               /*
-                * If we got an error, and the filesystem is shutting down,
-                * we'll catch it down below. So just continue...
-                */
-               trace_xfs_log_grant_wake1(log, tic);
-       }
-
-redo:
-       if (XLOG_FORCED_SHUTDOWN(log))
-               goto error_return_unlocked;
-
        free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
-       if (free_bytes < need_bytes) {
+       if (!list_empty_careful(&log->l_reserveq)) {
                spin_lock(&log->l_grant_reserve_lock);
-               if (list_empty(&tic->t_queue))
-                       list_add_tail(&tic->t_queue, &log->l_reserveq);
-
-               trace_xfs_log_grant_sleep2(log, tic);
-
-               if (XLOG_FORCED_SHUTDOWN(log))
-                       goto error_return;
-
-               xlog_grant_push_ail(log, need_bytes);
-
-               XFS_STATS_INC(xs_sleep_logspace);
-               xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
-
-               trace_xfs_log_grant_wake2(log, tic);
-               goto redo;
-       }
-
-       if (!list_empty(&tic->t_queue)) {
+               if (!xlog_reserveq_wake(log, &free_bytes) ||
+                   free_bytes < need_bytes)
+                       error = xlog_reserveq_wait(log, tic, need_bytes);
+               spin_unlock(&log->l_grant_reserve_lock);
+       } else if (free_bytes < need_bytes) {
                spin_lock(&log->l_grant_reserve_lock);
-               list_del_init(&tic->t_queue);
+               error = xlog_reserveq_wait(log, tic, need_bytes);
                spin_unlock(&log->l_grant_reserve_lock);
        }
+       if (error)
+               return error;
 
-       /* we've got enough space */
        xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes);
        xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
        trace_xfs_log_grant_exit(log, tic);
        xlog_verify_grant_tail(log);
        return 0;
-
-error_return_unlocked:
-       spin_lock(&log->l_grant_reserve_lock);
-error_return:
-       list_del_init(&tic->t_queue);
-       spin_unlock(&log->l_grant_reserve_lock);
-       trace_xfs_log_grant_error(log, tic);
-
-       /*
-        * If we are failing, make sure the ticket doesn't have any
-        * current reservations. We don't want to add this back when
-        * the ticket/transaction gets cancelled.
-        */
-       tic->t_curr_res = 0;
-       tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
-       return XFS_ERROR(EIO);
-}      /* xlog_grant_log_space */
-
+}
 
 /*
  * Replenish the byte reservation required by moving the grant write head.
@@ -2605,10 +2667,12 @@ error_return:
  * free fast path.
  */
 STATIC int
-xlog_regrant_write_log_space(xlog_t       *log,
-                            xlog_ticket_t *tic)
+xlog_regrant_write_log_space(
+       struct log              *log,
+       struct xlog_ticket      *tic)
 {
-       int             free_bytes, need_bytes;
+       int                     free_bytes, need_bytes;
+       int                     error = 0;
 
        tic->t_curr_res = tic->t_unit_res;
        xlog_tic_reset_res(tic);
@@ -2616,104 +2680,38 @@ xlog_regrant_write_log_space(xlog_t       *log,
        if (tic->t_cnt > 0)
                return 0;
 
-#ifdef DEBUG
-       if (log->l_flags & XLOG_ACTIVE_RECOVERY)
-               panic("regrant Recovery problem");
-#endif
+       ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
 
        trace_xfs_log_regrant_write_enter(log, tic);
-       if (XLOG_FORCED_SHUTDOWN(log))
-               goto error_return_unlocked;
 
-       /* If there are other waiters on the queue then give them a
-        * chance at logspace before us. Wake up the first waiters,
-        * if we do not wake up all the waiters then go to sleep waiting
-        * for more free space, otherwise try to get some space for
-        * this transaction.
+       /*
+        * If there are other waiters on the queue then give them a chance at
+        * logspace before us.  Wake up the first waiters; if we do not wake
+        * up all the waiters, go to sleep waiting for more free space,
+        * otherwise try to get some space for this transaction.
         */
        need_bytes = tic->t_unit_res;
-       if (!list_empty_careful(&log->l_writeq)) {
-               struct xlog_ticket *ntic;
-
-               spin_lock(&log->l_grant_write_lock);
-               free_bytes = xlog_space_left(log, &log->l_grant_write_head);
-               list_for_each_entry(ntic, &log->l_writeq, t_queue) {
-                       ASSERT(ntic->t_flags & XLOG_TIC_PERM_RESERV);
-
-                       if (free_bytes < ntic->t_unit_res)
-                               break;
-                       free_bytes -= ntic->t_unit_res;
-                       wake_up(&ntic->t_wait);
-               }
-
-               if (ntic != list_first_entry(&log->l_writeq,
-                                               struct xlog_ticket, t_queue)) {
-                       if (list_empty(&tic->t_queue))
-                               list_add_tail(&tic->t_queue, &log->l_writeq);
-                       trace_xfs_log_regrant_write_sleep1(log, tic);
-
-                       xlog_grant_push_ail(log, need_bytes);
-
-                       XFS_STATS_INC(xs_sleep_logspace);
-                       xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
-                       trace_xfs_log_regrant_write_wake1(log, tic);
-               } else
-                       spin_unlock(&log->l_grant_write_lock);
-       }
-
-redo:
-       if (XLOG_FORCED_SHUTDOWN(log))
-               goto error_return_unlocked;
-
        free_bytes = xlog_space_left(log, &log->l_grant_write_head);
-       if (free_bytes < need_bytes) {
+       if (!list_empty_careful(&log->l_writeq)) {
                spin_lock(&log->l_grant_write_lock);
-               if (list_empty(&tic->t_queue))
-                       list_add_tail(&tic->t_queue, &log->l_writeq);
-
-               if (XLOG_FORCED_SHUTDOWN(log))
-                       goto error_return;
-
-               xlog_grant_push_ail(log, need_bytes);
-
-               XFS_STATS_INC(xs_sleep_logspace);
-               trace_xfs_log_regrant_write_sleep2(log, tic);
-               xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
-
-               trace_xfs_log_regrant_write_wake2(log, tic);
-               goto redo;
-       }
-
-       if (!list_empty(&tic->t_queue)) {
+               if (!xlog_writeq_wake(log, &free_bytes) ||
+                   free_bytes < need_bytes)
+                       error = xlog_writeq_wait(log, tic, need_bytes);
+               spin_unlock(&log->l_grant_write_lock);
+       } else if (free_bytes < need_bytes) {
                spin_lock(&log->l_grant_write_lock);
-               list_del_init(&tic->t_queue);
+               error = xlog_writeq_wait(log, tic, need_bytes);
                spin_unlock(&log->l_grant_write_lock);
        }
 
-       /* we've got enough space */
+       if (error)
+               return error;
+
        xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
        trace_xfs_log_regrant_write_exit(log, tic);
        xlog_verify_grant_tail(log);
        return 0;
-
-
- error_return_unlocked:
-       spin_lock(&log->l_grant_write_lock);
- error_return:
-       list_del_init(&tic->t_queue);
-       spin_unlock(&log->l_grant_write_lock);
-       trace_xfs_log_regrant_write_error(log, tic);
-
-       /*
-        * If we are failing, make sure the ticket doesn't have any
-        * current reservations. We don't want to add this back when
-        * the ticket/transaction gets cancelled.
-        */
-       tic->t_curr_res = 0;
-       tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
-       return XFS_ERROR(EIO);
-}      /* xlog_regrant_write_log_space */
-
+}
 
 /* The first cnt-1 times through here we don't need to
  * move the grant write head because the permanent
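
The xlog_reserveq_wake()/xlog_writeq_wake() helpers added above implement a first-come, first-served budgeted wakeup: waiters are woken in queue order only while the remaining free space still covers their reservation. A hedged, generic sketch of that pattern (struct waiter is hypothetical):

#include <linux/list.h>
#include <linux/types.h>
#include <linux/wait.h>

struct waiter {			/* hypothetical queued reservation */
	struct list_head	list;
	int			need_bytes;
	wait_queue_head_t	wait;
};

/* Wake waiters in order while *free_bytes covers them; stop and report
 * false at the first waiter that no longer fits, leaving it queued.
 */
static bool wake_while_space_lasts(struct list_head *queue, int *free_bytes)
{
	struct waiter *w;

	list_for_each_entry(w, queue, list) {
		if (*free_bytes < w->need_bytes)
			return false;
		*free_bytes -= w->need_bytes;
		wake_up(&w->wait);
	}
	return true;
}
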
index aa3dc1a..be5c51d 100644 (file)
@@ -770,6 +770,17 @@ restart:
        if (!xfs_iflock_nowait(ip)) {
                if (!(sync_mode & SYNC_WAIT))
                        goto out;
+
+               /*
+                * If we only have a single dirty inode in a cluster there is
+                * a fair chance that the AIL push may have pushed it into
+                * the buffer, but xfsbufd won't touch it until 30 seconds
+                * from now, and thus we will lock up here.
+                *
+                * Promote the inode buffer to the front of the delwri list
+                * and wake up xfsbufd now.
+                */
+               xfs_promote_inode(ip);
                xfs_iflock(ip);
        }
 
index f1d2802..4940357 100644 (file)
@@ -834,18 +834,14 @@ DEFINE_LOGGRANT_EVENT(xfs_log_umount_write);
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_enter);
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_exit);
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_error);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep1);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake);
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep1);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake_up);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit);
index f4c38d8..2292d1a 100644 (file)
@@ -685,9 +685,15 @@ __SYSCALL(__NR_syncfs, sys_syncfs)
 __SYSCALL(__NR_setns, sys_setns)
 #define __NR_sendmmsg 269
 __SC_COMP(__NR_sendmmsg, sys_sendmmsg, compat_sys_sendmmsg)
+#define __NR_process_vm_readv 270
+__SC_COMP(__NR_process_vm_readv, sys_process_vm_readv, \
+          compat_sys_process_vm_readv)
+#define __NR_process_vm_writev 271
+__SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \
+          compat_sys_process_vm_writev)
 
 #undef __NR_syscalls
-#define __NR_syscalls 270
+#define __NR_syscalls 272
 
 /*
  * All syscalls below here should go away really,
index f81676f..14b6cd0 100644 (file)
        {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x675B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x675D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x675F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x677B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6842, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6843, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6849, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x68f2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x68f8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x68f9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x68fa, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x68fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x964e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x964f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0, 0, 0}
 
 #define r128_PCI_IDS \
index 4d4b59d..f4b8346 100644 (file)
@@ -205,61 +205,82 @@ struct bcma_bus {
        struct ssb_sprom sprom;
 };
 
-extern inline u32 bcma_read8(struct bcma_device *core, u16 offset)
+static inline u32 bcma_read8(struct bcma_device *core, u16 offset)
 {
        return core->bus->ops->read8(core, offset);
 }
-extern inline u32 bcma_read16(struct bcma_device *core, u16 offset)
+static inline u32 bcma_read16(struct bcma_device *core, u16 offset)
 {
        return core->bus->ops->read16(core, offset);
 }
-extern inline u32 bcma_read32(struct bcma_device *core, u16 offset)
+static inline u32 bcma_read32(struct bcma_device *core, u16 offset)
 {
        return core->bus->ops->read32(core, offset);
 }
-extern inline
+static inline
 void bcma_write8(struct bcma_device *core, u16 offset, u32 value)
 {
        core->bus->ops->write8(core, offset, value);
 }
-extern inline
+static inline
 void bcma_write16(struct bcma_device *core, u16 offset, u32 value)
 {
        core->bus->ops->write16(core, offset, value);
 }
-extern inline
+static inline
 void bcma_write32(struct bcma_device *core, u16 offset, u32 value)
 {
        core->bus->ops->write32(core, offset, value);
 }
 #ifdef CONFIG_BCMA_BLOCKIO
-extern inline void bcma_block_read(struct bcma_device *core, void *buffer,
+static inline void bcma_block_read(struct bcma_device *core, void *buffer,
                                   size_t count, u16 offset, u8 reg_width)
 {
        core->bus->ops->block_read(core, buffer, count, offset, reg_width);
 }
-extern inline void bcma_block_write(struct bcma_device *core, const void *buffer,
-                                   size_t count, u16 offset, u8 reg_width)
+static inline void bcma_block_write(struct bcma_device *core,
+                                   const void *buffer, size_t count,
+                                   u16 offset, u8 reg_width)
 {
        core->bus->ops->block_write(core, buffer, count, offset, reg_width);
 }
 #endif
-extern inline u32 bcma_aread32(struct bcma_device *core, u16 offset)
+static inline u32 bcma_aread32(struct bcma_device *core, u16 offset)
 {
        return core->bus->ops->aread32(core, offset);
 }
-extern inline
+static inline
 void bcma_awrite32(struct bcma_device *core, u16 offset, u32 value)
 {
        core->bus->ops->awrite32(core, offset, value);
 }
 
-#define bcma_mask32(cc, offset, mask) \
-       bcma_write32(cc, offset, bcma_read32(cc, offset) & (mask))
-#define bcma_set32(cc, offset, set) \
-       bcma_write32(cc, offset, bcma_read32(cc, offset) | (set))
-#define bcma_maskset32(cc, offset, mask, set) \
-       bcma_write32(cc, offset, (bcma_read32(cc, offset) & (mask)) | (set))
+static inline void bcma_mask32(struct bcma_device *cc, u16 offset, u32 mask)
+{
+       bcma_write32(cc, offset, bcma_read32(cc, offset) & mask);
+}
+static inline void bcma_set32(struct bcma_device *cc, u16 offset, u32 set)
+{
+       bcma_write32(cc, offset, bcma_read32(cc, offset) | set);
+}
+static inline void bcma_maskset32(struct bcma_device *cc,
+                                 u16 offset, u32 mask, u32 set)
+{
+       bcma_write32(cc, offset, (bcma_read32(cc, offset) & mask) | set);
+}
+static inline void bcma_mask16(struct bcma_device *cc, u16 offset, u16 mask)
+{
+       bcma_write16(cc, offset, bcma_read16(cc, offset) & mask);
+}
+static inline void bcma_set16(struct bcma_device *cc, u16 offset, u16 set)
+{
+       bcma_write16(cc, offset, bcma_read16(cc, offset) | set);
+}
+static inline void bcma_maskset16(struct bcma_device *cc,
+                                 u16 offset, u16 mask, u16 set)
+{
+       bcma_write16(cc, offset, (bcma_read16(cc, offset) & mask) | set);
+}
 
 extern bool bcma_core_is_enabled(struct bcma_device *core);
 extern void bcma_core_disable(struct bcma_device *core, u32 flags);
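
The old bcma_mask32()/bcma_set32()/bcma_maskset32() macros are now typed static inlines, and 16-bit variants are added. As a minimal sketch of a read-modify-write through these helpers (the register and bit names below are hypothetical, not part of the patch):

#include <linux/bcma/bcma.h>

/* Hypothetical register layout, for illustration only. */
#define MYDRV_CTL               0x0120
#define MYDRV_CTL_ENABLE        0x00000001
#define MYDRV_CTL_MODE_MASK     0x00000006
#define MYDRV_CTL_MODE_FAST     0x00000004

static void mydrv_start(struct bcma_device *core)
{
        /* Set the enable bit, leaving the rest of the register intact. */
        bcma_set32(core, MYDRV_CTL, MYDRV_CTL_ENABLE);

        /* Clear the mode field and select the "fast" mode in a single
         * read-modify-write, which is exactly what bcma_maskset32() wraps. */
        bcma_maskset32(core, MYDRV_CTL, ~MYDRV_CTL_MODE_MASK,
                       MYDRV_CTL_MODE_FAST);
}
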
index 1526d96..a33086a 100644 (file)
 #define BCMA_CC_PMU_CTL                        0x0600 /* PMU control */
 #define  BCMA_CC_PMU_CTL_ILP_DIV       0xFFFF0000 /* ILP div mask */
 #define  BCMA_CC_PMU_CTL_ILP_DIV_SHIFT 16
+#define  BCMA_CC_PMU_CTL_PLL_UPD       0x00000400
 #define  BCMA_CC_PMU_CTL_NOILPONW      0x00000200 /* No ILP on wait */
 #define  BCMA_CC_PMU_CTL_HTREQEN       0x00000100 /* HT req enable */
 #define  BCMA_CC_PMU_CTL_ALPREQEN      0x00000080 /* ALP req enable */
index c7a6d3b..94acd81 100644 (file)
@@ -805,9 +805,6 @@ extern void blk_unprep_request(struct request *);
  */
 extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
                                        spinlock_t *lock, int node_id);
-extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *,
-                                                          request_fn_proc *,
-                                                          spinlock_t *, int node_id);
 extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
 extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
                                                      request_fn_proc *, spinlock_t *);
index c86c940..081147d 100644 (file)
@@ -71,7 +71,7 @@ struct timecounter {
 
 /**
  * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
- * @tc:                Pointer to cycle counter.
+ * @cc:                Pointer to cycle counter.
  * @cycles:    Cycles
  *
  * XXX - This could use some mult_lxl_ll() asm optimization. Same code
@@ -114,7 +114,7 @@ extern u64 timecounter_read(struct timecounter *tc);
  *                        time base as values returned by
  *                        timecounter_read()
  * @tc:                Pointer to time counter.
- * @cycle:     a value returned by tc->cc->read()
+ * @cycle_tstamp:      a value returned by tc->cc->read()
  *
  * Cycle counts that are converted correctly as long as they
  * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
@@ -156,11 +156,12 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
  * @mult:              cycle to nanosecond multiplier
  * @shift:             cycle to nanosecond divisor (power of two)
  * @max_idle_ns:       max idle time permitted by the clocksource (nsecs)
- * @maxadj             maximum adjustment value to mult (~11%)
+ * @maxadj:            maximum adjustment value to mult (~11%)
  * @flags:             flags describing special properties
  * @archdata:          arch-specific data
  * @suspend:           suspend function for the clocksource, if necessary
  * @resume:            resume function for the clocksource, if necessary
+ * @cycle_last:                most recent cycle counter value seen by ::read()
  */
 struct clocksource {
        /*
@@ -187,6 +188,7 @@ struct clocksource {
        void (*suspend)(struct clocksource *cs);
        void (*resume)(struct clocksource *cs);
 
+       /* private: */
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
        /* Watchdog related data, used by the framework */
        struct list_head wd_list;
@@ -261,6 +263,9 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
 
 /**
  * clocksource_cyc2ns - converts clocksource cycles to nanoseconds
+ * @cycles:    cycles
+ * @mult:      cycle to nanosecond multiplier
+ * @shift:     cycle to nanosecond divisor (power of two)
  *
  * Converts cycles to nanoseconds, using the given mult and shift.
  *
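
The conversion documented above is a plain multiply-and-shift. As a worked sketch with made-up parameters (a 1 MHz counter, i.e. 1000 ns per cycle, encoded with shift = 20), 5 cycles convert to 5000 ns:

#include <linux/clocksource.h>

static s64 example_cyc2ns(cycle_t cycles)
{
        /* mult/shift as clocksource_hz2mult(1000000, 20) would produce. */
        const u32 shift = 20;
        const u32 mult = 1000 << shift;         /* 1048576000 */

        /* clocksource_cyc2ns(5, mult, shift) == (5 * 1048576000) >> 20 == 5000 */
        return clocksource_cyc2ns(cycles, mult, shift);
}
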
index 154bf56..66ed067 100644 (file)
@@ -552,5 +552,14 @@ extern ssize_t compat_rw_copy_check_uvector(int type,
 
 extern void __user *compat_alloc_user_space(unsigned long len);
 
+asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid,
+               const struct compat_iovec __user *lvec,
+               unsigned long liovcnt, const struct compat_iovec __user *rvec,
+               unsigned long riovcnt, unsigned long flags);
+asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid,
+               const struct compat_iovec __user *lvec,
+               unsigned long liovcnt, const struct compat_iovec __user *rvec,
+               unsigned long riovcnt, unsigned long flags);
+
 #endif /* CONFIG_COMPAT */
 #endif /* _LINUX_COMPAT_H */
index 4df9261..ed9f74f 100644 (file)
@@ -339,7 +339,8 @@ extern int d_validate(struct dentry *, struct dentry *);
  */
 extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
 
-extern char *__d_path(const struct path *path, struct path *root, char *, int);
+extern char *__d_path(const struct path *, const struct path *, char *, int);
+extern char *d_absolute_path(const struct path *, char *, int);
 extern char *d_path(const struct path *, char *, int);
 extern char *d_path_with_unreachable(const struct path *, char *, int);
 extern char *dentry_path_raw(struct dentry *, char *, int);
index ef90cbd..57c9a8a 100644 (file)
@@ -31,6 +31,7 @@ extern void free_dmar_iommu(struct intel_iommu *iommu);
 extern int iommu_calculate_agaw(struct intel_iommu *iommu);
 extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
 extern int dmar_disabled;
+extern int intel_iommu_enabled;
 #else
 static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
 {
@@ -44,6 +45,7 @@ static inline void free_dmar_iommu(struct intel_iommu *iommu)
 {
 }
 #define dmar_disabled  (1)
+#define intel_iommu_enabled (0)
 #endif
 
 
index c9f522b..fd0628b 100644 (file)
@@ -25,7 +25,7 @@ struct sock_extended_err {
 #ifdef __KERNEL__
 
 #include <net/ip.h>
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <linux/ipv6.h>
 #endif
 
@@ -34,7 +34,7 @@ struct sock_extended_err {
 struct sock_exterr_skb {
        union {
                struct inet_skb_parm    h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                struct inet6_skb_parm   h6;
 #endif
        } header;
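
IS_ENABLED(CONFIG_IPV6) is true for both built-in (=y) and modular (=m) IPv6, so a single test replaces the old defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) pair. A small sketch of the pattern, with a hypothetical helper name:

#include <linux/kconfig.h>
#include <linux/types.h>

#if IS_ENABLED(CONFIG_IPV6)
static inline bool example_has_ipv6(void) { return true; }
#else
static inline bool example_has_ipv6(void) { return false; }
#endif
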
index 20db5b2..b38bf69 100644 (file)
@@ -543,9 +543,15 @@ struct compat_ethtool_rxnfc {
 /**
  * struct ethtool_rxfh_indir - command to get or set RX flow hash indirection
  * @cmd: Specific command number - %ETHTOOL_GRXFHINDIR or %ETHTOOL_SRXFHINDIR
- * @size: On entry, the array size of the user buffer.  On return from
- *     %ETHTOOL_GRXFHINDIR, the array size of the hardware indirection table.
+ * @size: On entry, the array size of the user buffer, which may be zero.
+ *     On return from %ETHTOOL_GRXFHINDIR, the array size of the hardware
+ *     indirection table.
  * @ring_index: RX ring/queue index for each hash value
+ *
+ * For %ETHTOOL_GRXFHINDIR, a @size of zero means that only the size
+ * should be returned.  For %ETHTOOL_SRXFHINDIR, a @size of zero means
+ * the table should be reset to default values.  This last feature
+ * is not supported by the original implementations.
  */
 struct ethtool_rxfh_indir {
        __u32   cmd;
@@ -748,6 +754,18 @@ struct net_device;
 /* Some generic methods drivers may use in their ethtool_ops */
 u32 ethtool_op_get_link(struct net_device *dev);
 
+/**
+ * ethtool_rxfh_indir_default - get default value for RX flow hash indirection
+ * @index: Index in RX flow hash indirection table
+ * @n_rx_rings: Number of RX rings to use
+ *
+ * This function provides the default policy for RX flow hash indirection.
+ */
+static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
+{
+       return index % n_rx_rings;
+}
+
 /**
  * struct ethtool_ops - optional netdev operations
  * @get_settings: Get various device settings including Ethernet link
@@ -827,9 +845,13 @@ u32 ethtool_op_get_link(struct net_device *dev);
  *     error code or zero.
  * @set_rx_ntuple: Set an RX n-tuple rule.  Returns a negative error code
  *     or zero.
+ * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table.
+ *     Returns zero if not supported for this specific device.
  * @get_rxfh_indir: Get the contents of the RX flow hash indirection table.
+ *     Will not be called if @get_rxfh_indir_size returns zero.
  *     Returns a negative error code or zero.
  * @set_rxfh_indir: Set the contents of the RX flow hash indirection table.
+ *     Will not be called if @get_rxfh_indir_size returns zero.
  *     Returns a negative error code or zero.
  * @get_channels: Get number of channels.
  * @set_channels: Set number of channels.  Returns a negative error code or
@@ -893,10 +915,9 @@ struct ethtool_ops {
        int     (*reset)(struct net_device *, u32 *);
        int     (*set_rx_ntuple)(struct net_device *,
                                 struct ethtool_rx_ntuple *);
-       int     (*get_rxfh_indir)(struct net_device *,
-                                 struct ethtool_rxfh_indir *);
-       int     (*set_rxfh_indir)(struct net_device *,
-                                 const struct ethtool_rxfh_indir *);
+       u32     (*get_rxfh_indir_size)(struct net_device *);
+       int     (*get_rxfh_indir)(struct net_device *, u32 *);
+       int     (*set_rxfh_indir)(struct net_device *, const u32 *);
        void    (*get_channels)(struct net_device *, struct ethtool_channels *);
        int     (*set_channels)(struct net_device *, struct ethtool_channels *);
        int     (*get_dump_flag)(struct net_device *, struct ethtool_dump *);
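
With the reworked ops a driver reports its table size once via get_rxfh_indir_size() and then exchanges bare u32 arrays, defaulting entries with the new ethtool_rxfh_indir_default() helper. A sketch of such a driver; the mydrv_* names and the table size are hypothetical:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>

/* Hypothetical driver private data. */
struct mydrv_priv {
        u32 indir[128];         /* RX flow hash indirection table */
        u32 n_rx_rings;
};

static void mydrv_init_indir(struct mydrv_priv *priv)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(priv->indir); i++)
                priv->indir[i] = ethtool_rxfh_indir_default(i, priv->n_rx_rings);
}

static u32 mydrv_get_rxfh_indir_size(struct net_device *dev)
{
        struct mydrv_priv *priv = netdev_priv(dev);

        return ARRAY_SIZE(priv->indir);
}

static int mydrv_get_rxfh_indir(struct net_device *dev, u32 *indir)
{
        struct mydrv_priv *priv = netdev_priv(dev);

        memcpy(indir, priv->indir, sizeof(priv->indir));
        return 0;
}

static int mydrv_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
        struct mydrv_priv *priv = netdev_priv(dev);

        memcpy(priv->indir, indir, sizeof(priv->indir));
        return 0;
}
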
index e313022..e0bc4ff 100644 (file)
@@ -393,8 +393,8 @@ struct inodes_stat_t {
 #include <linux/semaphore.h>
 #include <linux/fiemap.h>
 #include <linux/rculist_bl.h>
-#include <linux/shrinker.h>
 #include <linux/atomic.h>
+#include <linux/shrinker.h>
 
 #include <asm/byteorder.h>
 
@@ -1942,6 +1942,7 @@ extern int fd_statfs(int, struct kstatfs *);
 extern int statfs_by_dentry(struct dentry *, struct kstatfs *);
 extern int freeze_super(struct super_block *super);
 extern int thaw_super(struct super_block *super);
+extern bool our_mnt(struct vfsmount *mnt);
 
 extern int current_umask(void);
 
index 96efa67..c3da42d 100644 (file)
@@ -172,6 +172,7 @@ enum {
        TRACE_EVENT_FL_FILTERED_BIT,
        TRACE_EVENT_FL_RECORDED_CMD_BIT,
        TRACE_EVENT_FL_CAP_ANY_BIT,
+       TRACE_EVENT_FL_NO_SET_FILTER_BIT,
 };
 
 enum {
@@ -179,6 +180,7 @@ enum {
        TRACE_EVENT_FL_FILTERED         = (1 << TRACE_EVENT_FL_FILTERED_BIT),
        TRACE_EVENT_FL_RECORDED_CMD     = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
        TRACE_EVENT_FL_CAP_ANY          = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
+       TRACE_EVENT_FL_NO_SET_FILTER    = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
 };
 
 struct ftrace_event_call {
index 66cedf6..210e2c3 100644 (file)
@@ -544,6 +544,15 @@ static inline int ieee80211_is_qos_nullfunc(__le16 fc)
               cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC);
 }
 
+/**
+ * ieee80211_is_first_frag - check if IEEE80211_SCTL_FRAG is not set
+ * @seq_ctrl: frame sequence control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_first_frag(__le16 seq_ctrl)
+{
+       return (seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0;
+}
+
 struct ieee80211s_hdr {
        u8 flags;
        u8 ttl;
@@ -1694,6 +1703,23 @@ static inline bool ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
        return false;
 }
 
+/**
+ * ieee80211_is_public_action - check if frame is a public action frame
+ * @hdr: the frame
+ * @len: length of the frame
+ */
+static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
+                                             size_t len)
+{
+       struct ieee80211_mgmt *mgmt = (void *)hdr;
+
+       if (len < IEEE80211_MIN_ACTION_SIZE)
+               return false;
+       if (!ieee80211_is_action(hdr->frame_control))
+               return false;
+       return mgmt->u.action.category == WLAN_CATEGORY_PUBLIC;
+}
+
 /**
  * ieee80211_fhss_chan_to_freq - get channel frequency
  * @channel: the FHSS channel
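
A minimal use of the two new helpers, framed as a hypothetical RX filter (not taken from the patch):

#include <linux/types.h>
#include <linux/ieee80211.h>

static bool example_rx_wants_frame(struct ieee80211_hdr *hdr, size_t len)
{
        /* Ignore everything but the first fragment of a frame. */
        if (!ieee80211_is_first_frag(hdr->seq_ctrl))
                return false;

        /* Then only pass public action frames through. */
        return ieee80211_is_public_action(hdr, len);
}
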
index e473003..56d907a 100644 (file)
@@ -79,6 +79,7 @@
 #define ETH_P_PAE      0x888E          /* Port Access Entity (IEEE 802.1X) */
 #define ETH_P_AOE      0x88A2          /* ATA over Ethernet            */
 #define ETH_P_8021AD   0x88A8          /* 802.1ad Service VLAN         */
+#define ETH_P_802_EX1  0x88B5          /* 802.1 Local Experimental 1.  */
 #define ETH_P_TIPC     0x88CA          /* TIPC                         */
 #define ETH_P_8021AH   0x88E7          /* 802.1ah Backbone Service Tag */
 #define ETH_P_1588     0x88F7          /* IEEE 1588 Timesync */
index 070ac50..13aff1e 100644 (file)
@@ -74,22 +74,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
 /* found in socket.c */
 extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
 
-/* if this changes, algorithm will have to be reworked because this
- * depends on completely exhausting the VLAN identifier space.  Thus
- * it gives constant time look-up, but in many cases it wastes memory.
- */
-#define VLAN_GROUP_ARRAY_SPLIT_PARTS  8
-#define VLAN_GROUP_ARRAY_PART_LEN     (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS)
-
-struct vlan_group {
-       struct net_device       *real_dev; /* The ethernet(like) device
-                                           * the vlan is attached to.
-                                           */
-       unsigned int            nr_vlans;
-       struct hlist_node       hlist;  /* linked list */
-       struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS];
-       struct rcu_head         rcu;
-};
+struct vlan_info;
 
 static inline int is_vlan_dev(struct net_device *dev)
 {
@@ -109,6 +94,13 @@ extern u16 vlan_dev_vlan_id(const struct net_device *dev);
 extern bool vlan_do_receive(struct sk_buff **skb, bool last_handler);
 extern struct sk_buff *vlan_untag(struct sk_buff *skb);
 
+extern int vlan_vid_add(struct net_device *dev, unsigned short vid);
+extern void vlan_vid_del(struct net_device *dev, unsigned short vid);
+
+extern int vlan_vids_add_by_dev(struct net_device *dev,
+                               const struct net_device *by_dev);
+extern void vlan_vids_del_by_dev(struct net_device *dev,
+                                const struct net_device *by_dev);
 #else
 static inline struct net_device *
 __vlan_find_dev_deep(struct net_device *real_dev, u16 vlan_id)
@@ -139,6 +131,26 @@ static inline struct sk_buff *vlan_untag(struct sk_buff *skb)
 {
        return skb;
 }
+
+static inline int vlan_vid_add(struct net_device *dev, unsigned short vid)
+{
+       return 0;
+}
+
+static inline void vlan_vid_del(struct net_device *dev, unsigned short vid)
+{
+}
+
+static inline int vlan_vids_add_by_dev(struct net_device *dev,
+                                      const struct net_device *by_dev)
+{
+       return 0;
+}
+
+static inline void vlan_vids_del_by_dev(struct net_device *dev,
+                                       const struct net_device *by_dev)
+{
+}
 #endif
 
 /**
@@ -386,7 +398,7 @@ struct vlan_ioctl_args {
                unsigned int skb_priority;
                unsigned int name_type;
                unsigned int bind_type;
-               unsigned int flag; /* Matches vlan_dev_info flags */
+               unsigned int flag; /* Matches vlan_dev_priv flags */
         } u;
 
        short vlan_qos;   
index abf5028..afa5d5c 100644 (file)
@@ -22,7 +22,7 @@ struct inet_diag_sockid {
 
 /* Request structure */
 
-struct inet_diag_req {
+struct inet_diag_req_compat {
        __u8    idiag_family;           /* Family of addresses. */
        __u8    idiag_src_len;
        __u8    idiag_dst_len;
@@ -34,6 +34,15 @@ struct inet_diag_req {
        __u32   idiag_dbs;              /* Tables to dump (NI) */
 };
 
+struct inet_diag_req {
+       __u8    sdiag_family;
+       __u8    sdiag_protocol;
+       __u8    idiag_ext;
+       __u8    pad;
+       __u32   idiag_states;
+       struct inet_diag_sockid id;
+};
+
 enum {
        INET_DIAG_REQ_NONE,
        INET_DIAG_REQ_BYTECODE,
@@ -125,16 +134,41 @@ struct tcpvegas_info {
 #ifdef __KERNEL__
 struct sock;
 struct inet_hashinfo;
+struct nlattr;
+struct nlmsghdr;
+struct sk_buff;
+struct netlink_callback;
 
 struct inet_diag_handler {
-       struct inet_hashinfo    *idiag_hashinfo;
+       void                    (*dump)(struct sk_buff *skb,
+                                       struct netlink_callback *cb,
+                                       struct inet_diag_req *r,
+                                       struct nlattr *bc);
+
+       int                     (*dump_one)(struct sk_buff *in_skb,
+                                       const struct nlmsghdr *nlh,
+                                       struct inet_diag_req *req);
+
        void                    (*idiag_get_info)(struct sock *sk,
                                                  struct inet_diag_msg *r,
                                                  void *info);
-       __u16                   idiag_info_size;
        __u16                   idiag_type;
 };
 
+struct inet_connection_sock;
+int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
+                             struct sk_buff *skb, struct inet_diag_req *req,
+                             u32 pid, u32 seq, u16 nlmsg_flags,
+                             const struct nlmsghdr *unlh);
+void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
+               struct netlink_callback *cb, struct inet_diag_req *r,
+               struct nlattr *bc);
+int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
+               struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+               struct inet_diag_req *req);
+
+int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
+
 extern int  inet_diag_register(const struct inet_diag_handler *handler);
 extern void inet_diag_unregister(const struct inet_diag_handler *handler);
 #endif /* __KERNEL__ */
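
With the hashinfo pointer gone from the handler, each protocol now provides dump/dump_one callbacks and calls the shared inet_diag_dump_icsk()/inet_diag_dump_one_icsk() helpers itself. A sketch modelled loosely on the in-tree TCP handler (idiag_get_info and error handling omitted):

#include <linux/init.h>
#include <linux/in.h>
#include <linux/inet_diag.h>
#include <net/tcp.h>

static void example_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
                              struct inet_diag_req *r, struct nlattr *bc)
{
        inet_diag_dump_icsk(&tcp_hashinfo, skb, cb, r, bc);
}

static int example_diag_dump_one(struct sk_buff *in_skb,
                                 const struct nlmsghdr *nlh,
                                 struct inet_diag_req *req)
{
        return inet_diag_dump_one_icsk(&tcp_hashinfo, in_skb, nlh, req);
}

static const struct inet_diag_handler example_diag_handler = {
        .dump           = example_diag_dump,
        .dump_one       = example_diag_dump_one,
        .idiag_type     = IPPROTO_TCP,
};

static int __init example_diag_init(void)
{
        return inet_diag_register(&example_diag_handler);
}
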
index 94b1e35..32574ee 100644 (file)
@@ -126,6 +126,8 @@ extern struct cred init_cred;
 # define INIT_PERF_EVENTS(tsk)
 #endif
 
+#define INIT_TASK_COMM "swapper"
+
 /*
  *  INIT_TASK is used to set up the first task table, touch at
  * your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -162,7 +164,7 @@ extern struct cred init_cred;
        .group_leader   = &tsk,                                         \
        RCU_INIT_POINTER(.real_cred, &init_cred),                       \
        RCU_INIT_POINTER(.cred, &init_cred),                            \
-       .comm           = "swapper",                                    \
+       .comm           = INIT_TASK_COMM,                               \
        .thread         = INIT_THREAD,                                  \
        .fs             = &init_fs,                                     \
        .files          = &init_files,                                  \
index 0c99776..6318268 100644 (file)
@@ -404,7 +404,7 @@ struct tcp6_sock {
 
 extern int inet6_sk_rebuild_header(struct sock *sk);
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
 {
        return inet_sk(__sk)->pinet6;
@@ -515,7 +515,7 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
 #define inet6_rcv_saddr(__sk)  NULL
 #define tcp_twsk_ipv6only(__sk)                0
 #define inet_v6_ipv6only(__sk)         0
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif /* IS_ENABLED(CONFIG_IPV6) */
 
 #define INET6_MATCH(__sk, __net, __hash, __saddr, __daddr, __ports, __dif)\
        (((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net)   && \
index ff9abff..90b0656 100644 (file)
@@ -301,7 +301,7 @@ static inline int __nlm_privileged_request4(const struct sockaddr *sap)
        return ipv4_is_loopback(sin->sin_addr.s_addr);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static inline int __nlm_privileged_request6(const struct sockaddr *sap)
 {
        const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
@@ -314,12 +314,12 @@ static inline int __nlm_privileged_request6(const struct sockaddr *sap)
 
        return ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LOOPBACK;
 }
-#else  /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#else  /* IS_ENABLED(CONFIG_IPV6) */
 static inline int __nlm_privileged_request6(const struct sockaddr *sap)
 {
        return 0;
 }
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif /* IS_ENABLED(CONFIG_IPV6) */
 
 /*
  * Ensure incoming requests are from local privileged callers.
index 25b8086..fd7ff3d 100644 (file)
@@ -185,7 +185,6 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
 #define rounddown_pow_of_two(n)                        \
 (                                              \
        __builtin_constant_p(n) ? (             \
-               (n == 1) ? 0 :                  \
                (1UL << ilog2(n))) :            \
        __rounddown_pow_of_two(n)               \
  )
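
The dropped special case means the constant-folded path of rounddown_pow_of_two(1) now yields 1, matching __rounddown_pow_of_two(), instead of 0. A small illustration (the function name is made up):

#include <linux/log2.h>
#include <linux/printk.h>

static void example_rounddown(void)
{
        /* Constant and runtime paths now agree:
         *   rounddown_pow_of_two(1)  == 1   (the constant path used to give 0)
         *   rounddown_pow_of_two(5)  == 4
         *   rounddown_pow_of_two(64) == 64
         */
        pr_info("%lu %lu %lu\n", rounddown_pow_of_two(1),
                rounddown_pow_of_two(5), rounddown_pow_of_two(64));
}
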
index b87068a..9b296ea 100644 (file)
@@ -85,6 +85,9 @@ extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
 extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);
 
+extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
+extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);
+
 static inline
 int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
 {
@@ -381,5 +384,25 @@ mem_cgroup_print_bad_page(struct page *page)
 }
 #endif
 
+enum {
+       UNDER_LIMIT,
+       SOFT_LIMIT,
+       OVER_LIMIT,
+};
+
+#ifdef CONFIG_INET
+struct sock;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+void sock_update_memcg(struct sock *sk);
+void sock_release_memcg(struct sock *sk);
+#else
+static inline void sock_update_memcg(struct sock *sk)
+{
+}
+static inline void sock_release_memcg(struct sock *sk)
+{
+}
+#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
+#endif /* CONFIG_INET */
 #endif /* _LINUX_MEMCONTROL_H */
 
index b56e458..9958ff2 100644 (file)
@@ -59,12 +59,15 @@ enum {
        MLX4_CMD_HW_HEALTH_CHECK = 0x50,
        MLX4_CMD_SET_PORT        = 0xc,
        MLX4_CMD_SET_NODE        = 0x5a,
+       MLX4_CMD_QUERY_FUNC      = 0x56,
        MLX4_CMD_ACCESS_DDR      = 0x2e,
        MLX4_CMD_MAP_ICM         = 0xffa,
        MLX4_CMD_UNMAP_ICM       = 0xff9,
        MLX4_CMD_MAP_ICM_AUX     = 0xffc,
        MLX4_CMD_UNMAP_ICM_AUX   = 0xffb,
        MLX4_CMD_SET_ICM_SIZE    = 0xffd,
+       /*master notify fw on finish for slave's flr*/
+       MLX4_CMD_INFORM_FLR_DONE = 0x5b,
 
        /* TPT commands */
        MLX4_CMD_SW2HW_MPT       = 0xd,
@@ -119,6 +122,26 @@ enum {
        /* miscellaneous commands */
        MLX4_CMD_DIAG_RPRT       = 0x30,
        MLX4_CMD_NOP             = 0x31,
+       MLX4_CMD_ACCESS_MEM      = 0x2e,
+       MLX4_CMD_SET_VEP         = 0x52,
+
+       /* Ethernet specific commands */
+       MLX4_CMD_SET_VLAN_FLTR   = 0x47,
+       MLX4_CMD_SET_MCAST_FLTR  = 0x48,
+       MLX4_CMD_DUMP_ETH_STATS  = 0x49,
+
+       /* Communication channel commands */
+       MLX4_CMD_ARM_COMM_CHANNEL = 0x57,
+       MLX4_CMD_GEN_EQE         = 0x58,
+
+       /* virtual commands */
+       MLX4_CMD_ALLOC_RES       = 0xf00,
+       MLX4_CMD_FREE_RES        = 0xf01,
+       MLX4_CMD_MCAST_ATTACH    = 0xf05,
+       MLX4_CMD_UCAST_ATTACH    = 0xf06,
+       MLX4_CMD_PROMISC         = 0xf08,
+       MLX4_CMD_QUERY_FUNC_CAP  = 0xf0a,
+       MLX4_CMD_QP_ATTACH       = 0xf0b,
 
        /* debug commands */
        MLX4_CMD_QUERY_DEBUG_MSG = 0x2a,
@@ -126,6 +149,7 @@ enum {
 
        /* statistics commands */
        MLX4_CMD_QUERY_IF_STAT   = 0X54,
+       MLX4_CMD_SET_IF_STAT     = 0X55,
 };
 
 enum {
@@ -135,7 +159,8 @@ enum {
 };
 
 enum {
-       MLX4_MAILBOX_SIZE       =  4096
+       MLX4_MAILBOX_SIZE       = 4096,
+       MLX4_ACCESS_MEM_ALIGN   = 256,
 };
 
 enum {
@@ -148,6 +173,11 @@ enum {
        MLX4_SET_PORT_GID_TABLE = 0x5,
 };
 
+enum {
+       MLX4_CMD_WRAPPED,
+       MLX4_CMD_NATIVE
+};
+
 struct mlx4_dev;
 
 struct mlx4_cmd_mailbox {
@@ -157,23 +187,24 @@ struct mlx4_cmd_mailbox {
 
 int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
               int out_is_imm, u32 in_modifier, u8 op_modifier,
-              u16 op, unsigned long timeout);
+              u16 op, unsigned long timeout, int native);
 
 /* Invoke a command with no output parameter */
 static inline int mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u32 in_modifier,
-                          u8 op_modifier, u16 op, unsigned long timeout)
+                          u8 op_modifier, u16 op, unsigned long timeout,
+                          int native)
 {
        return __mlx4_cmd(dev, in_param, NULL, 0, in_modifier,
-                         op_modifier, op, timeout);
+                         op_modifier, op, timeout, native);
 }
 
 /* Invoke a command with an output mailbox */
 static inline int mlx4_cmd_box(struct mlx4_dev *dev, u64 in_param, u64 out_param,
                               u32 in_modifier, u8 op_modifier, u16 op,
-                              unsigned long timeout)
+                              unsigned long timeout, int native)
 {
        return __mlx4_cmd(dev, in_param, &out_param, 0, in_modifier,
-                         op_modifier, op, timeout);
+                         op_modifier, op, timeout, native);
 }
 
 /*
@@ -183,13 +214,17 @@ static inline int mlx4_cmd_box(struct mlx4_dev *dev, u64 in_param, u64 out_param
  */
 static inline int mlx4_cmd_imm(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                               u32 in_modifier, u8 op_modifier, u16 op,
-                              unsigned long timeout)
+                              unsigned long timeout, int native)
 {
        return __mlx4_cmd(dev, in_param, out_param, 1, in_modifier,
-                         op_modifier, op, timeout);
+                         op_modifier, op, timeout, native);
 }
 
 struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev);
 void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox);
 
+u32 mlx4_comm_get_version(void);
+
+#define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8)
+
 #endif /* MLX4_CMD_H */
index ca2c397..5c4fe8e 100644 (file)
@@ -47,6 +47,9 @@
 enum {
        MLX4_FLAG_MSI_X         = 1 << 0,
        MLX4_FLAG_OLD_PORT_CMDS = 1 << 1,
+       MLX4_FLAG_MASTER        = 1 << 2,
+       MLX4_FLAG_SLAVE         = 1 << 3,
+       MLX4_FLAG_SRIOV         = 1 << 4,
 };
 
 enum {
@@ -57,6 +60,15 @@ enum {
        MLX4_BOARD_ID_LEN = 64
 };
 
+enum {
+       MLX4_MAX_NUM_PF         = 16,
+       MLX4_MAX_NUM_VF         = 64,
+       MLX4_MFUNC_MAX          = 80,
+       MLX4_MFUNC_EQ_NUM       = 4,
+       MLX4_MFUNC_MAX_EQES     = 8,
+       MLX4_MFUNC_EQE_MASK     = (MLX4_MFUNC_MAX_EQES - 1)
+};
+
 enum {
        MLX4_DEV_CAP_FLAG_RC            = 1LL <<  0,
        MLX4_DEV_CAP_FLAG_UC            = 1LL <<  1,
@@ -82,7 +94,8 @@ enum {
        MLX4_DEV_CAP_FLAG_UDP_RSS       = 1LL << 40,
        MLX4_DEV_CAP_FLAG_VEP_UC_STEER  = 1LL << 41,
        MLX4_DEV_CAP_FLAG_VEP_MC_STEER  = 1LL << 42,
-       MLX4_DEV_CAP_FLAG_COUNTERS      = 1LL << 48
+       MLX4_DEV_CAP_FLAG_COUNTERS      = 1LL << 48,
+       MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55
 };
 
 #define MLX4_ATTR_EXTENDED_PORT_INFO   cpu_to_be16(0xff90)
@@ -117,7 +130,11 @@ enum mlx4_event {
        MLX4_EVENT_TYPE_PORT_CHANGE        = 0x09,
        MLX4_EVENT_TYPE_EQ_OVERFLOW        = 0x0f,
        MLX4_EVENT_TYPE_ECC_DETECT         = 0x0e,
-       MLX4_EVENT_TYPE_CMD                = 0x0a
+       MLX4_EVENT_TYPE_CMD                = 0x0a,
+       MLX4_EVENT_TYPE_VEP_UPDATE         = 0x19,
+       MLX4_EVENT_TYPE_COMM_CHANNEL       = 0x18,
+       MLX4_EVENT_TYPE_FLR_EVENT          = 0x1c,
+       MLX4_EVENT_TYPE_NONE               = 0xff,
 };
 
 enum {
@@ -184,6 +201,7 @@ enum mlx4_qp_region {
 };
 
 enum mlx4_port_type {
+       MLX4_PORT_TYPE_NONE     = 0,
        MLX4_PORT_TYPE_IB       = 1,
        MLX4_PORT_TYPE_ETH      = 2,
        MLX4_PORT_TYPE_AUTO     = 3
@@ -216,6 +234,7 @@ static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
 
 struct mlx4_caps {
        u64                     fw_ver;
+       u32                     function;
        int                     num_ports;
        int                     vl_cap[MLX4_MAX_PORTS + 1];
        int                     ib_mtu_cap[MLX4_MAX_PORTS + 1];
@@ -230,6 +249,7 @@ struct mlx4_caps {
        u64                     trans_code[MLX4_MAX_PORTS + 1];
        int                     local_ca_ack_delay;
        int                     num_uars;
+       u32                     uar_page_size;
        int                     bf_reg_size;
        int                     bf_regs_per_page;
        int                     max_sq_sg;
@@ -253,8 +273,7 @@ struct mlx4_caps {
        int                     num_comp_vectors;
        int                     comp_pool;
        int                     num_mpts;
-       int                     num_mtt_segs;
-       int                     mtts_per_seg;
+       int                     num_mtts;
        int                     fmr_reserved_mtts;
        int                     reserved_mtts;
        int                     reserved_mrws;
@@ -284,7 +303,9 @@ struct mlx4_caps {
        int                     log_num_prios;
        enum mlx4_port_type     port_type[MLX4_MAX_PORTS + 1];
        u8                      supported_type[MLX4_MAX_PORTS + 1];
-       u32                     port_mask;
+       u8                      suggested_type[MLX4_MAX_PORTS + 1];
+       u8                      default_sense[MLX4_MAX_PORTS + 1];
+       u32                     port_mask[MLX4_MAX_PORTS + 1];
        enum mlx4_port_type     possible_type[MLX4_MAX_PORTS + 1];
        u32                     max_counters;
        u8                      ext_port_cap[MLX4_MAX_PORTS + 1];
@@ -304,7 +325,7 @@ struct mlx4_buf {
 };
 
 struct mlx4_mtt {
-       u32                     first_seg;
+       u32                     offset;
        int                     order;
        int                     page_shift;
 };
@@ -466,10 +487,12 @@ struct mlx4_counter {
 struct mlx4_dev {
        struct pci_dev         *pdev;
        unsigned long           flags;
+       unsigned long           num_slaves;
        struct mlx4_caps        caps;
        struct radix_tree_root  qp_table_tree;
        u8                      rev_id;
        char                    board_id[MLX4_BOARD_ID_LEN];
+       int                     num_vfs;
 };
 
 struct mlx4_init_port_param {
@@ -488,14 +511,32 @@ struct mlx4_init_port_param {
 
 #define mlx4_foreach_port(port, dev, type)                             \
        for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)     \
-               if (((type) == MLX4_PORT_TYPE_IB ? (dev)->caps.port_mask : \
-                    ~(dev)->caps.port_mask) & 1 << ((port) - 1))
+               if ((type) == (dev)->caps.port_mask[(port)])
 
-#define mlx4_foreach_ib_transport_port(port, dev)                      \
-       for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)     \
-               if (((dev)->caps.port_mask & 1 << ((port) - 1)) ||      \
-                   ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
+#define mlx4_foreach_ib_transport_port(port, dev)                         \
+       for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)       \
+               if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \
+                       ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
 
+static inline int mlx4_is_master(struct mlx4_dev *dev)
+{
+       return dev->flags & MLX4_FLAG_MASTER;
+}
+
+static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn)
+{
+       return (qpn < dev->caps.sqp_start + 8);
+}
+
+static inline int mlx4_is_mfunc(struct mlx4_dev *dev)
+{
+       return dev->flags & (MLX4_FLAG_SLAVE | MLX4_FLAG_MASTER);
+}
+
+static inline int mlx4_is_slave(struct mlx4_dev *dev)
+{
+       return dev->flags & MLX4_FLAG_SLAVE;
+}
 
 int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
                   struct mlx4_buf *buf);
@@ -561,6 +602,10 @@ int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_waterm
 int mlx4_INIT_PORT(struct mlx4_dev *dev, int port);
 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);
 
+int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+                       int block_mcast_loopback, enum mlx4_protocol prot);
+int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+                       enum mlx4_protocol prot);
 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                          int block_mcast_loopback, enum mlx4_protocol protocol);
 int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
@@ -571,9 +616,11 @@ int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
 int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
 int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
 
-int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap);
-void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn);
-int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap);
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
+int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn);
+void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn);
 
 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
index 3dc3a8c..4baadd1 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/mmzone.h>
 #include <linux/rbtree.h>
 #include <linux/prio_tree.h>
+#include <linux/atomic.h>
 #include <linux/debug_locks.h>
 #include <linux/mm_types.h>
 #include <linux/range.h>
index 415f2db..c8ef9bc 100644 (file)
@@ -218,6 +218,7 @@ struct mmc_card {
 #define MMC_QUIRK_INAND_CMD38  (1<<6)          /* iNAND devices have broken CMD38 */
 #define MMC_QUIRK_BLK_NO_CMD23 (1<<7)          /* Avoid CMD23 for regular multiblock */
 #define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8)  /* Avoid sending 512 bytes in */
+#define MMC_QUIRK_LONG_READ_TIME (1<<9)                /* Data read time > CSD says */
                                                /* byte mode */
        unsigned int    poweroff_notify_state;  /* eMMC4.5 notify feature */
 #define MMC_NO_POWER_NOTIFICATION      0
@@ -433,6 +434,11 @@ static inline int mmc_card_broken_byte_mode_512(const struct mmc_card *c)
        return c->quirks & MMC_QUIRK_BROKEN_BYTE_MODE_512;
 }
 
+static inline int mmc_card_long_read_time(const struct mmc_card *c)
+{
+       return c->quirks & MMC_QUIRK_LONG_READ_TIME;
+}
+
 #define mmc_card_name(c)       ((c)->cid.prod_name)
 #define mmc_card_id(c)         (dev_name(&(c)->dev))
 
index eef257c..a776a67 100644 (file)
@@ -55,7 +55,6 @@
 
 #include <linux/netdev_features.h>
 
-struct vlan_group;
 struct netpoll_info;
 struct phy_device;
 /* 802.11 specific */
@@ -598,7 +597,7 @@ struct rps_map {
        struct rcu_head rcu;
        u16 cpus[0];
 };
-#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16)))
+#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
 
 /*
  * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
@@ -622,7 +621,7 @@ struct rps_dev_flow_table {
        struct rps_dev_flow flows[0];
 };
 #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
-    (_num * sizeof(struct rps_dev_flow)))
+    ((_num) * sizeof(struct rps_dev_flow)))
 
 /*
  * The rps_sock_flow_table contains mappings of flows to the last CPU
@@ -633,7 +632,7 @@ struct rps_sock_flow_table {
        u16 ents[0];
 };
 #define        RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
-    (_num * sizeof(u16)))
+    ((_num) * sizeof(u16)))
 
 #define RPS_NO_CPU 0xffff
 
@@ -685,7 +684,7 @@ struct xps_map {
        struct rcu_head rcu;
        u16 queues[0];
 };
-#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + (_num * sizeof(u16)))
+#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
 #define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map))   \
     / sizeof(u16))
 
@@ -792,11 +791,11 @@ struct netdev_tc_txq {
  *     3. Update dev->stats asynchronously and atomically, and define
  *        neither operation.
  *
- * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
+ * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
  *     If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
  *     this function is called when a VLAN id is registered.
  *
- * void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
+ * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
  *     If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
  *     this function is called when a VLAN id is unregistered.
  *
@@ -911,9 +910,9 @@ struct net_device_ops {
                                                     struct rtnl_link_stats64 *storage);
        struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 
-       void                    (*ndo_vlan_rx_add_vid)(struct net_device *dev,
+       int                     (*ndo_vlan_rx_add_vid)(struct net_device *dev,
                                                       unsigned short vid);
-       void                    (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
+       int                     (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
                                                        unsigned short vid);
 #ifdef CONFIG_NET_POLL_CONTROLLER
        void                    (*ndo_poll_controller)(struct net_device *dev);
@@ -1096,7 +1095,7 @@ struct net_device {
        /* Protocol specific pointers */
 
 #if IS_ENABLED(CONFIG_VLAN_8021Q)
-       struct vlan_group __rcu *vlgrp;         /* VLAN group */
+       struct vlan_info __rcu  *vlan_info;     /* VLAN info */
 #endif
 #if IS_ENABLED(CONFIG_NET_DSA)
        struct dsa_switch_tree  *dsa_ptr;       /* dsa specific data */
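
The extra parentheses around _num matter once the macro argument is an expression rather than a plain variable; without them the sizeof() multiplication binds to only part of it. A sketch of the case the fix covers, assuming CONFIG_RPS (the helper name is illustrative):

#include <linux/netdevice.h>

static size_t example_rps_map_bytes(unsigned int nr_cpus)
{
        /*
         * Old macro: sizeof(struct rps_map) + (nr_cpus + 1 * sizeof(u16))
         *            -- only one extra u16 of space.
         * New macro: sizeof(struct rps_map) + ((nr_cpus + 1) * sizeof(u16))
         *            -- room for nr_cpus + 1 CPU entries, as intended.
         */
        return RPS_MAP_SIZE(nr_cpus + 1);
}
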
index 8374d29..52e4895 100644 (file)
@@ -8,7 +8,7 @@
 #define NETLINK_UNUSED         1       /* Unused number                                */
 #define NETLINK_USERSOCK       2       /* Reserved for user mode socket protocols      */
 #define NETLINK_FIREWALL       3       /* Firewalling hook                             */
-#define NETLINK_INET_DIAG      4       /* INET socket monitoring                       */
+#define NETLINK_SOCK_DIAG      4       /* socket monitoring                            */
 #define NETLINK_NFLOG          5       /* netfilter/iptables ULOG */
 #define NETLINK_XFRM           6       /* ipsec */
 #define NETLINK_SELINUX                7       /* SELinux event notifications */
@@ -27,6 +27,8 @@
 #define NETLINK_RDMA           20
 #define NETLINK_CRYPTO         21      /* Crypto layer */
 
+#define NETLINK_INET_DIAG      NETLINK_SOCK_DIAG
+
 #define MAX_LINKS 32           
 
 struct sockaddr_nl {
index 36cb955..89fee4a 100644 (file)
@@ -62,6 +62,8 @@ enum nfc_commands {
        NFC_CMD_GET_DEVICE,
        NFC_CMD_DEV_UP,
        NFC_CMD_DEV_DOWN,
+       NFC_CMD_DEP_LINK_UP,
+       NFC_CMD_DEP_LINK_DOWN,
        NFC_CMD_START_POLL,
        NFC_CMD_STOP_POLL,
        NFC_CMD_GET_TARGET,
@@ -86,6 +88,8 @@ enum nfc_commands {
  * @NFC_ATTR_TARGET_SENS_RES: NFC-A targets extra information such as NFCID
  * @NFC_ATTR_TARGET_SEL_RES: NFC-A targets extra information (useful if the
  *     target is not NFC-Forum compliant)
+ * @NFC_ATTR_COMM_MODE: Passive or active mode
+ * @NFC_ATTR_RF_MODE: Initiator or target
  */
 enum nfc_attrs {
        NFC_ATTR_UNSPEC,
@@ -95,6 +99,8 @@ enum nfc_attrs {
        NFC_ATTR_TARGET_INDEX,
        NFC_ATTR_TARGET_SENS_RES,
        NFC_ATTR_TARGET_SEL_RES,
+       NFC_ATTR_COMM_MODE,
+       NFC_ATTR_RF_MODE,
 /* private: internal use only */
        __NFC_ATTR_AFTER_LAST
 };
@@ -111,6 +117,14 @@ enum nfc_attrs {
 
 #define NFC_PROTO_MAX          6
 
+/* NFC communication modes */
+#define NFC_COMM_ACTIVE  0
+#define NFC_COMM_PASSIVE 1
+
+/* NFC RF modes */
+#define NFC_RF_INITIATOR 0
+#define NFC_RF_TARGET    1
+
 /* NFC protocols masks used in bitsets */
 #define NFC_PROTO_JEWEL_MASK   (1 << NFC_PROTO_JEWEL)
 #define NFC_PROTO_MIFARE_MASK  (1 << NFC_PROTO_MIFARE)
@@ -125,9 +139,22 @@ struct sockaddr_nfc {
        __u32 nfc_protocol;
 };
 
+#define NFC_LLCP_MAX_SERVICE_NAME 63
+struct sockaddr_nfc_llcp {
+       sa_family_t sa_family;
+       __u32 dev_idx;
+       __u32 target_idx;
+       __u32 nfc_protocol;
+       __u8 dsap; /* Destination SAP, if known */
+       __u8 ssap; /* Source SAP to be bound to */
+       char service_name[NFC_LLCP_MAX_SERVICE_NAME]; /* Service name URI */
+       size_t service_name_len;
+};
+
 /* NFC socket protocols */
 #define NFC_SOCKPROTO_RAW      0
-#define NFC_SOCKPROTO_MAX      1
+#define NFC_SOCKPROTO_LLCP     1
+#define NFC_SOCKPROTO_MAX      2
 
 #define NFC_HEADER_SIZE 1
 
index 97bfebf..f795cb7 100644 (file)
  *     OLBC handling in hostapd. Beacons are reported in %NL80211_CMD_FRAME
  *     messages. Note that per PHY only one application may register.
  *
+ * @NL80211_CMD_SET_NOACK_MAP: sets a per-TID bitmap indicating whether the
+ *      No Acknowledgement Policy should be applied.
+ *
  * @NL80211_CMD_MAX: highest used command number
  * @__NL80211_CMD_AFTER_LAST: internal use
  */
@@ -675,6 +678,8 @@ enum nl80211_commands {
 
        NL80211_CMD_UNEXPECTED_4ADDR_FRAME,
 
+       NL80211_CMD_SET_NOACK_MAP,
+
        /* add new commands above here */
 
        /* used to define NL80211_CMD_MAX below */
@@ -1185,6 +1190,9 @@ enum nl80211_commands {
  *    abides to when initiating radiation on DFS channels. A country maps
  *    to one DFS region.
  *
+ * @NL80211_ATTR_NOACK_MAP: This u16 bitmap contains the No Ack Policy of
+ *      up to 16 TIDs.
+ *
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
  */
@@ -1428,6 +1436,8 @@ enum nl80211_attrs {
        NL80211_ATTR_DISABLE_HT,
        NL80211_ATTR_HT_CAPABILITY_MASK,
 
+       NL80211_ATTR_NOACK_MAP,
+
        /* add attributes here, update the policy in nl80211.c */
 
        __NL80211_ATTR_AFTER_LAST,
@@ -1526,7 +1536,11 @@ enum nl80211_iftype {
  * @NL80211_STA_FLAG_WME: station is WME/QoS capable
  * @NL80211_STA_FLAG_MFP: station uses management frame protection
  * @NL80211_STA_FLAG_AUTHENTICATED: station is authenticated
- * @NL80211_STA_FLAG_TDLS_PEER: station is a TDLS peer
+ * @NL80211_STA_FLAG_TDLS_PEER: station is a TDLS peer -- this flag should
+ *     only be used in managed mode (even in the flags mask). Note that the
+ *     flag can't be changed, it is only valid while adding a station, and
+ *     attempts to change it will silently be ignored (rather than rejected
+ *     as errors.)
  * @NL80211_STA_FLAG_MAX: highest station flag number currently defined
  * @__NL80211_STA_FLAG_AFTER_LAST: internal use
  */
@@ -2084,6 +2098,10 @@ enum nl80211_mntr_flags {
  * access to a broader network beyond the MBSS.  This is done via Root
  * Announcement frames.
  *
+ * @NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL: The minimum interval of time (in
+ * TUs) during which a mesh STA can send only one Action frame containing a
+ * PERR element.
+ *
  * @NL80211_MESHCONF_ATTR_MAX: highest possible mesh configuration attribute
  *
  * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use
@@ -2107,6 +2125,7 @@ enum nl80211_meshconf_params {
        NL80211_MESHCONF_ELEMENT_TTL,
        NL80211_MESHCONF_HWMP_RANN_INTERVAL,
        NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
+       NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
 
        /* keep last */
        __NL80211_MESHCONF_ATTR_AFTER_LAST,
@@ -2770,9 +2789,11 @@ enum nl80211_ap_sme_features {
  * @NL80211_FEATURE_SK_TX_STATUS: This driver supports reflecting back
  *     TX status to the socket error queue when requested with the
  *     socket option.
+ * @NL80211_FEATURE_HT_IBSS: This driver supports IBSS with HT datarates.
  */
 enum nl80211_feature_flags {
        NL80211_FEATURE_SK_TX_STATUS    = 1 << 0,
+       NL80211_FEATURE_HT_IBSS         = 1 << 1,
 };
 
 /**
index 172ba70..2aaee0c 100644 (file)
 #define PCI_DEVICE_ID_AMD_11H_NB_DRAM  0x1302
 #define PCI_DEVICE_ID_AMD_11H_NB_MISC  0x1303
 #define PCI_DEVICE_ID_AMD_11H_NB_LINK  0x1304
+#define PCI_DEVICE_ID_AMD_15H_NB_F0    0x1600
+#define PCI_DEVICE_ID_AMD_15H_NB_F1    0x1601
+#define PCI_DEVICE_ID_AMD_15H_NB_F2    0x1602
 #define PCI_DEVICE_ID_AMD_15H_NB_F3    0x1603
 #define PCI_DEVICE_ID_AMD_15H_NB_F4    0x1604
+#define PCI_DEVICE_ID_AMD_15H_NB_F5    0x1605
 #define PCI_DEVICE_ID_AMD_CNB17H_F3    0x1703
 #define PCI_DEVICE_ID_AMD_LANCE                0x2000
 #define PCI_DEVICE_ID_AMD_LANCE_HOME   0x2001
index 1e9ebe5..b1f8912 100644 (file)
@@ -822,6 +822,7 @@ struct perf_event {
        int                             mmap_locked;
        struct user_struct              *mmap_user;
        struct ring_buffer              *rb;
+       struct list_head                rb_entry;
 
        /* poll related */
        wait_queue_head_t               waitq;
index fb556dc..8daced3 100644 (file)
@@ -181,6 +181,7 @@ enum {
        TCA_RED_UNSPEC,
        TCA_RED_PARMS,
        TCA_RED_STAB,
+       TCA_RED_MAX_P,
        __TCA_RED_MAX,
 };
 
@@ -194,8 +195,9 @@ struct tc_red_qopt {
        unsigned char   Plog;           /* log(P_max/(qth_max-qth_min)) */
        unsigned char   Scell_log;      /* cell size for idle damping */
        unsigned char   flags;
-#define TC_RED_ECN     1
-#define TC_RED_HARDDROP        2
+#define TC_RED_ECN             1
+#define TC_RED_HARDDROP                2
+#define TC_RED_ADAPTATIVE      4
 };
 
 struct tc_red_xstats {
@@ -214,6 +216,7 @@ enum {
        TCA_GRED_PARMS,
        TCA_GRED_STAB,
        TCA_GRED_DPS,
+       TCA_GRED_MAX_P,
           __TCA_GRED_MAX,
 };
 
@@ -253,6 +256,7 @@ enum {
        TCA_CHOKE_UNSPEC,
        TCA_CHOKE_PARMS,
        TCA_CHOKE_STAB,
+       TCA_CHOKE_MAX_P,
        __TCA_CHOKE_MAX,
 };
 
@@ -498,6 +502,9 @@ struct tc_netem_corrupt {
 
 struct tc_netem_rate {
        __u32   rate;   /* byte/s */
+       __s32   packet_overhead;
+       __u32   cell_size;
+       __s32   cell_overhead;
 };
 
 enum {
index a83833a..07ceb97 100644 (file)
@@ -35,7 +35,7 @@ struct shrinker {
 
        /* These are for internal use */
        struct list_head list;
-       long nr;        /* objs pending delete */
+       atomic_long_t nr_in_batch; /* objs pending delete */
 };
 #define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
 extern void register_shrinker(struct shrinker *);
index e2accb3..d0de882 100644 (file)
@@ -24,7 +24,7 @@ struct sigma_firmware {
 struct sigma_firmware_header {
        unsigned char magic[7];
        u8 version;
-       u32 crc;
+       __le32 crc;
 };
 
 enum {
@@ -40,19 +40,14 @@ enum {
 struct sigma_action {
        u8 instr;
        u8 len_hi;
-       u16 len;
-       u16 addr;
+       __le16 len;
+       __be16 addr;
        unsigned char payload[];
 };
 
 static inline u32 sigma_action_len(struct sigma_action *sa)
 {
-       return (sa->len_hi << 16) | sa->len;
-}
-
-static inline size_t sigma_action_size(struct sigma_action *sa, u32 payload_len)
-{
-       return sizeof(*sa) + payload_len + (payload_len % 2);
+       return (sa->len_hi << 16) | le16_to_cpu(sa->len);
 }
 
 extern int process_sigma_firmware(struct i2c_client *client, const char *name);
index 12e6fed..f47f0c3 100644 (file)
@@ -128,13 +128,17 @@ struct sk_buff_head {
 
 struct sk_buff;
 
-/* To allow 64K frame to be packed as single skb without frag_list. Since
- * GRO uses frags we allocate at least 16 regardless of page size.
+/* To allow 64K frame to be packed as single skb without frag_list we
+ * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
+ * buffers which do not start on a page boundary.
+ *
+ * Since GRO uses frags we allocate at least 16 regardless of page
+ * size.
  */
-#if (65536/PAGE_SIZE + 2) < 16
+#if (65536/PAGE_SIZE + 1) < 16
 #define MAX_SKB_FRAGS 16UL
 #else
-#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)
+#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
 #endif
 
 typedef struct skb_frag_struct skb_frag_t;
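
Worked out for the two common page sizes (not part of the patch):

        65536 / 4096  + 1 = 17  ->  not below 16, so MAX_SKB_FRAGS = 17 (was 18)
        65536 / 65536 + 1 =  2  ->  below 16, so the GRO floor keeps MAX_SKB_FRAGS = 16
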
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
new file mode 100644 (file)
index 0000000..379d5dc
--- /dev/null
@@ -0,0 +1,29 @@
+#ifndef __SOCK_DIAG_H__
+#define __SOCK_DIAG_H__
+
+#define SOCK_DIAG_BY_FAMILY 20
+
+struct sk_buff;
+struct nlmsghdr;
+
+struct sock_diag_req {
+       __u8    sdiag_family;
+       __u8    sdiag_protocol;
+};
+
+struct sock_diag_handler {
+       __u8 family;
+       int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
+};
+
+int sock_diag_register(struct sock_diag_handler *h);
+void sock_diag_unregister(struct sock_diag_handler *h);
+
+void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
+void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
+
+int sock_diag_check_cookie(void *sk, __u32 *cookie);
+void sock_diag_save_cookie(void *sk, __u32 *cookie);
+
+extern struct sock *sock_diag_nlsk;
+#endif
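
A skeleton of how a per-family module plugs into the new core; in the tree the AF_INET slot is claimed by inet_diag, so the family and names here are purely illustrative:

#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>

static int example_sock_diag_dump(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        /* Parse the SOCK_DIAG_BY_FAMILY request in nlh and send replies;
         * the details depend entirely on the address family. */
        return 0;
}

static struct sock_diag_handler example_sock_diag_handler = {
        .family = AF_INET,
        .dump   = example_sock_diag_dump,
};

static int __init example_sock_diag_init(void)
{
        return sock_diag_register(&example_sock_diag_handler);
}

static void __exit example_sock_diag_exit(void)
{
        sock_diag_unregister(&example_sock_diag_handler);
}

module_init(example_sock_diag_init);
module_exit(example_sock_diag_exit);
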
index 061e560..dcf35b0 100644 (file)
@@ -94,6 +94,15 @@ struct ssb_sprom {
                } ghz5;         /* 5GHz band */
        } antenna_gain;
 
+       struct {
+               struct {
+                       u8 tssipos, extpa_gain, pdet_range, tr_iso, antswlut;
+               } ghz2;
+               struct {
+                       u8 tssipos, extpa_gain, pdet_range, tr_iso, antswlut;
+               } ghz5;
+       } fem;
+
        /* TODO - add any parameters needed from rev 2, 3, 4, 5 or 8 SPROMs */
 };
 
index 9894120..c814ae6 100644 (file)
 #define  SSB_SPROM8_RXPO2G             0x00FF  /* 2GHz RX power offset */
 #define  SSB_SPROM8_RXPO5G             0xFF00  /* 5GHz RX power offset */
 #define  SSB_SPROM8_RXPO5G_SHIFT       8
+#define SSB_SPROM8_FEM2G               0x00AE
+#define SSB_SPROM8_FEM5G               0x00B0
+#define  SSB_SROM8_FEM_TSSIPOS         0x0001
+#define  SSB_SROM8_FEM_TSSIPOS_SHIFT   0
+#define  SSB_SROM8_FEM_EXTPA_GAIN      0x0006
+#define  SSB_SROM8_FEM_EXTPA_GAIN_SHIFT        1
+#define  SSB_SROM8_FEM_PDET_RANGE      0x00F8
+#define  SSB_SROM8_FEM_PDET_RANGE_SHIFT        3
+#define  SSB_SROM8_FEM_TR_ISO          0x0700
+#define  SSB_SROM8_FEM_TR_ISO_SHIFT    8
+#define  SSB_SROM8_FEM_ANTSWLUT                0xF800
+#define  SSB_SROM8_FEM_ANTSWLUT_SHIFT  11
+#define SSB_SPROM8_THERMAL             0x00B2
+#define SSB_SPROM8_MPWR_RAWTS          0x00B4
+#define SSB_SPROM8_TS_SLP_OPT_CORRX    0x00B6
+#define SSB_SPROM8_FOC_HWIQ_IQSWP      0x00B8
+#define SSB_SPROM8_PHYCAL_TEMPDELTA    0x00BA
 #define SSB_SPROM8_MAXP_BG             0x00C0  /* Max Power 2GHz in path 1 */
 #define  SSB_SPROM8_MAXP_BG_MASK       0x00FF  /* Mask for Max Power 2GHz */
 #define  SSB_SPROM8_ITSSI_BG           0xFF00  /* Mask for path 1 itssi_bg */
index f15fd98..2c5993a 100644 (file)
@@ -215,7 +215,7 @@ static inline bool __rpc_copy_addr4(struct sockaddr *dst,
        return true;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
                                   const struct sockaddr *sap2)
 {
@@ -240,7 +240,7 @@ static inline bool __rpc_copy_addr6(struct sockaddr *dst,
        dsin6->sin6_addr = ssin6->sin6_addr;
        return true;
 }
-#else  /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */
+#else  /* !IS_ENABLED(CONFIG_IPV6) */
 static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
                                   const struct sockaddr *sap2)
 {
@@ -252,7 +252,7 @@ static inline bool __rpc_copy_addr6(struct sockaddr *dst,
 {
        return false;
 }
-#endif /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */
+#endif /* !IS_ENABLED(CONFIG_IPV6) */
 
 /**
  * rpc_cmp_addr - compare the address portion of two sockaddrs.
index 7f59ee9..46a85c9 100644 (file)
@@ -238,6 +238,11 @@ struct tcp_sack_block {
        u32     end_seq;
 };
 
+/*These are used to set the sack_ok field in struct tcp_options_received */
+#define TCP_SACK_SEEN     (1 << 0)   /*1 = peer is SACK capable, */
+#define TCP_FACK_ENABLED  (1 << 1)   /*1 = FACK is enabled locally*/
+#define TCP_DSACK_SEEN    (1 << 2)   /*1 = DSACK was received from peer*/
+
 struct tcp_options_received {
 /*     PAWS/RTTM data  */
        long    ts_recent_stamp;/* Time we stored ts_recent (for aging) */
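
These named bits replace the magic values previously packed into tp->rx_opt.sack_ok. Two illustrative helpers (not part of the patch) show the intended usage:

#include <linux/types.h>
#include <linux/tcp.h>

static inline bool example_peer_is_sack_capable(const struct tcp_sock *tp)
{
        return tp->rx_opt.sack_ok & TCP_SACK_SEEN;
}

static inline void example_note_dsack_received(struct tcp_sock *tp)
{
        tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
}
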
diff --git a/include/linux/unix_diag.h b/include/linux/unix_diag.h
new file mode 100644 (file)
index 0000000..3f7afb0
--- /dev/null
@@ -0,0 +1,45 @@
+#ifndef __UNIX_DIAG_H__
+#define __UNIX_DIAG_H__
+
+struct unix_diag_req {
+       __u8    sdiag_family;
+       __u8    sdiag_protocol;
+       __u16   pad;
+       __u32   udiag_states;
+       __u32   udiag_ino;
+       __u32   udiag_show;
+       __u32   udiag_cookie[2];
+};
+
+#define UDIAG_SHOW_NAME                0x00000001      /* show name (not path) */
+#define UDIAG_SHOW_VFS         0x00000002      /* show VFS inode info */
+#define UDIAG_SHOW_PEER                0x00000004      /* show peer socket info */
+#define UDIAG_SHOW_ICONS       0x00000008      /* show pending connections */
+#define UDIAG_SHOW_RQLEN       0x00000010      /* show skb receive queue len */
+
+struct unix_diag_msg {
+       __u8    udiag_family;
+       __u8    udiag_type;
+       __u8    udiag_state;
+       __u8    pad;
+
+       __u32   udiag_ino;
+       __u32   udiag_cookie[2];
+};
+
+enum {
+       UNIX_DIAG_NAME,
+       UNIX_DIAG_VFS,
+       UNIX_DIAG_PEER,
+       UNIX_DIAG_ICONS,
+       UNIX_DIAG_RQLEN,
+
+       UNIX_DIAG_MAX,
+};
+
+struct unix_diag_vfs {
+       __u32   udiag_vfs_ino;
+       __u32   udiag_vfs_dev;
+};
+
+#endif
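
A minimal userspace sketch of driving the new interface: send a SOCK_DIAG_BY_FAMILY dump request for all AF_UNIX sockets over NETLINK_SOCK_DIAG (error handling omitted, field values illustrative):

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>

int dump_unix_sockets(void)
{
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
        struct {
                struct nlmsghdr nlh;
                struct unix_diag_req req;
        } msg;

        memset(&msg, 0, sizeof(msg));
        msg.nlh.nlmsg_len    = sizeof(msg);
        msg.nlh.nlmsg_type   = SOCK_DIAG_BY_FAMILY;
        msg.nlh.nlmsg_flags  = NLM_F_REQUEST | NLM_F_DUMP;
        msg.req.sdiag_family = AF_UNIX;
        msg.req.udiag_states = ~0U;     /* all socket states */
        msg.req.udiag_show   = UDIAG_SHOW_NAME | UDIAG_SHOW_PEER;

        send(fd, &msg, sizeof(msg), 0);
        /* ... recv() and walk the unix_diag_msg replies ... */
        close(fd);
        return 0;
}
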
index 4b69739..0d63731 100644 (file)
@@ -54,6 +54,9 @@ struct wl12xx_platform_data {
        int board_ref_clock;
        int board_tcxo_clock;
        unsigned long platform_quirks;
+       bool pwr_in_suspend;
+
+       struct wl1271_if_operations *ops;
 };
 
 /* Platform does not support level trigger interrupts */
@@ -73,6 +76,6 @@ int wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
 
 #endif
 
-const struct wl12xx_platform_data *wl12xx_get_platform_data(void);
+struct wl12xx_platform_data *wl12xx_get_platform_data(void);
 
 #endif
index b1377b9..5fb2c3d 100644 (file)
@@ -254,7 +254,7 @@ unsigned long soc_camera_apply_board_flags(struct soc_camera_link *icl,
 static inline struct video_device *soc_camera_i2c_to_vdev(const struct i2c_client *client)
 {
        struct v4l2_subdev *sd = i2c_get_clientdata(client);
-       struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+       struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
        return icd ? icd->vdev : NULL;
 }
 
@@ -279,6 +279,11 @@ static inline struct soc_camera_device *soc_camera_from_vbq(const struct videobu
        return container_of(vq, struct soc_camera_device, vb_vidq);
 }
 
+static inline u32 soc_camera_grp_id(const struct soc_camera_device *icd)
+{
+       return (icd->iface << 8) | (icd->devnum + 1);
+}
+
 void soc_camera_lock(struct vb2_queue *vq);
 void soc_camera_unlock(struct vb2_queue *vq);
 
index 91ab5b0..63b1781 100644 (file)
@@ -11,10 +11,13 @@ extern void unix_notinflight(struct file *fp);
 extern void unix_gc(void);
 extern void wait_for_unix_gc(void);
 extern struct sock *unix_get_socket(struct file *filp);
+extern struct sock *unix_peer_get(struct sock *);
 
 #define UNIX_HASH_SIZE 256
 
 extern unsigned int unix_tot_inflight;
+extern spinlock_t unix_table_lock;
+extern struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
 
 struct unix_address {
        atomic_t        refcnt;
index 835f3b2..980e59f 100644 (file)
 #define PF_BLUETOOTH   AF_BLUETOOTH
 #endif
 
+/* Bluetooth versions */
+#define BLUETOOTH_VER_1_1      1
+#define BLUETOOTH_VER_1_2      2
+#define BLUETOOTH_VER_2_0      3
+
 /* Reserv for core and drivers use */
 #define BT_SKB_RESERVE 8
 
index 139ce2a..67ad984 100644 (file)
@@ -88,6 +88,14 @@ enum {
        HCI_RESET,
 };
 
+/*
+ * BR/EDR and/or LE controller flags: the flags defined here should represent
+ * states from the controller.
+ */
+enum {
+       HCI_LE_SCAN,
+};
+
 /* HCI ioctl defines */
 #define HCIDEVUP       _IOW('H', 201, int)
 #define HCIDEVDOWN     _IOW('H', 202, int)
@@ -453,6 +461,14 @@ struct hci_rp_user_confirm_reply {
 
 #define HCI_OP_USER_CONFIRM_NEG_REPLY  0x042d
 
+#define HCI_OP_USER_PASSKEY_REPLY              0x042e
+struct hci_cp_user_passkey_reply {
+       bdaddr_t bdaddr;
+       __le32  passkey;
+} __packed;
+
+#define HCI_OP_USER_PASSKEY_NEG_REPLY  0x042f
+
 #define HCI_OP_REMOTE_OOB_DATA_REPLY   0x0430
 struct hci_cp_remote_oob_data_reply {
        bdaddr_t bdaddr;
@@ -669,6 +685,12 @@ struct hci_rp_read_local_oob_data {
 
 #define HCI_OP_READ_INQ_RSP_TX_POWER   0x0c58
 
+#define HCI_OP_READ_FLOW_CONTROL_MODE  0x0c66
+struct hci_rp_read_flow_control_mode {
+       __u8     status;
+       __u8     mode;
+} __packed;
+
 #define HCI_OP_WRITE_LE_HOST_SUPPORTED 0x0c6d
 struct hci_cp_write_le_host_supported {
        __u8 le;
@@ -760,6 +782,15 @@ struct hci_rp_le_read_buffer_size {
        __u8     le_max_pkt;
 } __packed;
 
+#define HCI_OP_LE_SET_SCAN_PARAM       0x200b
+struct hci_cp_le_set_scan_param {
+       __u8    type;
+       __le16  interval;
+       __le16  window;
+       __u8    own_address_type;
+       __u8    filter_policy;
+} __packed;
+
 #define HCI_OP_LE_SET_SCAN_ENABLE      0x200c
 struct hci_cp_le_set_scan_enable {
        __u8     enable;
@@ -1076,6 +1107,11 @@ struct hci_ev_user_confirm_req {
        __le32          passkey;
 } __packed;
 
+#define HCI_EV_USER_PASSKEY_REQUEST    0x34
+struct hci_ev_user_passkey_req {
+       bdaddr_t        bdaddr;
+} __packed;
+
 #define HCI_EV_REMOTE_OOB_DATA_REQUEST 0x35
 struct hci_ev_remote_oob_data_request {
        bdaddr_t bdaddr;
@@ -1331,4 +1367,6 @@ struct hci_inquiry_req {
 };
 #define IREQ_CACHE_FLUSH 0x0001
 
+extern int enable_hs;
+
 #endif /* __HCI_H */
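
The new LE scan parameter command is just another fixed-size HCI payload. A hedged kernel-side sketch of issuing it through the existing hci_send_cmd() helper (interval and window in the usual 0.625 ms HCI units) could look like this; it is illustrative only, not the stack's actual call site.

/* Illustrative only: configure a passive LE scan with a 10 ms interval
 * and window before sending HCI_OP_LE_SET_SCAN_ENABLE; no error handling.
 */
static int example_le_set_scan_param(struct hci_dev *hdev)
{
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type     = 0x00;			/* passive scanning */
	cp.interval = cpu_to_le16(0x0010);	/* 16 * 0.625 ms = 10 ms */
	cp.window   = cpu_to_le16(0x0010);
	/* own_address_type and filter_policy stay 0: public address, accept all */

	return hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
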
index f333e76..ea4395f 100644 (file)
@@ -170,6 +170,8 @@ struct hci_dev {
        __u32           amp_max_flush_to;
        __u32           amp_be_flush_to;
 
+       __u8            flow_ctl_mode;
+
        unsigned int    auto_accept_delay;
 
        unsigned long   quirks;
@@ -250,6 +252,8 @@ struct hci_dev {
 
        struct module           *owner;
 
+       unsigned long           dev_flags;
+
        int (*open)(struct hci_dev *hdev);
        int (*close)(struct hci_dev *hdev);
        int (*flush)(struct hci_dev *hdev);
@@ -917,11 +921,13 @@ int mgmt_connectable(struct hci_dev *hdev, u8 connectable);
 int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status);
 int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
                                                                u8 persistent);
-int mgmt_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
-int mgmt_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
-int mgmt_disconnect_failed(struct hci_dev *hdev);
-int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type,
-                                                               u8 status);
+int mgmt_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+                                                               u8 addr_type);
+int mgmt_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+                                                               u8 addr_type);
+int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status);
+int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+                                               u8 addr_type, u8 status);
 int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
 int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                                                u8 status);
@@ -933,14 +939,20 @@ int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                                                u8 status);
 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev,
                                                bdaddr_t *bdaddr, u8 status);
+int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr);
+int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                                                               u8 status);
+int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev,
+                                               bdaddr_t *bdaddr, u8 status);
 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status);
 int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
 int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
                                                u8 *randomizer, u8 status);
-int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type,
-                                       u8 *dev_class, s8 rssi, u8 *eir);
+int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+                               u8 addr_type, u8 *dev_class, s8 rssi, u8 *eir);
 int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name);
-int mgmt_inquiry_failed(struct hci_dev *hdev, u8 status);
+int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status);
+int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status);
 int mgmt_discovering(struct hci_dev *hdev, u8 discovering);
 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr);
 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr);
index 875021a..72632f1 100644 (file)
@@ -791,8 +791,7 @@ static inline __u8 __ctrl_size(struct l2cap_chan *chan)
                return L2CAP_ENH_HDR_SIZE - L2CAP_HDR_SIZE;
 }
 
-extern int disable_ertm;
-extern int enable_hs;
+extern bool disable_ertm;
 
 int l2cap_init_sockets(void);
 void l2cap_cleanup_sockets(void);
@@ -810,5 +809,6 @@ int l2cap_chan_connect(struct l2cap_chan *chan);
 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
                                                                u32 priority);
 void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
+int l2cap_chan_check_security(struct l2cap_chan *chan);
 
 #endif /* __L2CAP_H */
index 3e320c9..3b68806 100644 (file)
 
 #define MGMT_INDEX_NONE                        0xFFFF
 
+#define MGMT_STATUS_SUCCESS            0x00
+#define MGMT_STATUS_UNKNOWN_COMMAND    0x01
+#define MGMT_STATUS_NOT_CONNECTED      0x02
+#define MGMT_STATUS_FAILED             0x03
+#define MGMT_STATUS_CONNECT_FAILED     0x04
+#define MGMT_STATUS_AUTH_FAILED                0x05
+#define MGMT_STATUS_NOT_PAIRED         0x06
+#define MGMT_STATUS_NO_RESOURCES       0x07
+#define MGMT_STATUS_TIMEOUT            0x08
+#define MGMT_STATUS_ALREADY_CONNECTED  0x09
+#define MGMT_STATUS_BUSY               0x0a
+#define MGMT_STATUS_REJECTED           0x0b
+#define MGMT_STATUS_NOT_SUPPORTED      0x0c
+#define MGMT_STATUS_INVALID_PARAMS     0x0d
+#define MGMT_STATUS_DISCONNECTED       0x0e
+#define MGMT_STATUS_NOT_POWERED                0x0f
+
 struct mgmt_hdr {
        __le16 opcode;
        __le16 index;
@@ -119,6 +136,10 @@ struct mgmt_cp_remove_keys {
        bdaddr_t bdaddr;
        __u8 disconnect;
 } __packed;
+struct mgmt_rp_remove_keys {
+       bdaddr_t bdaddr;
+       __u8 status;
+};
 
 #define MGMT_OP_DISCONNECT             0x000F
 struct mgmt_cp_disconnect {
@@ -126,11 +147,12 @@ struct mgmt_cp_disconnect {
 } __packed;
 struct mgmt_rp_disconnect {
        bdaddr_t bdaddr;
+       __u8 status;
 } __packed;
 
 #define MGMT_ADDR_BREDR                        0x00
-#define MGMT_ADDR_LE                   0x01
-#define MGMT_ADDR_BREDR_LE             0x02
+#define MGMT_ADDR_LE_PUBLIC            0x01
+#define MGMT_ADDR_LE_RANDOM            0x02
 #define MGMT_ADDR_INVALID              0xff
 
 struct mgmt_addr_info {
@@ -167,11 +189,11 @@ struct mgmt_cp_set_io_capability {
 
 #define MGMT_OP_PAIR_DEVICE            0x0014
 struct mgmt_cp_pair_device {
-       bdaddr_t bdaddr;
+       struct mgmt_addr_info addr;
        __u8 io_cap;
 } __packed;
 struct mgmt_rp_pair_device {
-       bdaddr_t bdaddr;
+       struct mgmt_addr_info addr;
        __u8 status;
 } __packed;
 
@@ -210,6 +232,9 @@ struct mgmt_cp_remove_remote_oob_data {
 } __packed;
 
 #define MGMT_OP_START_DISCOVERY                0x001B
+struct mgmt_cp_start_discovery {
+       __u8 type;
+} __packed;
 
 #define MGMT_OP_STOP_DISCOVERY         0x001C
 
@@ -228,6 +253,17 @@ struct mgmt_cp_set_fast_connectable {
        __u8 enable;
 } __packed;
 
+#define MGMT_OP_USER_PASSKEY_REPLY     0x0020
+struct mgmt_cp_user_passkey_reply {
+       bdaddr_t bdaddr;
+       __le32 passkey;
+} __packed;
+
+#define MGMT_OP_USER_PASSKEY_NEG_REPLY 0x0021
+struct mgmt_cp_user_passkey_neg_reply {
+       bdaddr_t bdaddr;
+} __packed;
+
 #define MGMT_EV_CMD_COMPLETE           0x0001
 struct mgmt_ev_cmd_complete {
        __le16 opcode;
@@ -322,3 +358,8 @@ struct mgmt_ev_device_blocked {
 struct mgmt_ev_device_unblocked {
        bdaddr_t bdaddr;
 } __packed;
+
+#define MGMT_EV_USER_PASSKEY_REQUEST   0x0017
+struct mgmt_ev_user_passkey_request {
+       bdaddr_t bdaddr;
+} __packed;
index 87c3d11..aa6a485 100644 (file)
@@ -55,8 +55,8 @@
 struct cfspi_xfer {
        u16 tx_dma_len;
        u16 rx_dma_len;
-       void *va_tx;
-       dma_addr_t pa_tx;
+       void *va_tx[2];
+       dma_addr_t pa_tx[2];
        void *va_rx;
        dma_addr_t pa_rx;
 };
index d5e1891..9f85fca 100644 (file)
@@ -782,6 +782,7 @@ struct mesh_config {
        u16 min_discovery_timeout;
        u32 dot11MeshHWMPactivePathTimeout;
        u16 dot11MeshHWMPpreqMinInterval;
+       u16 dot11MeshHWMPperrMinInterval;
        u16 dot11MeshHWMPnetDiameterTraversalTime;
        u8  dot11MeshHWMPRootMode;
        u16 dot11MeshHWMPRannInterval;
@@ -802,6 +803,7 @@ struct mesh_config {
  * @ie_len: length of vendor information elements
  * @is_authenticated: this mesh requires authentication
  * @is_secure: this mesh uses security
+ * @mcast_rate: multicast rate for Mesh Node [6Mbps is the default for 802.11a]
  *
  * These parameters are fixed when the mesh is created.
  */
@@ -814,6 +816,7 @@ struct mesh_setup {
        u8 ie_len;
        bool is_authenticated;
        bool is_secure;
+       int mcast_rate[IEEE80211_NUM_BANDS];
 };
 
 /**
@@ -1146,6 +1149,7 @@ struct cfg80211_ibss_params {
        u8 *ssid;
        u8 *bssid;
        struct ieee80211_channel *channel;
+       enum nl80211_channel_type channel_type;
        u8 *ie;
        u8 ssid_len, ie_len;
        u16 beacon_interval;
@@ -1342,7 +1346,12 @@ struct cfg80211_gtk_rekey_data {
  *
  * @add_station: Add a new station.
  * @del_station: Remove a station; @mac may be NULL to remove all stations.
- * @change_station: Modify a given station.
+ * @change_station: Modify a given station. Note that flag changes are not
+ *     fully validated in cfg80211; in particular the auth/assoc/authorized flags
+ *     might come to the driver in invalid combinations -- make sure to check
+ *     them, also against the existing state! Also, supported_rates changes are
+ *     not checked in station mode -- drivers need to reject (or ignore) them
+ *     for anything but TDLS peers.
  * @get_station: get station information for the station identified by @mac
  * @dump_station: dump station callback -- resume dump at index @idx
  *
@@ -1399,7 +1408,8 @@ struct cfg80211_gtk_rekey_data {
  *     have changed. The actual parameter values are available in
  *     struct wiphy. If returning an error, no value should be changed.
  *
- * @set_tx_power: set the transmit power according to the parameters
+ * @set_tx_power: set the transmit power according to the parameters;
+ *     the power passed is in mBm, use MBM_TO_DBM() to get dBm.
  * @get_tx_power: store the current TX power into the dbm variable;
  *     return 0 if successful
  *
@@ -1465,6 +1475,8 @@ struct cfg80211_gtk_rekey_data {
  *
  * @probe_client: probe an associated client, must return a cookie that it
  *     later passes to cfg80211_probe_status().
+ *
+ * @set_noack_map: Set the NoAck Map for the TIDs.
  */
 struct cfg80211_ops {
        int     (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -1658,6 +1670,10 @@ struct cfg80211_ops {
        int     (*probe_client)(struct wiphy *wiphy, struct net_device *dev,
                                const u8 *peer, u64 *cookie);
 
+       int     (*set_noack_map)(struct wiphy *wiphy,
+                                 struct net_device *dev,
+                                 u16 noack_map);
+
        struct ieee80211_channel *(*get_channel)(struct wiphy *wiphy);
 };
 
@@ -1683,7 +1699,9 @@ struct cfg80211_ops {
  *     regulatory domain no user regulatory domain can enable these channels
  *     at a later time. This can be used for devices which do not have
  *     calibration information guaranteed for frequencies or settings
- *     outside of its regulatory domain.
+ *     outside of its regulatory domain. If used in combination with
+ *     WIPHY_FLAG_CUSTOM_REGULATORY the inspected country IE power settings
+ *     will be followed.
  * @WIPHY_FLAG_DISABLE_BEACON_HINTS: enable this if your driver needs to ensure
  *     that passive scan flags and beaconing flags may not be lifted by
  *     cfg80211 due to regulatory beacon hints. For more information on beacon
@@ -3052,6 +3070,32 @@ void cfg80211_roamed(struct net_device *dev,
                     const u8 *req_ie, size_t req_ie_len,
                     const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp);
 
+/**
+ * cfg80211_roamed_bss - notify cfg80211 of roaming
+ *
+ * @dev: network device
+ * @bss: entry of bss to which STA got roamed
+ * @req_ie: association request IEs (may be %NULL)
+ * @req_ie_len: association request IEs length
+ * @resp_ie: association response IEs (may be %NULL)
+ * @resp_ie_len: assoc response IEs length
+ * @gfp: allocation flags
+ *
+ * This is just a wrapper to notify cfg80211 of a roaming event with the
+ * driver passing the bss entry to avoid a race in timeout of the bss entry.
+ * It should be called by the underlying driver whenever it roams from one
+ * AP to another while connected. Drivers which have roaming implemented in
+ * firmware may use this function to avoid a race in bss entry timeout where
+ * the bss entry of the new AP is seen in the driver, but gets timed out by
+ * the time it is accessed in __cfg80211_roamed() due to a delay in scheduling
+ * rdev->event_work. In case of any failures, the reference is released
+ * either in cfg80211_roamed_bss() or in __cfg80211_roamed(); otherwise,
+ * it will be released while disconnecting from the current bss.
+ */
+void cfg80211_roamed_bss(struct net_device *dev, struct cfg80211_bss *bss,
+                        const u8 *req_ie, size_t req_ie_len,
+                        const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp);
+
 /**
  * cfg80211_disconnected - notify cfg80211 that connection was dropped
  *
@@ -3257,6 +3301,16 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy,
                                 const u8 *frame, size_t len,
                                 int freq, gfp_t gfp);
 
+/*
+ * cfg80211_can_beacon_sec_chan - test if ht40 on extension channel can be used
+ * @wiphy: the wiphy
+ * @chan: main channel
+ * @channel_type: HT mode
+ */
+int cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
+                                struct ieee80211_channel *chan,
+                                enum nl80211_channel_type channel_type);
+
 /* Logging, debugging and troubleshooting/diagnostic helpers. */
 
 /* wiphy_printk helpers, similar to dev_printk */
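
The new set_noack_map callback only hands the driver a 16-bit per-TID bitmap. A hypothetical driver hookup is sketched below; the mydrv_* names are placeholders, not a real in-tree driver.

/* Hypothetical driver glue for the NoAck map op. */
struct mydrv_priv {
	u16 noack_map;		/* bit N set => do not ACK frames of TID N */
};

static int mydrv_set_noack_map(struct wiphy *wiphy, struct net_device *dev,
			       u16 noack_map)
{
	struct mydrv_priv *priv = wiphy_priv(wiphy);

	priv->noack_map = noack_map;
	return 0;		/* a real driver would push this to firmware */
}

static const struct cfg80211_ops mydrv_cfg_ops = {
	/* ... the usual ops ... */
	.set_noack_map = mydrv_set_noack_map,
};
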
index 6faec1a..344c8dd 100644 (file)
@@ -53,6 +53,7 @@ struct dst_entry {
 #define DST_NOHASH             0x0008
 #define DST_NOCACHE            0x0010
 #define DST_NOCOUNT            0x0020
+#define DST_NOPEER             0x0040
 
        short                   error;
        short                   obsolete;
@@ -86,12 +87,12 @@ struct dst_entry {
        };
 };
 
-static inline struct neighbour *dst_get_neighbour(struct dst_entry *dst)
+static inline struct neighbour *dst_get_neighbour_noref(struct dst_entry *dst)
 {
        return rcu_dereference(dst->_neighbour);
 }
 
-static inline struct neighbour *dst_get_neighbour_raw(struct dst_entry *dst)
+static inline struct neighbour *dst_get_neighbour_noref_raw(struct dst_entry *dst)
 {
        return rcu_dereference_raw(dst->_neighbour);
 }
@@ -392,7 +393,7 @@ static inline void dst_confirm(struct dst_entry *dst)
                struct neighbour *n;
 
                rcu_read_lock();
-               n = dst_get_neighbour(dst);
+               n = dst_get_neighbour_noref(dst);
                neigh_confirm(n);
                rcu_read_unlock();
        }
index 9192d69..da1f064 100644 (file)
@@ -210,6 +210,7 @@ extern struct flow_cache_object *flow_cache_lookup(
                u8 dir, flow_resolve_t resolver, void *ctx);
 
 extern void flow_cache_flush(void);
+extern void flow_cache_flush_deferred(void);
 extern atomic_t flow_cache_genid;
 
 #endif
index e46674d..00cbb43 100644 (file)
@@ -15,7 +15,7 @@
 #define _INET6_HASHTABLES_H
 
 
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <linux/in6.h>
 #include <linux/ipv6.h>
 #include <linux/types.h>
@@ -110,5 +110,5 @@ extern struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo
                                 const struct in6_addr *saddr, const __be16 sport,
                                 const struct in6_addr *daddr, const __be16 dport,
                                 const int dif);
-#endif /* defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) */
+#endif /* IS_ENABLED(CONFIG_IPV6) */
 #endif /* _INET6_HASHTABLES_H */
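
These CONFIG_IPV6 conversions rely on IS_ENABLED() from include/linux/kconfig.h, which evaluates to 1 when an option is either built in or modular, so the old two-term test collapses into one readable condition:

/* Sketch of the idiom: both spellings select exactly the same code. */
#include <linux/kconfig.h>

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)	/* old spelling */
#define HAS_IPV6_OLD 1
#endif

#if IS_ENABLED(CONFIG_IPV6)					/* new spelling */
#define HAS_IPV6_NEW 1
#endif
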
index f941964..e3e4051 100644 (file)
@@ -71,7 +71,7 @@ struct ip_options_data {
 
 struct inet_request_sock {
        struct request_sock     req;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        u16                     inet6_rsk_offset;
 #endif
        __be16                  loc_port;
@@ -139,7 +139,7 @@ struct rtable;
 struct inet_sock {
        /* sk and pinet6 has to be the first two members of inet_sock */
        struct sock             sk;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct ipv6_pinfo       *pinet6;
 #endif
        /* Socket demultiplex comparisons on incoming packets. */
@@ -188,7 +188,7 @@ static inline void __inet_sk_copy_descendant(struct sock *sk_to,
        memcpy(inet_sk(sk_to) + 1, inet_sk(sk_from) + 1,
               sk_from->sk_prot->obj_size - ancestor_size);
 }
-#if !(defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE))
+#if !(IS_ENABLED(CONFIG_IPV6))
 static inline void inet_sk_copy_descendant(struct sock *sk_to,
                                           const struct sock *sk_from)
 {
index e8c25b9..ba52c83 100644 (file)
@@ -218,20 +218,12 @@ extern void inet_twsk_purge(struct inet_hashinfo *hashinfo,
 static inline
 struct net *twsk_net(const struct inet_timewait_sock *twsk)
 {
-#ifdef CONFIG_NET_NS
-       return rcu_dereference_raw(twsk->tw_net); /* protected by locking, */
-                                                 /* reference counting, */
-                                                 /* initialization, or RCU. */
-#else
-       return &init_net;
-#endif
+       return read_pnet(&twsk->tw_net);
 }
 
 static inline
 void twsk_net_set(struct inet_timewait_sock *twsk, struct net *net)
 {
-#ifdef CONFIG_NET_NS
-       rcu_assign_pointer(twsk->tw_net, net);
-#endif
+       write_pnet(&twsk->tw_net, net);
 }
 #endif /* _INET_TIMEWAIT_SOCK_ */
index fd1561e..775009f 100644 (file)
@@ -353,14 +353,14 @@ static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast,
                memcpy(buf, &naddr, sizeof(naddr));
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <linux/ipv6.h>
 #endif
 
 static __inline__ void inet_reset_saddr(struct sock *sk)
 {
        inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        if (sk->sk_family == PF_INET6) {
                struct ipv6_pinfo *np = inet6_sk(sk);
 
@@ -379,7 +379,7 @@ static inline int sk_mc_loop(struct sock *sk)
        switch (sk->sk_family) {
        case AF_INET:
                return inet_sk(sk)->mc_loop;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                return inet6_sk(sk)->mc_loop;
 #endif
index 9c9399c..2ad92ca 100644 (file)
@@ -97,14 +97,14 @@ extern struct rt6_info              *rt6_lookup(struct net *net,
 
 extern struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
                                         struct neighbour *neigh,
-                                        const struct in6_addr *addr);
+                                        struct flowi6 *fl6);
 extern int icmp6_dst_gc(void);
 
 extern void fib6_force_start_gc(struct net *net);
 
 extern struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
                                           const struct in6_addr *addr,
-                                          int anycast);
+                                          bool anycast);
 
 extern int                     ip6_dst_hoplimit(struct dst_entry *dst);
 
index f2419cf..0954ec9 100644 (file)
@@ -27,7 +27,6 @@ enum {
        IUCV_OPEN,
        IUCV_BOUND,
        IUCV_LISTEN,
-       IUCV_SEVERED,
        IUCV_DISCONN,
        IUCV_CLOSING,
        IUCV_CLOSED
@@ -146,7 +145,6 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
                            poll_table *wait);
 void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
 void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
-int  iucv_sock_wait_cnt(struct sock *sk, unsigned long timeo);
 void iucv_accept_enqueue(struct sock *parent, struct sock *sk);
 void iucv_accept_unlink(struct sock *sk);
 struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock);
index 3bb6fa0..ee547c1 100644 (file)
@@ -77,7 +77,7 @@ struct net {
        struct netns_packet     packet;
        struct netns_unix       unx;
        struct netns_ipv4       ipv4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct netns_ipv6       ipv6;
 #endif
 #if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
index e505358..75ca929 100644 (file)
@@ -131,7 +131,7 @@ nf_tproxy_get_sock_v4(struct net *net, const u8 protocol,
        return sk;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static inline struct sock *
 nf_tproxy_get_sock_v6(struct net *net, const u8 protocol,
                      const struct in6_addr *saddr, const struct in6_addr *daddr,
index d786b4f..bbd023a 100644 (file)
@@ -55,6 +55,7 @@ struct netns_ipv4 {
        int current_rt_cache_rebuild_count;
 
        unsigned int sysctl_ping_group_range[2];
+       long sysctl_tcp_mem[3];
 
        atomic_t rt_genid;
        atomic_t dev_addr_genid;
index 30f6728..d542a4b 100644 (file)
@@ -12,7 +12,7 @@ struct netns_mib {
        DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics);
        DEFINE_SNMP_STAT_ATOMIC(struct icmpmsg_mib, icmpmsg_statistics);
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct proc_dir_entry *proc_net_devsnmp6;
        DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6);
        DEFINE_SNMP_STAT(struct udp_mib, udplite_stats_in6);
index 748f91f..5299e69 100644 (file)
@@ -56,7 +56,7 @@ struct netns_xfrm {
 #endif
 
        struct dst_ops          xfrm4_dst_ops;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct dst_ops          xfrm6_dst_ops;
 #endif
 };
index 6a7f602..ccfe757 100644 (file)
@@ -52,6 +52,9 @@ struct nfc_ops {
        int (*dev_down)(struct nfc_dev *dev);
        int (*start_poll)(struct nfc_dev *dev, u32 protocols);
        void (*stop_poll)(struct nfc_dev *dev);
+       int (*dep_link_up)(struct nfc_dev *dev, int target_idx,
+                               u8 comm_mode, u8 rf_mode);
+       int (*dep_link_down)(struct nfc_dev *dev);
        int (*activate_target)(struct nfc_dev *dev, u32 target_idx,
                                                        u32 protocol);
        void (*deactivate_target)(struct nfc_dev *dev, u32 target_idx);
@@ -60,6 +63,9 @@ struct nfc_ops {
                                                        void *cb_context);
 };
 
+#define NFC_TARGET_IDX_ANY -1
+#define NFC_MAX_GT_LEN 48
+
 struct nfc_target {
        u32 idx;
        u32 supported_protocols;
@@ -83,6 +89,8 @@ struct nfc_dev {
        bool dev_up;
        bool polling;
        bool remote_activated;
+       bool dep_link_up;
+       u32 dep_rf_mode;
        struct nfc_genl_data genl_data;
        u32 supported_protocols;
 
@@ -157,9 +165,20 @@ static inline const char *nfc_device_name(struct nfc_dev *dev)
        return dev_name(&dev->dev);
 }
 
-struct sk_buff *nfc_alloc_skb(unsigned int size, gfp_t gfp);
+struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk,
+                                       unsigned int flags, unsigned int size,
+                                       unsigned int *err);
+struct sk_buff *nfc_alloc_recv_skb(unsigned int size, gfp_t gfp);
+
+int nfc_set_remote_general_bytes(struct nfc_dev *dev,
+                                       u8 *gt, u8 gt_len);
+
+u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, u8 *gt_len);
 
 int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets,
                                                        int ntargets);
 
+int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
+                      u8 comm_mode, u8 rf_mode);
+
 #endif /* __NET_NFC_H */
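
A driver opting into the new NFC-DEP (peer-to-peer) support only needs to provide the two extra callbacks in its nfc_ops. A hypothetical ops table is sketched below; only fields visible in this header are shown and the mydrv_* handlers are placeholders.

static struct nfc_ops mydrv_nfc_ops = {
	.dev_up			= mydrv_dev_up,
	.dev_down		= mydrv_dev_down,
	.start_poll		= mydrv_start_poll,
	.stop_poll		= mydrv_stop_poll,
	.dep_link_up		= mydrv_dep_link_up,	/* new: bring the DEP link up */
	.dep_link_down		= mydrv_dep_link_down,	/* new: tear the DEP link down */
	.activate_target	= mydrv_activate_target,
	.deactivate_target	= mydrv_deactivate_target,
};
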
index e182e13..875f489 100644 (file)
@@ -25,7 +25,7 @@
 #define _PROTOCOL_H
 
 #include <linux/in6.h>
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <linux/ipv6.h>
 #endif
 
@@ -46,7 +46,7 @@ struct net_protocol {
                                netns_ok:1;
 };
 
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 struct inet6_protocol {
        int     (*handler)(struct sk_buff *skb);
 
@@ -91,7 +91,7 @@ struct inet_protosw {
 
 extern const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS];
 
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 extern const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS];
 #endif
 
@@ -100,7 +100,7 @@ extern int  inet_del_protocol(const struct net_protocol *prot, unsigned char num)
 extern void    inet_register_protosw(struct inet_protosw *p);
 extern void    inet_unregister_protosw(struct inet_protosw *p);
 
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 extern int     inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num);
 extern int     inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num);
 extern int     inet6_register_protosw(struct inet_protosw *p);
index b72a3b8..ef715a1 100644 (file)
@@ -5,6 +5,7 @@
 #include <net/pkt_sched.h>
 #include <net/inet_ecn.h>
 #include <net/dsfield.h>
+#include <linux/reciprocal_div.h>
 
 /*     Random Early Detection (RED) algorithm.
        =======================================
        etc.
  */
 
+/*
+ * Adaptive RED : An Algorithm for Increasing the Robustness of RED's AQM
+ * (Sally Floyd, Ramakrishna Gummadi, and Scott Shenker) August 2001
+ *
+ * Every 500 ms:
+ *  if (avg > target and max_p <= 0.5)
+ *   increase max_p : max_p += alpha;
+ *  else if (avg < target and max_p >= 0.01)
+ *   decrease max_p : max_p *= beta;
+ *
+ * target :[qth_min + 0.4*(qth_max - qth_min),
+ *          qth_min + 0.6*(qth_max - qth_min)].
+ * alpha : min(0.01, max_p / 4)
+ * beta : 0.9
+ * max_P is a Q0.32 fixed point number (32-bit mantissa)
+ * max_P between 0.01 and 0.5 (1% - 50%) [ It's no longer a negative power of two ]
+ */
+#define RED_ONE_PERCENT ((u32)DIV_ROUND_CLOSEST(1ULL<<32, 100))
+
+#define MAX_P_MIN (1 * RED_ONE_PERCENT)
+#define MAX_P_MAX (50 * RED_ONE_PERCENT)
+#define MAX_P_ALPHA(val) min(MAX_P_MIN, val / 4)
+
 #define RED_STAB_SIZE  256
 #define RED_STAB_MASK  (RED_STAB_SIZE - 1)
 
@@ -101,10 +125,14 @@ struct red_stats {
 
 struct red_parms {
        /* Parameters */
-       u32             qth_min;        /* Min avg length threshold: A scaled */
-       u32             qth_max;        /* Max avg length threshold: A scaled */
+       u32             qth_min;        /* Min avg length threshold: Wlog scaled */
+       u32             qth_max;        /* Max avg length threshold: Wlog scaled */
        u32             Scell_max;
-       u32             Rmask;          /* Cached random mask, see red_rmask */
+       u32             max_P;          /* probability, [0 .. 1.0] 32 scaled */
+       u32             max_P_reciprocal; /* reciprocal_value(max_P / qth_delta) */
+       u32             qth_delta;      /* max_th - min_th */
+       u32             target_min;     /* min_th + 0.4*(max_th - min_th) */
+       u32             target_max;     /* min_th + 0.6*(max_th - min_th) */
        u8              Scell_log;
        u8              Wlog;           /* log(W)               */
        u8              Plog;           /* random number bits   */
@@ -115,19 +143,23 @@ struct red_parms {
                                           number generation */
        u32             qR;             /* Cached random number */
 
-       unsigned long   qavg;           /* Average queue length: A scaled */
+       unsigned long   qavg;           /* Average queue length: Wlog scaled */
        ktime_t         qidlestart;     /* Start of current idle period */
 };
 
-static inline u32 red_rmask(u8 Plog)
+static inline u32 red_maxp(u8 Plog)
 {
-       return Plog < 32 ? ((1 << Plog) - 1) : ~0UL;
+       return Plog < 32 ? (~0U >> Plog) : ~0U;
 }
 
+
 static inline void red_set_parms(struct red_parms *p,
                                 u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
-                                u8 Scell_log, u8 *stab)
+                                u8 Scell_log, u8 *stab, u32 max_P)
 {
+       int delta = qth_max - qth_min;
+       u32 max_p_delta;
+
        /* Reset average queue length, the value is strictly bound
         * to the parameters below, resetting hurts a bit but leaving
         * it might result in an unreasonable qavg for a while. --TGR
@@ -139,14 +171,33 @@ static inline void red_set_parms(struct red_parms *p,
        p->qth_max      = qth_max << Wlog;
        p->Wlog         = Wlog;
        p->Plog         = Plog;
-       p->Rmask        = red_rmask(Plog);
+       if (delta < 0)
+               delta = 1;
+       p->qth_delta    = delta;
+       if (!max_P) {
+               max_P = red_maxp(Plog);
+               max_P *= delta; /* max_P = (qth_max - qth_min)/2^Plog */
+       }
+       p->max_P = max_P;
+       max_p_delta = max_P / delta;
+       max_p_delta = max(max_p_delta, 1U);
+       p->max_P_reciprocal  = reciprocal_value(max_p_delta);
+
+       /* RED Adaptive target :
+        * [min_th + 0.4*(max_th - min_th),
+        *  min_th + 0.6*(max_th - min_th)].
+        */
+       delta /= 5;
+       p->target_min = qth_min + 2*delta;
+       p->target_max = qth_min + 3*delta;
+
        p->Scell_log    = Scell_log;
        p->Scell_max    = (255 << Scell_log);
 
        memcpy(p->Stab, stab, sizeof(p->Stab));
 }
 
-static inline int red_is_idling(struct red_parms *p)
+static inline int red_is_idling(const struct red_parms *p)
 {
        return p->qidlestart.tv64 != 0;
 }
@@ -168,7 +219,7 @@ static inline void red_restart(struct red_parms *p)
        p->qcount = -1;
 }
 
-static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
+static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms *p)
 {
        s64 delta = ktime_us_delta(ktime_get(), p->qidlestart);
        long us_idle = min_t(s64, delta, p->Scell_max);
@@ -215,7 +266,7 @@ static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
        }
 }
 
-static inline unsigned long red_calc_qavg_no_idle_time(struct red_parms *p,
+static inline unsigned long red_calc_qavg_no_idle_time(const struct red_parms *p,
                                                       unsigned int backlog)
 {
        /*
@@ -230,7 +281,7 @@ static inline unsigned long red_calc_qavg_no_idle_time(struct red_parms *p,
        return p->qavg + (backlog - (p->qavg >> p->Wlog));
 }
 
-static inline unsigned long red_calc_qavg(struct red_parms *p,
+static inline unsigned long red_calc_qavg(const struct red_parms *p,
                                          unsigned int backlog)
 {
        if (!red_is_idling(p))
@@ -239,23 +290,24 @@ static inline unsigned long red_calc_qavg(struct red_parms *p,
                return red_calc_qavg_from_idle_time(p);
 }
 
-static inline u32 red_random(struct red_parms *p)
+
+static inline u32 red_random(const struct red_parms *p)
 {
-       return net_random() & p->Rmask;
+       return reciprocal_divide(net_random(), p->max_P_reciprocal);
 }
 
-static inline int red_mark_probability(struct red_parms *p, unsigned long qavg)
+static inline int red_mark_probability(const struct red_parms *p, unsigned long qavg)
 {
        /* The formula used below causes questions.
 
-          OK. qR is random number in the interval 0..Rmask
+          OK. qR is random number in the interval
+               (0..1/max_P)*(qth_max-qth_min)
           i.e. 0..(2^Plog). If we used floating point
           arithmetic, it would be: (2^Plog)*rnd_num,
           where rnd_num is less than 1.
 
           Taking into account that qavg has a fixed
-          point at Wlog, and Plog is related to max_P by
-          max_P = (qth_max-qth_min)/2^Plog; two lines
+          point at Wlog, two lines
           below have the following floating point equivalent:
 
           max_P*(qavg - qth_min)/(qth_max-qth_min) < rnd/qcount
@@ -315,4 +367,25 @@ static inline int red_action(struct red_parms *p, unsigned long qavg)
        return RED_DONT_MARK;
 }
 
+static inline void red_adaptative_algo(struct red_parms *p)
+{
+       unsigned long qavg;
+       u32 max_p_delta;
+
+       qavg = p->qavg;
+       if (red_is_idling(p))
+               qavg = red_calc_qavg_from_idle_time(p);
+
+       /* p->qavg is fixed point number with point at Wlog */
+       qavg >>= p->Wlog;
+
+       if (qavg > p->target_max && p->max_P <= MAX_P_MAX)
+               p->max_P += MAX_P_ALPHA(p->max_P); /* maxp = maxp + alpha */
+       else if (qavg < p->target_min && p->max_P >= MAX_P_MIN)
+               p->max_P = (p->max_P/10)*9; /* maxp = maxp * Beta */
+
+       max_p_delta = DIV_ROUND_CLOSEST(p->max_P, p->qth_delta);
+       max_p_delta = max(max_p_delta, 1U);
+       p->max_P_reciprocal = reciprocal_value(max_p_delta);
+}
 #endif
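
The fixed-point bookkeeping above can be exercised outside the kernel. Below is a small self-contained userspace sketch of one max_P adaptation step, using the same alpha = min(1%, max_P/4) increase and 0.9 decrease described in the comments; it is an illustration, not kernel code.

/* Standalone illustration of the adaptive max_P update; max_P is Q0.32
 * fixed point, i.e. 1% maps to roughly 2^32 / 100.
 */
#include <stdint.h>
#include <stdio.h>

#define RED_1PCT	((uint32_t)(((1ULL << 32) + 50) / 100))
#define MAXP_MIN	(1 * RED_1PCT)
#define MAXP_MAX	(50 * RED_1PCT)

static uint32_t adapt_max_p(uint32_t max_p, unsigned long qavg,
			    unsigned long target_min, unsigned long target_max)
{
	if (qavg > target_max && max_p <= MAXP_MAX) {
		uint32_t alpha = max_p / 4;

		if (alpha > MAXP_MIN)		/* alpha = min(1%, max_p / 4) */
			alpha = MAXP_MIN;
		max_p += alpha;
	} else if (qavg < target_min && max_p >= MAXP_MIN) {
		max_p = (max_p / 10) * 9;	/* beta = 0.9 */
	}
	return max_p;
}

int main(void)
{
	uint32_t max_p = 2 * RED_1PCT;			/* start at 2% */

	max_p = adapt_max_p(max_p, 120, 40, 60);	/* avg queue above target */
	printf("max_p ~= %.3f%%\n", 100.0 * max_p / 4294967296.0);
	return 0;
}
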
index 6a72a58..d368561 100644 (file)
@@ -71,7 +71,7 @@
 #include <linux/jiffies.h>
 #include <linux/idr.h>
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
 #endif
@@ -383,7 +383,7 @@ static inline void sctp_sysctl_unregister(void) { return; }
 /* Size of Supported Address Parameter for 'x' address types. */
 #define SCTP_SAT_LEN(x) (sizeof(struct sctp_paramhdr) + (x) * sizeof(__u16))
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 
 void sctp_v6_pf_init(void);
 void sctp_v6_pf_exit(void);
index 3382615..88949a9 100644 (file)
@@ -235,12 +235,15 @@ extern struct sctp_globals {
 
        /* Flag to indicate whether computing and verifying checksum
         * is disabled. */
-        int checksum_disable;
+        bool checksum_disable;
 
        /* Threshold for rwnd update SACKS.  Receive buffer shifted this many
         * bits is an indicator of when to send and window update SACK.
         */
        int rwnd_update_shift;
+
+       /* Threshold for autoclose timeout, in seconds. */
+       unsigned long max_autoclose;
 } sctp_globals;
 
 #define sctp_rto_initial               (sctp_globals.rto_initial)
@@ -281,6 +284,7 @@ extern struct sctp_globals {
 #define sctp_auth_enable               (sctp_globals.auth_enable)
 #define sctp_checksum_disable          (sctp_globals.checksum_disable)
 #define sctp_rwnd_upd_shift            (sctp_globals.rwnd_update_shift)
+#define sctp_max_autoclose             (sctp_globals.max_autoclose)
 
 /* SCTP Socket type: UDP or TCP style. */
 typedef enum {
@@ -365,7 +369,7 @@ static inline struct sock *sctp_opt2sk(const struct sctp_sock *sp)
        return (struct sock *)sp;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 struct sctp6_sock {
        struct sctp_sock  sctp;
        struct ipv6_pinfo inet6;
index 8ac338c..bb972d2 100644 (file)
@@ -53,6 +53,8 @@
 #include <linux/security.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#include <linux/memcontrol.h>
+#include <linux/res_counter.h>
 
 #include <linux/filter.h>
 #include <linux/rculist_nulls.h>
 #include <net/dst.h>
 #include <net/checksum.h>
 
+struct cgroup;
+struct cgroup_subsys;
+#ifdef CONFIG_NET
+int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss);
+void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss);
+#else
+static inline
+int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+       return 0;
+}
+static inline
+void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+}
+#endif
 /*
  * This structure really needs to be cleaned up.
  * Most of it is for TCP, and not used by any of
@@ -167,6 +185,7 @@ struct sock_common {
        /* public: */
 };
 
+struct cg_proto;
 /**
   *    struct sock - network layer representation of sockets
   *    @__sk_common: shared layout with inet_timewait_sock
@@ -227,6 +246,7 @@ struct sock_common {
   *    @sk_security: used by security modules
   *    @sk_mark: generic packet mark
   *    @sk_classid: this socket's cgroup classid
+  *    @sk_cgrp: this socket's cgroup-specific proto data
   *    @sk_write_pending: a write to stream socket waits to start
   *    @sk_state_change: callback to indicate change in the state of the sock
   *    @sk_data_ready: callback to indicate there is data to be processed
@@ -341,6 +361,7 @@ struct sock {
 #endif
        __u32                   sk_mark;
        u32                     sk_classid;
+       struct cg_proto         *sk_cgrp;
        void                    (*sk_state_change)(struct sock *sk);
        void                    (*sk_data_ready)(struct sock *sk, int bytes);
        void                    (*sk_write_space)(struct sock *sk);
@@ -641,12 +662,14 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 
 /*
  * Take into account size of receive queue and backlog queue
+ * Do not take this skb's truesize into account,
+ * so that even a single big packet can still come in.
  */
 static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
 {
        unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
 
-       return qsize + skb->truesize > sk->sk_rcvbuf;
+       return qsize > sk->sk_rcvbuf;
 }
 
 /* The per-socket spinlock must be held here. */
@@ -837,6 +860,37 @@ struct proto {
 #ifdef SOCK_REFCNT_DEBUG
        atomic_t                socks;
 #endif
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+       /*
+        * cgroup-specific init/deinit functions. Called once for all
+        * protocols that implement it, from the cgroup's populate function.
+        * This function has to set up any files the protocol wants to
+        * appear in the kmem cgroup filesystem.
+        */
+       int                     (*init_cgroup)(struct cgroup *cgrp,
+                                              struct cgroup_subsys *ss);
+       void                    (*destroy_cgroup)(struct cgroup *cgrp,
+                                                 struct cgroup_subsys *ss);
+       struct cg_proto         *(*proto_cgroup)(struct mem_cgroup *memcg);
+#endif
+};
+
+struct cg_proto {
+       void                    (*enter_memory_pressure)(struct sock *sk);
+       struct res_counter      *memory_allocated;      /* Current allocated memory. */
+       struct percpu_counter   *sockets_allocated;     /* Current number of sockets. */
+       int                     *memory_pressure;
+       long                    *sysctl_mem;
+       /*
+        * The memcg field is used to find the memcg we directly belong to.
+        * Each memcg struct can hold more than one cg_proto, so container_of
+        * won't really cut it.
+        *
+        * The elegant solution would be having an inverse function to
+        * proto_cgroup in struct proto, but that means polluting the structure
+        * for everybody, instead of just for memcg users.
+        */
+       struct mem_cgroup       *memcg;
 };
 
 extern int proto_register(struct proto *prot, int alloc_slab);
@@ -855,7 +909,7 @@ static inline void sk_refcnt_debug_dec(struct sock *sk)
               sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
 }
 
-static inline void sk_refcnt_debug_release(const struct sock *sk)
+inline void sk_refcnt_debug_release(const struct sock *sk)
 {
        if (atomic_read(&sk->sk_refcnt) != 1)
                printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
@@ -867,6 +921,208 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
 #define sk_refcnt_debug_release(sk) do { } while (0)
 #endif /* SOCK_REFCNT_DEBUG */
 
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+extern struct jump_label_key memcg_socket_limit_enabled;
+static inline struct cg_proto *parent_cg_proto(struct proto *proto,
+                                              struct cg_proto *cg_proto)
+{
+       return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg));
+}
+#define mem_cgroup_sockets_enabled static_branch(&memcg_socket_limit_enabled)
+#else
+#define mem_cgroup_sockets_enabled 0
+static inline struct cg_proto *parent_cg_proto(struct proto *proto,
+                                              struct cg_proto *cg_proto)
+{
+       return NULL;
+}
+#endif
+
+
+static inline bool sk_has_memory_pressure(const struct sock *sk)
+{
+       return sk->sk_prot->memory_pressure != NULL;
+}
+
+static inline bool sk_under_memory_pressure(const struct sock *sk)
+{
+       if (!sk->sk_prot->memory_pressure)
+               return false;
+
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+               return !!*sk->sk_cgrp->memory_pressure;
+
+       return !!*sk->sk_prot->memory_pressure;
+}
+
+static inline void sk_leave_memory_pressure(struct sock *sk)
+{
+       int *memory_pressure = sk->sk_prot->memory_pressure;
+
+       if (!memory_pressure)
+               return;
+
+       if (*memory_pressure)
+               *memory_pressure = 0;
+
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+               struct cg_proto *cg_proto = sk->sk_cgrp;
+               struct proto *prot = sk->sk_prot;
+
+               for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
+                       if (*cg_proto->memory_pressure)
+                               *cg_proto->memory_pressure = 0;
+       }
+
+}
+
+static inline void sk_enter_memory_pressure(struct sock *sk)
+{
+       if (!sk->sk_prot->enter_memory_pressure)
+               return;
+
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+               struct cg_proto *cg_proto = sk->sk_cgrp;
+               struct proto *prot = sk->sk_prot;
+
+               for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
+                       cg_proto->enter_memory_pressure(sk);
+       }
+
+       sk->sk_prot->enter_memory_pressure(sk);
+}
+
+static inline long sk_prot_mem_limits(const struct sock *sk, int index)
+{
+       long *prot = sk->sk_prot->sysctl_mem;
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+               prot = sk->sk_cgrp->sysctl_mem;
+       return prot[index];
+}
+
+static inline void memcg_memory_allocated_add(struct cg_proto *prot,
+                                             unsigned long amt,
+                                             int *parent_status)
+{
+       struct res_counter *fail;
+       int ret;
+
+       ret = res_counter_charge(prot->memory_allocated,
+                                amt << PAGE_SHIFT, &fail);
+
+       if (ret < 0)
+               *parent_status = OVER_LIMIT;
+}
+
+static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
+                                             unsigned long amt)
+{
+       res_counter_uncharge(prot->memory_allocated, amt << PAGE_SHIFT);
+}
+
+static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
+{
+       u64 ret;
+       ret = res_counter_read_u64(prot->memory_allocated, RES_USAGE);
+       return ret >> PAGE_SHIFT;
+}
+
+static inline long
+sk_memory_allocated(const struct sock *sk)
+{
+       struct proto *prot = sk->sk_prot;
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+               return memcg_memory_allocated_read(sk->sk_cgrp);
+
+       return atomic_long_read(prot->memory_allocated);
+}
+
+static inline long
+sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status)
+{
+       struct proto *prot = sk->sk_prot;
+
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+               memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status);
+               /* update the root cgroup regardless */
+               atomic_long_add_return(amt, prot->memory_allocated);
+               return memcg_memory_allocated_read(sk->sk_cgrp);
+       }
+
+       return atomic_long_add_return(amt, prot->memory_allocated);
+}
+
+static inline void
+sk_memory_allocated_sub(struct sock *sk, int amt, int parent_status)
+{
+       struct proto *prot = sk->sk_prot;
+
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
+           parent_status != OVER_LIMIT) /* Otherwise was uncharged already */
+               memcg_memory_allocated_sub(sk->sk_cgrp, amt);
+
+       atomic_long_sub(amt, prot->memory_allocated);
+}
+
+static inline void sk_sockets_allocated_dec(struct sock *sk)
+{
+       struct proto *prot = sk->sk_prot;
+
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+               struct cg_proto *cg_proto = sk->sk_cgrp;
+
+               for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
+                       percpu_counter_dec(cg_proto->sockets_allocated);
+       }
+
+       percpu_counter_dec(prot->sockets_allocated);
+}
+
+static inline void sk_sockets_allocated_inc(struct sock *sk)
+{
+       struct proto *prot = sk->sk_prot;
+
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+               struct cg_proto *cg_proto = sk->sk_cgrp;
+
+               for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
+                       percpu_counter_inc(cg_proto->sockets_allocated);
+       }
+
+       percpu_counter_inc(prot->sockets_allocated);
+}
+
+static inline int
+sk_sockets_allocated_read_positive(struct sock *sk)
+{
+       struct proto *prot = sk->sk_prot;
+
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+               return percpu_counter_sum_positive(sk->sk_cgrp->sockets_allocated);
+
+       return percpu_counter_sum_positive(prot->sockets_allocated);
+}
+
+static inline int
+proto_sockets_allocated_sum_positive(struct proto *prot)
+{
+       return percpu_counter_sum_positive(prot->sockets_allocated);
+}
+
+static inline long
+proto_memory_allocated(struct proto *prot)
+{
+       return atomic_long_read(prot->memory_allocated);
+}
+
+static inline bool
+proto_memory_pressure(struct proto *prot)
+{
+       if (!prot->memory_pressure)
+               return false;
+       return !!*prot->memory_pressure;
+}
+
 
 #ifdef CONFIG_PROC_FS
 /* Called with local bh disabled */
@@ -1674,7 +1930,7 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk)
 
        page = alloc_pages(sk->sk_allocation, 0);
        if (!page) {
-               sk->sk_prot->enter_memory_pressure(sk);
+               sk_enter_memory_pressure(sk);
                sk_stream_moderate_sndbuf(sk);
        }
        return page;
index 87e3c80..0118ea9 100644 (file)
@@ -44,6 +44,7 @@
 #include <net/dst.h>
 
 #include <linux/seq_file.h>
+#include <linux/memcontrol.h>
 
 extern struct inet_hashinfo tcp_hashinfo;
 
@@ -229,7 +230,6 @@ extern int sysctl_tcp_fack;
 extern int sysctl_tcp_reordering;
 extern int sysctl_tcp_ecn;
 extern int sysctl_tcp_dsack;
-extern long sysctl_tcp_mem[3];
 extern int sysctl_tcp_wmem[3];
 extern int sysctl_tcp_rmem[3];
 extern int sysctl_tcp_app_win;
@@ -285,7 +285,7 @@ static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
        }
 
        if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
-           atomic_long_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
+           sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
                return true;
        return false;
 }
@@ -628,7 +628,7 @@ extern u32 __tcp_select_window(struct sock *sk);
 struct tcp_skb_cb {
        union {
                struct inet_skb_parm    h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                struct inet6_skb_parm   h6;
 #endif
        } header;       /* For incoming frames          */
@@ -773,12 +773,12 @@ static inline int tcp_is_reno(const struct tcp_sock *tp)
 
 static inline int tcp_is_fack(const struct tcp_sock *tp)
 {
-       return tp->rx_opt.sack_ok & 2;
+       return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
 }
 
 static inline void tcp_enable_fack(struct tcp_sock *tp)
 {
-       tp->rx_opt.sack_ok |= 2;
+       tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
 }
 
 static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
@@ -1152,7 +1152,7 @@ struct tcp6_md5sig_key {
 /* - sock block */
 struct tcp_md5sig_info {
        struct tcp4_md5sig_key  *keys4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct tcp6_md5sig_key  *keys6;
        u32                     entries6;
        u32                     alloced6;
@@ -1179,7 +1179,7 @@ struct tcp6_pseudohdr {
 
 union tcp_md5sum_block {
        struct tcp4_pseudohdr ip4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct tcp6_pseudohdr ip6;
 #endif
 };
diff --git a/include/net/tcp_memcontrol.h b/include/net/tcp_memcontrol.h
new file mode 100644 (file)
index 0000000..3512082
--- /dev/null
@@ -0,0 +1,19 @@
+#ifndef _TCP_MEMCG_H
+#define _TCP_MEMCG_H
+
+struct tcp_memcontrol {
+       struct cg_proto cg_proto;
+       /* per-cgroup tcp memory pressure knobs */
+       struct res_counter tcp_memory_allocated;
+       struct percpu_counter tcp_sockets_allocated;
+       /* those two are read-mostly, leave them at the end */
+       long tcp_prot_mem[3];
+       int tcp_memory_pressure;
+};
+
+struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg);
+int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss);
+void tcp_destroy_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss);
+unsigned long long tcp_max_memory(const struct mem_cgroup *memcg);
+void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx);
+#endif /* _TCP_MEMCG_H */
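
These entry points are presumably what gets wired into the new cgroup hooks that struct proto grew in sock.h; a hedged sketch of that wiring (all other proto members elided, not a verbatim copy of the real tcp_prot) is:

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
/* Sketch only: connect the tcp_memcontrol entry points to struct proto. */
struct proto tcp_prot_sketch = {
	.name		= "TCP",
	/* ... the usual TCP proto ops, sysctl limits, etc. ... */
	.init_cgroup	= tcp_init_cgroup,
	.destroy_cgroup	= tcp_destroy_cgroup,
	.proto_cgroup	= tcp_proto_cgroup,
};
#endif
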
index f54a515..e39592f 100644 (file)
@@ -41,7 +41,7 @@
 struct udp_skb_cb {
        union {
                struct inet_skb_parm    h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                struct inet6_skb_parm   h6;
 #endif
        } header;
@@ -194,9 +194,15 @@ extern int udp_lib_setsockopt(struct sock *sk, int level, int optname,
 extern struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
                                    __be32 daddr, __be16 dport,
                                    int dif);
+extern struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
+                                   __be32 daddr, __be16 dport,
+                                   int dif, struct udp_table *tbl);
 extern struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
                                    const struct in6_addr *daddr, __be16 dport,
                                    int dif);
+extern struct sock *__udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
+                                   const struct in6_addr *daddr, __be16 dport,
+                                   int dif, struct udp_table *tbl);
 
 /*
  *     SNMP statistics for UDP and UDP-Lite
@@ -217,7 +223,7 @@ extern struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *sadd
        else        SNMP_INC_STATS_USER((net)->mib.udp_stats_in6, field);      \
 } while(0)
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #define UDPX_INC_STATS_BH(sk, field) \
        do { \
                if ((sk)->sk_family == AF_INET) \
index d1e95c6..5a35a2a 100644 (file)
@@ -147,6 +147,7 @@ struct fcoe_ctlr {
        u8 map_dest;
        u8 spma;
        u8 probe_tries;
+       u8 priority;
        u8 dest_addr[ETH_ALEN];
        u8 ctl_src_addr[ETH_ALEN];
 
@@ -301,6 +302,7 @@ struct fcoe_percpu_s {
  * @lport:                    The associated local port
  * @fcoe_pending_queue:               The pending Rx queue of skbs
  * @fcoe_pending_queue_active: Indicates if the pending queue is active
+ * @priority:                 Packet priority (DCB)
  * @max_queue_depth:          Max queue depth of pending queue
  * @min_queue_depth:          Min queue depth of pending queue
  * @timer:                    The queue timer
@@ -316,6 +318,7 @@ struct fcoe_port {
        struct fc_lport       *lport;
        struct sk_buff_head   fcoe_pending_queue;
        u8                    fcoe_pending_queue_active;
+       u8                    priority;
        u32                   max_queue_depth;
        u32                   min_queue_depth;
        struct timer_list     timer;
index 7f5fed3..6873c7d 100644 (file)
@@ -103,9 +103,10 @@ enum se_cmd_flags_table {
        SCF_SCSI_NON_DATA_CDB           = 0x00000040,
        SCF_SCSI_CDB_EXCEPTION          = 0x00000080,
        SCF_SCSI_RESERVATION_CONFLICT   = 0x00000100,
-       SCF_SE_CMD_FAILED               = 0x00000400,
+       SCF_FUA                         = 0x00000200,
        SCF_SE_LUN_CMD                  = 0x00000800,
        SCF_SE_ALLOW_EOO                = 0x00001000,
+       SCF_BIDI                        = 0x00002000,
        SCF_SENT_CHECK_CONDITION        = 0x00004000,
        SCF_OVERFLOW_BIT                = 0x00008000,
        SCF_UNDERFLOW_BIT               = 0x00010000,
@@ -154,6 +155,7 @@ enum tcm_sense_reason_table {
        TCM_CHECK_CONDITION_ABORT_CMD           = 0x0d,
        TCM_CHECK_CONDITION_UNIT_ATTENTION      = 0x0e,
        TCM_CHECK_CONDITION_NOT_READY           = 0x0f,
+       TCM_RESERVATION_CONFLICT                = 0x10,
 };
 
 struct se_obj {
@@ -211,7 +213,6 @@ struct t10_alua_lu_gp {
        u16     lu_gp_id;
        int     lu_gp_valid_id;
        u32     lu_gp_members;
-       atomic_t lu_gp_shutdown;
        atomic_t lu_gp_ref_cnt;
        spinlock_t lu_gp_lock;
        struct config_group lu_gp_group;
@@ -422,11 +423,9 @@ struct se_cmd {
        int                     sam_task_attr;
        /* Transport protocol dependent state, see transport_state_table */
        enum transport_state_table t_state;
-       /* Transport specific error status */
-       int                     transport_error_status;
        /* Used to signal cmd->se_tfo->check_release_cmd() usage per cmd */
-       int                     check_release:1;
-       int                     cmd_wait_set:1;
+       unsigned                check_release:1;
+       unsigned                cmd_wait_set:1;
        /* See se_cmd_flags_table */
        u32                     se_cmd_flags;
        u32                     se_ordered_id;
@@ -441,13 +440,10 @@ struct se_cmd {
        /* Used for sense data */
        void                    *sense_buffer;
        struct list_head        se_delayed_node;
-       struct list_head        se_ordered_node;
        struct list_head        se_lun_node;
        struct list_head        se_qf_node;
        struct se_device      *se_dev;
        struct se_dev_entry   *se_deve;
-       struct se_device        *se_obj_ptr;
-       struct se_device        *se_orig_obj_ptr;
        struct se_lun           *se_lun;
        /* Only used for internal passthrough and legacy TCM fabric modules */
        struct se_session       *se_sess;
@@ -463,8 +459,6 @@ struct se_cmd {
        unsigned char           __t_task_cdb[TCM_MAX_COMMAND_SIZE];
        unsigned long long      t_task_lba;
        int                     t_tasks_failed;
-       int                     t_tasks_fua;
-       bool                    t_tasks_bidi;
        u32                     t_tasks_sg_chained_no;
        atomic_t                t_fe_count;
        atomic_t                t_se_count;
@@ -489,14 +483,6 @@ struct se_cmd {
 
        struct work_struct      work;
 
-       /*
-        * Used for pre-registered fabric SGL passthrough WRITE and READ
-        * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
-        * and other HW target mode fabric modules.
-        */
-       struct scatterlist      *t_task_pt_sgl;
-       u32                     t_task_pt_sgl_num;
-
        struct scatterlist      *t_data_sg;
        unsigned int            t_data_nents;
        struct scatterlist      *t_bidi_data_sg;
@@ -562,7 +548,7 @@ struct se_node_acl {
 } ____cacheline_aligned;
 
 struct se_session {
-       int                     sess_tearing_down:1;
+       unsigned                sess_tearing_down:1;
        u64                     sess_bin_isid;
        struct se_node_acl      *se_node_acl;
        struct se_portal_group *se_tpg;
@@ -683,7 +669,6 @@ struct se_subsystem_dev {
        struct t10_reservation t10_pr;
        spinlock_t      se_dev_lock;
        void            *se_dev_su_ptr;
-       struct list_head se_dev_node;
        struct config_group se_dev_group;
        /* For T10 Reservations */
        struct config_group se_dev_pr_group;
@@ -692,9 +677,6 @@ struct se_subsystem_dev {
 } ____cacheline_aligned;
 
 struct se_device {
-       /* Set to 1 if thread is NOT sleeping on thread_sem */
-       u8                      thread_active;
-       u8                      dev_status_timer_flags;
        /* RELATIVE TARGET PORT IDENTIFER Counter */
        u16                     dev_rpti_counter;
        /* Used for SAM Task Attribute ordering */
@@ -719,14 +701,10 @@ struct se_device {
        u64                     write_bytes;
        spinlock_t              stats_lock;
        /* Active commands on this virtual SE device */
-       atomic_t                active_cmds;
        atomic_t                simple_cmds;
        atomic_t                depth_left;
        atomic_t                dev_ordered_id;
-       atomic_t                dev_tur_active;
        atomic_t                execute_tasks;
-       atomic_t                dev_status_thr_count;
-       atomic_t                dev_hoq_count;
        atomic_t                dev_ordered_sync;
        atomic_t                dev_qf_count;
        struct se_obj           dev_obj;
@@ -734,14 +712,9 @@ struct se_device {
        struct se_obj           dev_export_obj;
        struct se_queue_obj     dev_queue_obj;
        spinlock_t              delayed_cmd_lock;
-       spinlock_t              ordered_cmd_lock;
        spinlock_t              execute_task_lock;
-       spinlock_t              state_task_lock;
-       spinlock_t              dev_alua_lock;
        spinlock_t              dev_reservation_lock;
-       spinlock_t              dev_state_lock;
        spinlock_t              dev_status_lock;
-       spinlock_t              dev_status_thr_lock;
        spinlock_t              se_port_lock;
        spinlock_t              se_tmr_lock;
        spinlock_t              qf_cmd_lock;
@@ -753,14 +726,10 @@ struct se_device {
        struct t10_pr_registration *dev_pr_res_holder;
        struct list_head        dev_sep_list;
        struct list_head        dev_tmr_list;
-       struct timer_list       dev_status_timer;
        /* Pointer to descriptor for processing thread */
        struct task_struct      *process_thread;
-       pid_t                   process_thread_pid;
-       struct task_struct              *dev_mgmt_thread;
        struct work_struct      qf_work_queue;
        struct list_head        delayed_cmd_list;
-       struct list_head        ordered_cmd_list;
        struct list_head        execute_task_list;
        struct list_head        state_task_list;
        struct list_head        qf_cmd_list;
@@ -771,8 +740,6 @@ struct se_device {
        struct se_subsystem_api *transport;
        /* Linked list for struct se_hba struct se_device list */
        struct list_head        dev_list;
-       /* Linked list for struct se_global->g_se_dev_list */
-       struct list_head        g_se_dev_list;
 }  ____cacheline_aligned;
 
 struct se_hba {
@@ -834,7 +801,6 @@ struct se_port {
        u32             sep_index;
        struct scsi_port_stats sep_stats;
        /* Used for ALUA Target Port Groups membership */
-       atomic_t        sep_tg_pt_gp_active;
        atomic_t        sep_tg_pt_secondary_offline;
        /* Used for PR ALL_TG_PT=1 */
        atomic_t        sep_tg_pt_ref_cnt;
index c16e943..dac4f2d 100644 (file)
 
 #define PYX_TRANSPORT_STATUS_INTERVAL          5 /* In seconds */
 
-#define PYX_TRANSPORT_SENT_TO_TRANSPORT                0
-#define PYX_TRANSPORT_WRITE_PENDING            1
-
-#define PYX_TRANSPORT_UNKNOWN_SAM_OPCODE       -1
-#define PYX_TRANSPORT_HBA_QUEUE_FULL           -2
-#define PYX_TRANSPORT_REQ_TOO_MANY_SECTORS     -3
-#define PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES  -4
-#define PYX_TRANSPORT_INVALID_CDB_FIELD                -5
-#define PYX_TRANSPORT_INVALID_PARAMETER_LIST   -6
-#define PYX_TRANSPORT_LU_COMM_FAILURE          -7
-#define PYX_TRANSPORT_UNKNOWN_MODE_PAGE                -8
-#define PYX_TRANSPORT_WRITE_PROTECTED          -9
-#define PYX_TRANSPORT_RESERVATION_CONFLICT     -10
-#define PYX_TRANSPORT_ILLEGAL_REQUEST          -11
-#define PYX_TRANSPORT_USE_SENSE_REASON         -12
-
-#ifndef SAM_STAT_RESERVATION_CONFLICT
-#define SAM_STAT_RESERVATION_CONFLICT          0x18
-#endif
-
-#define TRANSPORT_PLUGIN_FREE                  0
-#define TRANSPORT_PLUGIN_REGISTERED            1
-
 #define TRANSPORT_PLUGIN_PHBA_PDEV             1
 #define TRANSPORT_PLUGIN_VHBA_PDEV             2
 #define TRANSPORT_PLUGIN_VHBA_VDEV             3
@@ -158,7 +135,6 @@ extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
 extern int transport_handle_cdb_direct(struct se_cmd *);
 extern int transport_generic_handle_cdb_map(struct se_cmd *);
 extern int transport_generic_handle_data(struct se_cmd *);
-extern void transport_new_cmd_failure(struct se_cmd *);
 extern int transport_generic_handle_tmr(struct se_cmd *);
 extern bool target_stop_task(struct se_task *task, unsigned long *flags);
 extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
index f0b6890..f6f07aa 100644 (file)
@@ -29,8 +29,7 @@ enum xsd_sockmsg_type
     XS_IS_DOMAIN_INTRODUCED,
     XS_RESUME,
     XS_SET_TARGET,
-    XS_RESTRICT,
-    XS_RESET_WATCHES
+    XS_RESTRICT
 };
 
 #define XS_WRITE_NONE "NONE"
index 43298f9..b8930d5 100644 (file)
@@ -689,6 +689,17 @@ config CGROUP_MEM_RES_CTLR_SWAP_ENABLED
          Those who want to have the feature enabled by default should
          select this option (if, for some reason, they need to disable it
          then swapaccount=0 does the trick).
+config CGROUP_MEM_RES_CTLR_KMEM
+       bool "Memory Resource Controller Kernel Memory accounting (EXPERIMENTAL)"
+       depends on CGROUP_MEM_RES_CTLR && EXPERIMENTAL
+       default n
+       help
+         The Kernel Memory extension for Memory Resource Controller can limit
+         the amount of memory used by kernel objects in the system. Those are
+         fundamentally different from the entities handled by the standard
+         Memory Controller, which are page-based, and can be swapped. Users of
+         the kmem extension can use it to guarantee that no group of processes
+         will ever exhaust kernel resources alone.
 
 config CGROUP_PERF
        bool "Enable perf_event per-cpu per-container group (cgroup) monitoring"
index 2e0ecfc..5b4293d 100644 (file)
@@ -1269,7 +1269,7 @@ void mq_clear_sbinfo(struct ipc_namespace *ns)
 
 void mq_put_mnt(struct ipc_namespace *ns)
 {
-       mntput(ns->mq_mnt);
+       kern_unmount(ns->mq_mnt);
 }
 
 static int __init init_mqueue_fs(void)
@@ -1291,11 +1291,9 @@ static int __init init_mqueue_fs(void)
 
        spin_lock_init(&mq_lock);
 
-       init_ipc_ns.mq_mnt = kern_mount_data(&mqueue_fs_type, &init_ipc_ns);
-       if (IS_ERR(init_ipc_ns.mq_mnt)) {
-               error = PTR_ERR(init_ipc_ns.mq_mnt);
+       error = mq_init_ns(&init_ipc_ns);
+       if (error)
                goto out_filesystem;
-       }
 
        return 0;
 
index 8b5ce5d..5652101 100644 (file)
@@ -27,11 +27,6 @@ DEFINE_SPINLOCK(mq_lock);
  */
 struct ipc_namespace init_ipc_ns = {
        .count          = ATOMIC_INIT(1),
-#ifdef CONFIG_POSIX_MQUEUE
-       .mq_queues_max   = DFLT_QUEUESMAX,
-       .mq_msg_max      = DFLT_MSGMAX,
-       .mq_msgsize_max  = DFLT_MSGSIZEMAX,
-#endif
        .user_ns = &init_user_ns,
 };
 
index d9d5648..a184470 100644 (file)
@@ -2098,11 +2098,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
                        continue;
                /* get old css_set pointer */
                task_lock(tsk);
-               if (tsk->flags & PF_EXITING) {
-                       /* ignore this task if it's going away */
-                       task_unlock(tsk);
-                       continue;
-               }
                oldcg = tsk->cgroups;
                get_css_set(oldcg);
                task_unlock(tsk);
index 9fe58c4..0b1712d 100644 (file)
@@ -123,6 +123,19 @@ static inline struct cpuset *task_cs(struct task_struct *task)
                            struct cpuset, css);
 }
 
+#ifdef CONFIG_NUMA
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+       return task->mempolicy;
+}
+#else
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+       return false;
+}
+#endif
+
+
 /* bits in struct cpuset flags field */
 typedef enum {
        CS_CPU_EXCLUSIVE,
@@ -949,7 +962,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 static void cpuset_change_task_nodemask(struct task_struct *tsk,
                                        nodemask_t *newmems)
 {
-       bool masks_disjoint = !nodes_intersects(*newmems, tsk->mems_allowed);
+       bool need_loop;
 
 repeat:
        /*
@@ -962,6 +975,14 @@ repeat:
                return;
 
        task_lock(tsk);
+       /*
+        * Determine if a loop is necessary if another thread is doing
+        * get_mems_allowed().  If at least one node remains unchanged and
+        * tsk does not have a mempolicy, then an empty nodemask will not be
+        * possible when mems_allowed is larger than a word.
+        */
+       need_loop = task_has_mempolicy(tsk) ||
+                       !nodes_intersects(*newmems, tsk->mems_allowed);
        nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
        mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
 
@@ -981,11 +1002,9 @@ repeat:
 
        /*
         * Allocation of memory is very fast, we needn't sleep when waiting
-        * for the read-side.  No wait is necessary, however, if at least one
-        * node remains unchanged.
+        * for the read-side.
         */
-       while (masks_disjoint &&
-                       ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
+       while (need_loop && ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
                task_unlock(tsk);
                if (!task_curr(tsk))
                        yield();
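
Editorial note: the need_loop condition introduced above boils down to a small predicate. Below is a minimal, stand-alone sketch (plain C, with an unsigned long standing in for nodemask_t, purely hypothetical mask values, and a made-up helper name example_need_loop) that restates the condition from the comment and exercises the two interesting cases.

/*
 * Per the comment in cpuset_change_task_nodemask() above: a wait loop is
 * only required when the task has a mempolicy, or when no node survives
 * the mask change (old and new masks are disjoint).
 */
#include <stdbool.h>
#include <stdio.h>

static bool example_need_loop(bool has_mempolicy, unsigned long old_mems,
			      unsigned long new_mems)
{
	return has_mempolicy || !(old_mems & new_mems);
}

int main(void)
{
	/* nodes {0,1} -> {1,2}: node 1 remains set, no wait loop needed */
	printf("%d\n", example_need_loop(false, 0x3, 0x6));	/* prints 0 */
	/* nodes {0} -> {1}: masks are disjoint, must wait for readers */
	printf("%d\n", example_need_loop(false, 0x1, 0x2));	/* prints 1 */
	return 0;
}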
index 0e8457d..58690af 100644 (file)
@@ -185,6 +185,9 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
 static void update_context_time(struct perf_event_context *ctx);
 static u64 perf_event_time(struct perf_event *event);
 
+static void ring_buffer_attach(struct perf_event *event,
+                              struct ring_buffer *rb);
+
 void __weak perf_event_print_debug(void)       { }
 
 extern __weak const char *perf_pmu_name(void)
@@ -2171,9 +2174,10 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
         */
        cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 
-       perf_event_sched_in(cpuctx, ctx, task);
+       if (ctx->nr_events)
+               cpuctx->task_ctx = ctx;
 
-       cpuctx->task_ctx = ctx;
+       perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
 
        perf_pmu_enable(ctx->pmu);
        perf_ctx_unlock(cpuctx, ctx);
@@ -3190,12 +3194,33 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
        struct ring_buffer *rb;
        unsigned int events = POLL_HUP;
 
+       /*
+        * Race between perf_event_set_output() and perf_poll(): perf_poll()
+        * grabs the rb reference but perf_event_set_output() overrides it.
+        * Here is the timeline for two threads T1, T2:
+        * t0: T1, rb = rcu_dereference(event->rb)
+        * t1: T2, old_rb = event->rb
+        * t2: T2, event->rb = new rb
+        * t3: T2, ring_buffer_detach(old_rb)
+        * t4: T1, ring_buffer_attach(rb)
+        * t5: T1, poll_wait(event->waitq)
+        *
+        * To avoid this problem, we grab mmap_mutex in perf_poll()
+        * thereby ensuring that the assignment of the new ring buffer
+        * and the detachment of the old buffer appear atomic to perf_poll()
+        */
+       mutex_lock(&event->mmap_mutex);
+
        rcu_read_lock();
        rb = rcu_dereference(event->rb);
-       if (rb)
+       if (rb) {
+               ring_buffer_attach(event, rb);
                events = atomic_xchg(&rb->poll, 0);
+       }
        rcu_read_unlock();
 
+       mutex_unlock(&event->mmap_mutex);
+
        poll_wait(file, &event->waitq, wait);
 
        return events;
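
Editorial note: the race description above pairs with the perf_event_set_output() hunk further down in this file. The condensed sketch below (hypothetical function names sketch_poll_side and sketch_set_output_side, not verbatim kernel code) shows how both sides now serialize on event->mmap_mutex, so a poller can never observe a half-switched ring buffer.

/*
 * Reader side, cf. perf_poll() above: attach to the buffer and sample its
 * poll state only while holding mmap_mutex.
 */
static unsigned int sketch_poll_side(struct perf_event *event)
{
	struct ring_buffer *rb;
	unsigned int events = POLL_HUP;

	mutex_lock(&event->mmap_mutex);
	rcu_read_lock();
	rb = rcu_dereference(event->rb);
	if (rb) {
		ring_buffer_attach(event, rb);		/* join rb->event_list */
		events = atomic_xchg(&rb->poll, 0);
	}
	rcu_read_unlock();
	mutex_unlock(&event->mmap_mutex);

	return events;
}

/*
 * Writer side, cf. perf_event_set_output() below: publish the new buffer and
 * detach from the old one under the same mutex.
 */
static void sketch_set_output_side(struct perf_event *event, struct ring_buffer *rb)
{
	struct ring_buffer *old_rb;

	mutex_lock(&event->mmap_mutex);
	old_rb = event->rb;
	rcu_assign_pointer(event->rb, rb);
	if (old_rb)
		ring_buffer_detach(event, old_rb);	/* leave old list, wake waiters */
	mutex_unlock(&event->mmap_mutex);
}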
@@ -3496,6 +3521,53 @@ unlock:
        return ret;
 }
 
+static void ring_buffer_attach(struct perf_event *event,
+                              struct ring_buffer *rb)
+{
+       unsigned long flags;
+
+       if (!list_empty(&event->rb_entry))
+               return;
+
+       spin_lock_irqsave(&rb->event_lock, flags);
+       if (!list_empty(&event->rb_entry))
+               goto unlock;
+
+       list_add(&event->rb_entry, &rb->event_list);
+unlock:
+       spin_unlock_irqrestore(&rb->event_lock, flags);
+}
+
+static void ring_buffer_detach(struct perf_event *event,
+                              struct ring_buffer *rb)
+{
+       unsigned long flags;
+
+       if (list_empty(&event->rb_entry))
+               return;
+
+       spin_lock_irqsave(&rb->event_lock, flags);
+       list_del_init(&event->rb_entry);
+       wake_up_all(&event->waitq);
+       spin_unlock_irqrestore(&rb->event_lock, flags);
+}
+
+static void ring_buffer_wakeup(struct perf_event *event)
+{
+       struct ring_buffer *rb;
+
+       rcu_read_lock();
+       rb = rcu_dereference(event->rb);
+       if (!rb)
+               goto unlock;
+
+       list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
+               wake_up_all(&event->waitq);
+
+unlock:
+       rcu_read_unlock();
+}
+
 static void rb_free_rcu(struct rcu_head *rcu_head)
 {
        struct ring_buffer *rb;
@@ -3521,9 +3593,19 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event)
 
 static void ring_buffer_put(struct ring_buffer *rb)
 {
+       struct perf_event *event, *n;
+       unsigned long flags;
+
        if (!atomic_dec_and_test(&rb->refcount))
                return;
 
+       spin_lock_irqsave(&rb->event_lock, flags);
+       list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
+               list_del_init(&event->rb_entry);
+               wake_up_all(&event->waitq);
+       }
+       spin_unlock_irqrestore(&rb->event_lock, flags);
+
        call_rcu(&rb->rcu_head, rb_free_rcu);
 }
 
@@ -3546,6 +3628,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
                atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
                vma->vm_mm->pinned_vm -= event->mmap_locked;
                rcu_assign_pointer(event->rb, NULL);
+               ring_buffer_detach(event, rb);
                mutex_unlock(&event->mmap_mutex);
 
                ring_buffer_put(rb);
@@ -3700,7 +3783,7 @@ static const struct file_operations perf_fops = {
 
 void perf_event_wakeup(struct perf_event *event)
 {
-       wake_up_all(&event->waitq);
+       ring_buffer_wakeup(event);
 
        if (event->pending_kill) {
                kill_fasync(&event->fasync, SIGIO, event->pending_kill);
@@ -5822,6 +5905,8 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
        INIT_LIST_HEAD(&event->group_entry);
        INIT_LIST_HEAD(&event->event_entry);
        INIT_LIST_HEAD(&event->sibling_list);
+       INIT_LIST_HEAD(&event->rb_entry);
+
        init_waitqueue_head(&event->waitq);
        init_irq_work(&event->pending, perf_pending_event);
 
@@ -6028,6 +6113,8 @@ set:
 
        old_rb = event->rb;
        rcu_assign_pointer(event->rb, rb);
+       if (old_rb)
+               ring_buffer_detach(event, old_rb);
        ret = 0;
 unlock:
        mutex_unlock(&event->mmap_mutex);
index 09097dd..64568a6 100644 (file)
@@ -22,6 +22,9 @@ struct ring_buffer {
        local_t                         lost;           /* nr records lost   */
 
        long                            watermark;      /* wakeup watermark  */
+       /* poll crap */
+       spinlock_t                      event_lock;
+       struct list_head                event_list;
 
        struct perf_event_mmap_page     *user_page;
        void                            *data_pages[0];
index a2a2920..7f3011c 100644 (file)
@@ -209,6 +209,9 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
                rb->writable = 1;
 
        atomic_set(&rb->refcount, 1);
+
+       INIT_LIST_HEAD(&rb->event_list);
+       spin_lock_init(&rb->event_lock);
 }
 
 #ifndef CONFIG_PERF_USE_VMALLOC
index 0e2b179..1da999f 100644 (file)
@@ -623,8 +623,9 @@ static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
 
 static int irq_wait_for_interrupt(struct irqaction *action)
 {
+       set_current_state(TASK_INTERRUPTIBLE);
+
        while (!kthread_should_stop()) {
-               set_current_state(TASK_INTERRUPTIBLE);
 
                if (test_and_clear_bit(IRQTF_RUNTHREAD,
                                       &action->thread_flags)) {
@@ -632,7 +633,9 @@ static int irq_wait_for_interrupt(struct irqaction *action)
                        return 0;
                }
                schedule();
+               set_current_state(TASK_INTERRUPTIBLE);
        }
+       __set_current_state(TASK_RUNNING);
        return -1;
 }
 
index bbdfe2a..66ff710 100644 (file)
@@ -66,8 +66,9 @@ void jump_label_inc(struct jump_label_key *key)
                return;
 
        jump_label_lock();
-       if (atomic_add_return(1, &key->enabled) == 1)
+       if (atomic_read(&key->enabled) == 0)
                jump_label_update(key, JUMP_LABEL_ENABLE);
+       atomic_inc(&key->enabled);
        jump_label_unlock();
 }
 
index e69434b..b2e08c9 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/stringify.h>
 #include <linux/bitops.h>
 #include <linux/gfp.h>
+#include <linux/kmemcheck.h>
 
 #include <asm/sections.h>
 
@@ -2948,7 +2949,12 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
                      struct lock_class_key *key, int subclass)
 {
-       memset(lock, 0, sizeof(*lock));
+       int i;
+
+       kmemcheck_mark_initialized(lock, sizeof(*lock));
+
+       for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
+               lock->class_cache[i] = NULL;
 
 #ifdef CONFIG_LOCK_STAT
        lock->cpu = raw_smp_processor_id();
index 1455a0d..7982a0a 100644 (file)
@@ -1293,10 +1293,11 @@ again:
        raw_spin_lock(&logbuf_lock);
        if (con_start != log_end)
                retry = 1;
+       raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+
        if (retry && console_trylock())
                goto again;
 
-       raw_spin_unlock_irqrestore(&logbuf_lock, flags);
        if (wake_klogd)
                wake_up_klogd();
 }
index 0e9344a..d6b149c 100644 (file)
@@ -71,6 +71,7 @@
 #include <linux/ctype.h>
 #include <linux/ftrace.h>
 #include <linux/slab.h>
+#include <linux/init_task.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
@@ -4810,6 +4811,9 @@ EXPORT_SYMBOL(wait_for_completion);
  * This waits for either a completion of a specific task to be signaled or for a
  * specified timeout to expire. The timeout is in jiffies. It is not
  * interruptible.
+ *
+ * The return value is 0 if timed out, and positive (at least 1, or number of
+ * jiffies left till timeout) if completed.
  */
 unsigned long __sched
 wait_for_completion_timeout(struct completion *x, unsigned long timeout)
@@ -4824,6 +4828,8 @@ EXPORT_SYMBOL(wait_for_completion_timeout);
  *
  * This waits for completion of a specific task to be signaled. It is
  * interruptible.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if completed.
  */
 int __sched wait_for_completion_interruptible(struct completion *x)
 {
@@ -4841,6 +4847,9 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
  *
  * This waits for either a completion of a specific task to be signaled or for a
  * specified timeout to expire. It is interruptible. The timeout is in jiffies.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
+ * positive (at least 1, or number of jiffies left till timeout) if completed.
  */
 long __sched
 wait_for_completion_interruptible_timeout(struct completion *x,
@@ -4856,6 +4865,8 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
  *
  * This waits to be signaled for completion of a specific task. It can be
  * interrupted by a kill signal.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if completed.
  */
 int __sched wait_for_completion_killable(struct completion *x)
 {
@@ -4874,6 +4885,9 @@ EXPORT_SYMBOL(wait_for_completion_killable);
  * This waits for either a completion of a specific task to be
  * signaled or for a specified timeout to expire. It can be
  * interrupted by a kill signal. The timeout is in jiffies.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
+ * positive (at least 1, or number of jiffies left till timeout) if completed.
  */
 long __sched
 wait_for_completion_killable_timeout(struct completion *x,
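
Editorial note: the return-value documentation added above maps directly onto a common driver pattern. Here is a minimal, hypothetical sketch (the helper name, the completion pointer and the 500ms timeout are illustrative, not taken from this merge):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Wait up to 500ms for a command to complete, translating the documented
 * return values of wait_for_completion_interruptible_timeout(). */
static int example_wait_for_cmd(struct completion *cmd_done)
{
	long ret = wait_for_completion_interruptible_timeout(cmd_done,
						msecs_to_jiffies(500));

	if (ret == 0)
		return -ETIMEDOUT;	/* 0: the timeout expired */
	if (ret < 0)
		return ret;		/* -ERESTARTSYS: interrupted by a signal */
	return 0;			/* > 0: completed, 'ret' jiffies were left */
}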
@@ -6099,6 +6113,9 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
         */
        idle->sched_class = &idle_sched_class;
        ftrace_graph_init_idle_task(idle, cpu);
+#if defined(CONFIG_SMP)
+       sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
+#endif
 }
 
 /*
index 5c9e679..8a39fa3 100644 (file)
@@ -772,19 +772,32 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
                list_del_leaf_cfs_rq(cfs_rq);
 }
 
+static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
+{
+       long tg_weight;
+
+       /*
+        * Use this CPU's actual weight instead of the last load_contribution
+        * to gain a more accurate current total weight. See
+        * update_cfs_rq_load_contribution().
+        */
+       tg_weight = atomic_read(&tg->load_weight);
+       tg_weight -= cfs_rq->load_contribution;
+       tg_weight += cfs_rq->load.weight;
+
+       return tg_weight;
+}
+
 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
-       long load_weight, load, shares;
+       long tg_weight, load, shares;
 
+       tg_weight = calc_tg_weight(tg, cfs_rq);
        load = cfs_rq->load.weight;
 
-       load_weight = atomic_read(&tg->load_weight);
-       load_weight += load;
-       load_weight -= cfs_rq->load_contribution;
-
        shares = (tg->shares * load);
-       if (load_weight)
-               shares /= load_weight;
+       if (tg_weight)
+               shares /= tg_weight;
 
        if (shares < MIN_SHARES)
                shares = MIN_SHARES;
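
Editorial note: as a worked illustration of the new calc_tg_weight()/calc_cfs_shares() pair, with purely hypothetical numbers (writing S for tg->shares and rw_i for each CPU's runqueue weight), the shares handed to a runqueue are roughly its fraction of the group's total weight, clipped to MIN_SHARES at the low end:

\[
\mathrm{shares}_i \approx S \cdot \frac{rw_i}{\sum_j rw_j},
\qquad\text{e.g. } S = 1024,\ rw = \{2048, 4096, 1024, 0\}
\;\Rightarrow\; \mathrm{shares}_1 \approx 1024 \cdot \frac{4096}{7168} \approx 585
\]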
@@ -1743,7 +1756,7 @@ static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
-       if (!cfs_rq->runtime_enabled || !cfs_rq->nr_running)
+       if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
                return;
 
        __return_cfs_rq_runtime(cfs_rq);
@@ -2036,36 +2049,100 @@ static void task_waking_fair(struct task_struct *p)
  * Adding load to a group doesn't make a group heavier, but can cause movement
  * of group shares between cpus. Assuming the shares were perfectly aligned one
  * can calculate the shift in shares.
+ *
+ * Calculate the effective load difference if @wl is added (subtracted) to @tg
+ * on this @cpu and results in a total addition (subtraction) of @wg to the
+ * total group weight.
+ *
+ * Given a runqueue weight distribution (rw_i) we can compute a shares
+ * distribution (s_i) using:
+ *
+ *   s_i = rw_i / \Sum rw_j                                            (1)
+ *
+ * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
+ * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
+ * shares distribution (s_i):
+ *
+ *   rw_i = {   2,   4,   1,   0 }
+ *   s_i  = { 2/7, 4/7, 1/7,   0 }
+ *
+ * As per wake_affine() we're interested in the load of two CPUs (the CPU the
+ * task used to run on and the CPU the waker is running on), we need to
+ * compute the effect of waking a task on either CPU and, in case of a sync
+ * wakeup, compute the effect of the current task going to sleep.
+ *
+ * So for a change of @wl to the local @cpu with an overall group weight change
+ * of @wg we can compute the new shares distribution (s'_i) using:
+ *
+ *   s'_i = (rw_i + @wl) / (@wg + \Sum rw_j)                           (2)
+ *
+ * Suppose we're interested in CPUs 0 and 1, and want to compute the load
+ * differences in waking a task to CPU 0. The additional task changes the
+ * weight and shares distributions like:
+ *
+ *   rw'_i = {   3,   4,   1,   0 }
+ *   s'_i  = { 3/8, 4/8, 1/8,   0 }
+ *
+ * We can then compute the difference in effective weight by using:
+ *
+ *   dw_i = S * (s'_i - s_i)                                           (3)
+ *
+ * Where 'S' is the group weight as seen by its parent.
+ *
+ * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
+ * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
+ * 4/7) times the weight of the group.
  */
 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 {
        struct sched_entity *se = tg->se[cpu];
 
-       if (!tg->parent)
+       if (!tg->parent)        /* the trivial, non-cgroup case */
                return wl;
 
        for_each_sched_entity(se) {
-               long lw, w;
+               long w, W;
 
                tg = se->my_q->tg;
-               w = se->my_q->load.weight;
 
-               /* use this cpu's instantaneous contribution */
-               lw = atomic_read(&tg->load_weight);
-               lw -= se->my_q->load_contribution;
-               lw += w + wg;
+               /*
+                * W = @wg + \Sum rw_j
+                */
+               W = wg + calc_tg_weight(tg, se->my_q);
 
-               wl += w;
+               /*
+                * w = rw_i + @wl
+                */
+               w = se->my_q->load.weight + wl;
 
-               if (lw > 0 && wl < lw)
-                       wl = (wl * tg->shares) / lw;
+               /*
+                * wl = S * s'_i; see (2)
+                */
+               if (W > 0 && w < W)
+                       wl = (w * tg->shares) / W;
                else
                        wl = tg->shares;
 
-               /* zero point is MIN_SHARES */
+               /*
+                * Per the above, wl is the new se->load.weight value; since
+                * those are clipped to [MIN_SHARES, ...) do so now. See
+                * calc_cfs_shares().
+                */
                if (wl < MIN_SHARES)
                        wl = MIN_SHARES;
+
+               /*
+                * wl = dw_i = S * (s'_i - s_i); see (3)
+                */
                wl -= se->load.weight;
+
+               /*
+                * Recursively apply this logic to all parent groups to compute
+                * the final effective load change on the root group. Since
+                * only the @tg group gets extra weight, all parent groups can
+                * only redistribute existing shares. @wl is the shift in shares
+                * resulting from this level per the above.
+                */
                wg = 0;
        }
 
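
Editorial note: a quick arithmetic check of the worked example in the comment above, where S is the group weight as seen by its parent:

\[
dw_0 = S\left(\tfrac{3}{8} - \tfrac{2}{7}\right) = S \cdot \tfrac{21 - 16}{56} = \tfrac{5}{56}\,S,
\qquad
dw_1 = S\left(\tfrac{4}{8} - \tfrac{4}{7}\right) = S \cdot \tfrac{28 - 32}{56} = -\tfrac{4}{56}\,S
\]

which matches the 5/56 and -4/56 figures quoted.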
@@ -2249,7 +2326,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
        int cpu = smp_processor_id();
        int prev_cpu = task_cpu(p);
        struct sched_domain *sd;
-       int i;
+       struct sched_group *sg;
+       int i, smt = 0;
 
        /*
         * If the task is going to be woken-up on this cpu and if it is
@@ -2269,25 +2347,40 @@ static int select_idle_sibling(struct task_struct *p, int target)
         * Otherwise, iterate the domains and find an eligible idle cpu.
         */
        rcu_read_lock();
+again:
        for_each_domain(target, sd) {
+               if (!smt && (sd->flags & SD_SHARE_CPUPOWER))
+                       continue;
+
+               if (smt && !(sd->flags & SD_SHARE_CPUPOWER))
+                       break;
+
                if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
                        break;
 
-               for_each_cpu_and(i, sched_domain_span(sd), tsk_cpus_allowed(p)) {
-                       if (idle_cpu(i)) {
-                               target = i;
-                               break;
+               sg = sd->groups;
+               do {
+                       if (!cpumask_intersects(sched_group_cpus(sg),
+                                               tsk_cpus_allowed(p)))
+                               goto next;
+
+                       for_each_cpu(i, sched_group_cpus(sg)) {
+                               if (!idle_cpu(i))
+                                       goto next;
                        }
-               }
 
-               /*
-                * Lets stop looking for an idle sibling when we reached
-                * the domain that spans the current cpu and prev_cpu.
-                */
-               if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
-                   cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
-                       break;
+                       target = cpumask_first_and(sched_group_cpus(sg),
+                                       tsk_cpus_allowed(p));
+                       goto done;
+next:
+                       sg = sg->next;
+               } while (sg != sd->groups);
+       }
+       if (!smt) {
+               smt = 1;
+               goto again;
        }
+done:
        rcu_read_unlock();
 
        return target;
@@ -3511,7 +3604,7 @@ static bool update_sd_pick_busiest(struct sched_domain *sd,
 }
 
 /**
- * update_sd_lb_stats - Update sched_group's statistics for load balancing.
+ * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
  * @sd: sched_domain whose statistics are to be updated.
  * @this_cpu: Cpu for which load balance is currently performed.
  * @idle: Idle status of this_cpu
index efa0a7b..8480224 100644 (file)
@@ -67,3 +67,4 @@ SCHED_FEAT(NONTASK_POWER, 1)
 SCHED_FEAT(TTWU_QUEUE, 1)
 
 SCHED_FEAT(FORCE_SD_OVERLAP, 0)
+SCHED_FEAT(RT_RUNTIME_SHARE, 1)
index 056cbd2..583a136 100644 (file)
@@ -560,6 +560,9 @@ static int balance_runtime(struct rt_rq *rt_rq)
 {
        int more = 0;
 
+       if (!sched_feat(RT_RUNTIME_SHARE))
+               return more;
+
        if (rt_rq->rt_time > rt_rq->rt_runtime) {
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                more = do_balance_runtime(rt_rq);
index 6318b51..a650694 100644 (file)
@@ -1354,7 +1354,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
 
        fput(file);
 out_putname:
-       putname(pathname);
+       __putname(pathname);
 out:
        return result;
 }
index c436e79..8a46f5d 100644 (file)
@@ -195,7 +195,7 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
                struct alarm *alarm;
                ktime_t expired = next->expires;
 
-               if (expired.tv64 >= now.tv64)
+               if (expired.tv64 > now.tv64)
                        break;
 
                alarm = container_of(next, struct alarm, node);
index 1ecd6ba..c4eb71c 100644 (file)
@@ -387,6 +387,7 @@ void clockevents_exchange_device(struct clock_event_device *old,
         * released list and do a notify add later.
         */
        if (old) {
+               old->event_handler = clockevents_handle_noop;
                clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
                list_del(&old->list);
                list_add(&old->list, &clockevents_released);
index cfc65e1..d3ad022 100644 (file)
@@ -548,7 +548,7 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
         * note a margin of 12.5% is used because this can be computed with
         * a shift, versus say 10% which would require division.
         */
-       return max_nsecs - (max_nsecs >> 5);
+       return max_nsecs - (max_nsecs >> 3);
 }
 
 #ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
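
Editorial note: the change from a shift of 5 to a shift of 3 here (and in the __clocksource_updatefreq_scale() hunk below) is what brings the code in line with the 12.5% margin the comment describes, since:

\[
x \gg 3 = \frac{x}{2^{3}} = 12.5\%\ \text{of } x,
\qquad
x \gg 5 = \frac{x}{2^{5}} = 3.125\%\ \text{of } x
\]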
@@ -647,7 +647,7 @@ static void clocksource_enqueue(struct clocksource *cs)
 
 /**
  * __clocksource_updatefreq_scale - Used to update clocksource with new freq
- * @t:         clocksource to be registered
+ * @cs:                clocksource to be registered
  * @scale:     Scale factor multiplied against freq to get clocksource hz
  * @freq:      clocksource frequency (cycles per second) divided by scale
  *
@@ -669,7 +669,7 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
         * ~ 0.06ppm granularity for NTP. We apply the same 12.5%
         * margin as we do in clocksource_max_deferment()
         */
-       sec = (cs->mask - (cs->mask >> 5));
+       sec = (cs->mask - (cs->mask >> 3));
        do_div(sec, freq);
        do_div(sec, scale);
        if (!sec)
@@ -699,7 +699,7 @@ EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
 
 /**
  * __clocksource_register_scale - Used to install new clocksources
- * @t:         clocksource to be registered
+ * @cs:                clocksource to be registered
  * @scale:     Scale factor multiplied against freq to get clocksource hz
  * @freq:      clocksource frequency (cycles per second) divided by scale
  *
@@ -727,7 +727,7 @@ EXPORT_SYMBOL_GPL(__clocksource_register_scale);
 
 /**
  * clocksource_register - Used to install new clocksources
- * @t:         clocksource to be registered
+ * @cs:                clocksource to be registered
  *
  * Returns -EBUSY if registration fails, zero otherwise.
  */
@@ -761,6 +761,8 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating)
 
 /**
  * clocksource_change_rating - Change the rating of a registered clocksource
+ * @cs:                clocksource to be changed
+ * @rating:    new rating
  */
 void clocksource_change_rating(struct clocksource *cs, int rating)
 {
@@ -772,6 +774,7 @@ EXPORT_SYMBOL(clocksource_change_rating);
 
 /**
  * clocksource_unregister - remove a registered clocksource
+ * @cs:        clocksource to be unregistered
  */
 void clocksource_unregister(struct clocksource *cs)
 {
@@ -787,6 +790,7 @@ EXPORT_SYMBOL(clocksource_unregister);
 /**
  * sysfs_show_current_clocksources - sysfs interface for current clocksource
  * @dev:       unused
+ * @attr:      unused
  * @buf:       char buffer to be filled with clocksource list
  *
  * Provides sysfs interface for listing current clocksource.
@@ -807,6 +811,7 @@ sysfs_show_current_clocksources(struct sys_device *dev,
 /**
  * sysfs_override_clocksource - interface for manually overriding clocksource
  * @dev:       unused
+ * @attr:      unused
  * @buf:       name of override clocksource
  * @count:     length of buffer
  *
@@ -842,6 +847,7 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
 /**
  * sysfs_show_available_clocksources - sysfs interface for listing clocksource
  * @dev:       unused
+ * @attr:      unused
  * @buf:       char buffer to be filled with clocksource list
  *
  * Provides sysfs interface for listing registered clocksources
index f954282..fd4a7b1 100644 (file)
@@ -71,7 +71,7 @@ int tick_check_broadcast_device(struct clock_event_device *dev)
             (dev->features & CLOCK_EVT_FEAT_C3STOP))
                return 0;
 
-       clockevents_exchange_device(NULL, dev);
+       clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
        tick_broadcast_device.evtdev = dev;
        if (!cpumask_empty(tick_get_broadcast_mask()))
                tick_broadcast_start_periodic(dev);
index dbaa624..9c3c62b 100644 (file)
@@ -1368,7 +1368,7 @@ SYSCALL_DEFINE0(getppid)
        int pid;
 
        rcu_read_lock();
-       pid = task_tgid_vnr(current->real_parent);
+       pid = task_tgid_vnr(rcu_dereference(current->real_parent));
        rcu_read_unlock();
 
        return pid;
index 900b409..b1e8943 100644 (file)
@@ -152,7 +152,6 @@ void clear_ftrace_function(void)
        ftrace_pid_function = ftrace_stub;
 }
 
-#undef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 /*
  * For those archs that do not test ftrace_trace_stop in their
@@ -1212,7 +1211,9 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
        if (!src->count) {
                free_ftrace_hash_rcu(*dst);
                rcu_assign_pointer(*dst, EMPTY_HASH);
-               return 0;
+               /* still need to update the function records */
+               ret = 0;
+               goto out;
        }
 
        /*
index 581876f..c212a7f 100644 (file)
@@ -1078,7 +1078,6 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
        /* First see if we did not already create this dir */
        list_for_each_entry(system, &event_subsystems, list) {
                if (strcmp(system->name, name) == 0) {
-                       __get_system(system);
                        system->nr_events++;
                        return system->entry;
                }
index 816d3d0..95dc31e 100644 (file)
@@ -1649,7 +1649,9 @@ static int replace_system_preds(struct event_subsystem *system,
                 */
                err = replace_preds(call, NULL, ps, filter_string, true);
                if (err)
-                       goto fail;
+                       call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
+               else
+                       call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
        }
 
        list_for_each_entry(call, &ftrace_events, list) {
@@ -1658,6 +1660,9 @@ static int replace_system_preds(struct event_subsystem *system,
                if (strcmp(call->class->system, system->name) != 0)
                        continue;
 
+               if (call->flags & TRACE_EVENT_FL_NO_SET_FILTER)
+                       continue;
+
                filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
                if (!filter_item)
                        goto fail_mem;
@@ -1686,7 +1691,7 @@ static int replace_system_preds(struct event_subsystem *system,
                 * replace the filter for the call.
                 */
                filter = call->filter;
-               call->filter = filter_item->filter;
+               rcu_assign_pointer(call->filter, filter_item->filter);
                filter_item->filter = filter;
 
                fail = false;
@@ -1741,7 +1746,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
                filter = call->filter;
                if (!filter)
                        goto out_unlock;
-               call->filter = NULL;
+               RCU_INIT_POINTER(call->filter, NULL);
                /* Make sure the filter is not being used */
                synchronize_sched();
                __free_filter(filter);
@@ -1782,7 +1787,7 @@ out:
         * string
         */
        tmp = call->filter;
-       call->filter = filter;
+       rcu_assign_pointer(call->filter, filter);
        if (tmp) {
                /* Make sure the call is done with the filter */
                synchronize_sched();
index 74c6c7f..fea790a 100644 (file)
@@ -245,7 +245,7 @@ static void put_hash_bucket(struct hash_bucket *bucket,
 
 static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
 {
-       return ((a->dev_addr == a->dev_addr) &&
+       return ((a->dev_addr == b->dev_addr) &&
                (a->dev == b->dev)) ? true : false;
 }
 
index 6a3bd48..75510e9 100644 (file)
@@ -1,5 +1,6 @@
 #include <asm/div64.h>
 #include <linux/reciprocal_div.h>
+#include <linux/export.h>
 
 u32 reciprocal_value(u32 k)
 {
@@ -7,3 +8,4 @@ u32 reciprocal_value(u32 k)
        do_div(val, k);
        return (u32)val;
 }
+EXPORT_SYMBOL(reciprocal_value);
index c0018f2..5f0a3c9 100644 (file)
@@ -1828,7 +1828,7 @@ repeat:
                page = __page_cache_alloc(gfp | __GFP_COLD);
                if (!page)
                        return ERR_PTR(-ENOMEM);
-               err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
+               err = add_to_page_cache_lru(page, mapping, index, gfp);
                if (unlikely(err)) {
                        page_cache_release(page);
                        if (err == -EEXIST)
@@ -1925,10 +1925,7 @@ static struct page *wait_on_page_read(struct page *page)
  * @gfp:       the page allocator flags to use if allocating
  *
  * This is the same as "read_mapping_page(mapping, index, NULL)", but with
- * any new page allocations done using the specified allocation flags. Note
- * that the Radix tree operations will still use GFP_KERNEL, so you can't
- * expect to do this atomically or anything like that - but you can pass in
- * other page requirements.
+ * any new page allocations done using the specified allocation flags.
  *
  * If the page does not get brought uptodate, return -EIO.
  */
@@ -2407,7 +2404,6 @@ static ssize_t generic_perform_write(struct file *file,
                                                iov_iter_count(i));
 
 again:
-
                /*
                 * Bring in the user page that we will copy from _first_.
                 * Otherwise there's a nasty deadlock on copying from the
@@ -2463,7 +2459,10 @@ again:
                written += copied;
 
                balance_dirty_pages_ratelimited(mapping);
-
+               if (fatal_signal_pending(current)) {
+                       status = -EINTR;
+                       break;
+               }
        } while (iov_iter_count(i));
 
        return written ? written : status;
index 4298aba..36b3d98 100644 (file)
@@ -2259,12 +2259,8 @@ static void khugepaged_do_scan(struct page **hpage)
 
 static void khugepaged_alloc_sleep(void)
 {
-       DEFINE_WAIT(wait);
-       add_wait_queue(&khugepaged_wait, &wait);
-       schedule_timeout_interruptible(
-               msecs_to_jiffies(
-                       khugepaged_alloc_sleep_millisecs));
-       remove_wait_queue(&khugepaged_wait, &wait);
+       wait_event_freezable_timeout(khugepaged_wait, false,
+                       msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
 }
 
 #ifndef CONFIG_NUMA
@@ -2313,14 +2309,10 @@ static void khugepaged_loop(void)
                if (unlikely(kthread_should_stop()))
                        break;
                if (khugepaged_has_work()) {
-                       DEFINE_WAIT(wait);
                        if (!khugepaged_scan_sleep_millisecs)
                                continue;
-                       add_wait_queue(&khugepaged_wait, &wait);
-                       schedule_timeout_interruptible(
-                               msecs_to_jiffies(
-                                       khugepaged_scan_sleep_millisecs));
-                       remove_wait_queue(&khugepaged_wait, &wait);
+                       wait_event_freezable_timeout(khugepaged_wait, false,
+                           msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
                } else if (khugepaged_enabled())
                        wait_event_freezable(khugepaged_wait,
                                             khugepaged_wait_event());
index bb28a5f..73f17c0 100644 (file)
@@ -576,6 +576,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                __SetPageTail(p);
+               set_page_count(p, 0);
                p->first_page = page;
        }
 }
index 6aff93c..94da8ee 100644 (file)
@@ -50,6 +50,8 @@
 #include <linux/cpu.h>
 #include <linux/oom.h>
 #include "internal.h"
+#include <net/sock.h>
+#include <net/tcp_memcontrol.h>
 
 #include <asm/uaccess.h>
 
@@ -286,6 +288,10 @@ struct mem_cgroup {
         */
        struct mem_cgroup_stat_cpu nocpu_base;
        spinlock_t pcp_counter_lock;
+
+#ifdef CONFIG_INET
+       struct tcp_memcontrol tcp_mem;
+#endif
 };
 
 /* Stuffs for move charges at task migration. */
@@ -365,7 +371,58 @@ enum charge_type {
 
 static void mem_cgroup_get(struct mem_cgroup *memcg);
 static void mem_cgroup_put(struct mem_cgroup *memcg);
-static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
+
+/* Writing them here to avoid exposing memcg's inner layout */
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_INET
+#include <net/sock.h>
+#include <net/ip.h>
+
+static bool mem_cgroup_is_root(struct mem_cgroup *memcg);
+void sock_update_memcg(struct sock *sk)
+{
+       /* A socket spends its whole life in the same cgroup */
+       if (sk->sk_cgrp) {
+               WARN_ON(1);
+               return;
+       }
+       if (static_branch(&memcg_socket_limit_enabled)) {
+               struct mem_cgroup *memcg;
+
+               BUG_ON(!sk->sk_prot->proto_cgroup);
+
+               rcu_read_lock();
+               memcg = mem_cgroup_from_task(current);
+               if (!mem_cgroup_is_root(memcg)) {
+                       mem_cgroup_get(memcg);
+                       sk->sk_cgrp = sk->sk_prot->proto_cgroup(memcg);
+               }
+               rcu_read_unlock();
+       }
+}
+EXPORT_SYMBOL(sock_update_memcg);
+
+void sock_release_memcg(struct sock *sk)
+{
+       if (static_branch(&memcg_socket_limit_enabled) && sk->sk_cgrp) {
+               struct mem_cgroup *memcg;
+               WARN_ON(!sk->sk_cgrp->memcg);
+               memcg = sk->sk_cgrp->memcg;
+               mem_cgroup_put(memcg);
+       }
+}
+
+struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
+{
+       if (!memcg || mem_cgroup_is_root(memcg))
+               return NULL;
+
+       return &memcg->tcp_mem.cg_proto;
+}
+EXPORT_SYMBOL(tcp_proto_cgroup);
+#endif /* CONFIG_INET */
+#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
+
 static void drain_all_stock_async(struct mem_cgroup *memcg);
 
 static struct mem_cgroup_per_zone *
@@ -745,7 +802,7 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
        preempt_enable();
 }
 
-static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
+struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
 {
        return container_of(cgroup_subsys_state(cont,
                                mem_cgroup_subsys_id), struct mem_cgroup,
@@ -4612,6 +4669,36 @@ static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
 }
 #endif /* CONFIG_NUMA */
 
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
+{
+       /*
+        * Part of this would be better living in a separate allocation
+        * function, leaving us with just the cgroup tree population work.
+        * We, however, depend on state such as network's proto_list that
+        * is only initialized after cgroup creation. The least cumbersome
+        * way to deal with this is to defer it all to populate time.
+        */
+       return mem_cgroup_sockets_init(cont, ss);
+};
+
+static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
+                               struct cgroup *cont)
+{
+       mem_cgroup_sockets_destroy(cont, ss);
+}
+#else
+static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
+{
+       return 0;
+}
+
+static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
+                               struct cgroup *cont)
+{
+}
+#endif
+
 static struct cftype mem_cgroup_files[] = {
        {
                .name = "usage_in_bytes",
@@ -4843,12 +4930,13 @@ static void mem_cgroup_put(struct mem_cgroup *memcg)
 /*
  * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled.
  */
-static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
+struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
 {
        if (!memcg->res.parent)
                return NULL;
        return mem_cgroup_from_res_counter(memcg->res.parent, res);
 }
+EXPORT_SYMBOL(parent_mem_cgroup);
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 static void __init enable_swap_cgroup(void)
@@ -4907,9 +4995,9 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
                int cpu;
                enable_swap_cgroup();
                parent = NULL;
-               root_mem_cgroup = memcg;
                if (mem_cgroup_soft_limit_tree_init())
                        goto free_out;
+               root_mem_cgroup = memcg;
                for_each_possible_cpu(cpu) {
                        struct memcg_stock_pcp *stock =
                                                &per_cpu(memcg_stock, cpu);
@@ -4948,7 +5036,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
        return &memcg->css;
 free_out:
        __mem_cgroup_free(memcg);
-       root_mem_cgroup = NULL;
        return ERR_PTR(error);
 }
 
@@ -4965,6 +5052,8 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss,
 {
        struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
 
+       kmem_cgroup_destroy(ss, cont);
+
        mem_cgroup_put(memcg);
 }
 
@@ -4978,6 +5067,10 @@ static int mem_cgroup_populate(struct cgroup_subsys *ss,
 
        if (!ret)
                ret = register_memsw_files(cont, ss);
+
+       if (!ret)
+               ret = register_kmem_files(cont, ss);
+
        return ret;
 }
 
index 578e291..177aca4 100644 (file)
@@ -871,9 +871,9 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 
        if (anon_vma)
                put_anon_vma(anon_vma);
-out:
        unlock_page(hpage);
 
+out:
        if (rc != -EAGAIN) {
                list_del(&hpage->lru);
                put_page(hpage);
index 76f2c5a..069b64e 100644 (file)
@@ -176,7 +176,7 @@ static bool oom_unkillable_task(struct task_struct *p,
 unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
                      const nodemask_t *nodemask, unsigned long totalpages)
 {
-       int points;
+       long points;
 
        if (oom_unkillable_task(p, mem, nodemask))
                return 0;
index 7125248..50f0824 100644 (file)
@@ -411,8 +411,13 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
  *
  * Returns @bdi's dirty limit in pages. The term "dirty" in the context of
  * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
- * And the "limit" in the name is not seriously taken as hard limit in
- * balance_dirty_pages().
+ *
+ * Note that balance_dirty_pages() will only seriously take it as a hard limit
+ * when sleeping max_pause per page is not enough to keep the dirty pages under
+ * control. For example, when the device is completely stalled due to some error
+ * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
+ * In all other (normal) situations it acts more gently, throttling the tasks
+ * more (rather than completely blocking them) when the bdi dirty pages go high.
  *
  * It allocates high/low dirty limits to fast/slow devices, in order to prevent
  * - starving fast devices
@@ -594,6 +599,13 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
         */
        if (unlikely(bdi_thresh > thresh))
                bdi_thresh = thresh;
+       /*
+        * It's very possible that bdi_thresh is close to 0 not because the
+        * device is slow, but because it has remained inactive for a long time.
+        * Honour such devices with a reasonably good (hopefully IO efficient)
+        * threshold, so that the occasional writes won't be blocked and active
+        * writes can ramp up the threshold quickly.
+        */
        bdi_thresh = max(bdi_thresh, (limit - dirty) / 8);
        /*
         * scale global setpoint to bdi's:
@@ -977,8 +989,7 @@ static unsigned long bdi_max_pause(struct backing_dev_info *bdi,
         *
         * 8 serves as the safety ratio.
         */
-       if (bdi_dirty)
-               t = min(t, bdi_dirty * HZ / (8 * bw + 1));
+       t = min(t, bdi_dirty * HZ / (8 * bw + 1));
 
        /*
         * The pause time will be settled within range (max_pause/4, max_pause).
@@ -1136,6 +1147,19 @@ pause:
                if (task_ratelimit)
                        break;
 
+               /*
+                * In the case of an unresponsive NFS server whose dirty
+                * pages exceed dirty_thresh, give the other good bdi's a pipe
+                * to go through, so that tasks on them still remain responsive.
+                *
+                * In theory 1 page is enough to keep the consumer-producer
+                * pipe going: the flusher cleans 1 page => the task dirties 1
+                * more page. However bdi_dirty has accounting errors.  So use
+                * the larger and more IO friendly bdi_stat_error.
+                */
+               if (bdi_dirty <= bdi_stat_error(bdi))
+                       break;
+
                if (fatal_signal_pending(current))
                        break;
        }
index 9dd443d..2b8ba3a 100644 (file)
@@ -356,8 +356,8 @@ void prep_compound_page(struct page *page, unsigned long order)
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;
-
                __SetPageTail(p);
+               set_page_count(p, 0);
                p->first_page = page;
        }
 }
@@ -3377,9 +3377,15 @@ static void setup_zone_migrate_reserve(struct zone *zone)
        unsigned long block_migratetype;
        int reserve;
 
-       /* Get the start pfn, end pfn and the number of blocks to reserve */
+       /*
+        * Get the start pfn, end pfn and the number of blocks to reserve.
+        * We have to be careful to be aligned to pageblock_nr_pages to
+        * make sure that we always check pfn_valid for the first page in
+        * the block.
+        */
        start_pfn = zone->zone_start_pfn;
        end_pfn = start_pfn + zone->spanned_pages;
+       start_pfn = roundup(start_pfn, pageblock_nr_pages);
        reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
                                                        pageblock_order;
 
index 3bb810a..716eb4a 100644 (file)
@@ -1023,9 +1023,11 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr)
                if (!is_vmalloc_addr(addr))
                        return __pa(addr);
                else
-                       return page_to_phys(vmalloc_to_page(addr));
+                       return page_to_phys(vmalloc_to_page(addr)) +
+                              offset_in_page(addr);
        } else
-               return page_to_phys(pcpu_addr_to_page(addr));
+               return page_to_phys(pcpu_addr_to_page(addr)) +
+                      offset_in_page(addr);
 }
 
 /**
index 708efe8..83311c9 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -595,6 +595,7 @@ static enum {
        PARTIAL_AC,
        PARTIAL_L3,
        EARLY,
+       LATE,
        FULL
 } g_cpucache_up;
 
@@ -671,7 +672,7 @@ static void init_node_lock_keys(int q)
 {
        struct cache_sizes *s = malloc_sizes;
 
-       if (g_cpucache_up != FULL)
+       if (g_cpucache_up < LATE)
                return;
 
        for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
@@ -1666,6 +1667,8 @@ void __init kmem_cache_init_late(void)
 {
        struct kmem_cache *cachep;
 
+       g_cpucache_up = LATE;
+
        /* Annotate slab for lockdep -- annotate the malloc caches */
        init_lock_keys();
 
index 3231bf3..27be2f0 100644 (file)
@@ -1290,7 +1290,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
                unsigned long align, unsigned long flags, unsigned long start,
                unsigned long end, int node, gfp_t gfp_mask, void *caller)
 {
-       static struct vmap_area *va;
+       struct vmap_area *va;
        struct vm_struct *area;
 
        BUG_ON(in_interrupt());
@@ -1633,6 +1633,8 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
                goto fail;
 
        addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
+       if (!addr)
+               return NULL;
 
        /*
         * In this function, newly allocated vm_struct is not added
index a1893c0..f54a05b 100644 (file)
@@ -183,7 +183,7 @@ static unsigned long zone_nr_lru_pages(struct zone *zone,
  */
 void register_shrinker(struct shrinker *shrinker)
 {
-       shrinker->nr = 0;
+       atomic_long_set(&shrinker->nr_in_batch, 0);
        down_write(&shrinker_rwsem);
        list_add_tail(&shrinker->list, &shrinker_list);
        up_write(&shrinker_rwsem);
@@ -247,25 +247,26 @@ unsigned long shrink_slab(struct shrink_control *shrink,
 
        list_for_each_entry(shrinker, &shrinker_list, list) {
                unsigned long long delta;
-               unsigned long total_scan;
-               unsigned long max_pass;
+               long total_scan;
+               long max_pass;
                int shrink_ret = 0;
                long nr;
                long new_nr;
                long batch_size = shrinker->batch ? shrinker->batch
                                                  : SHRINK_BATCH;
 
+               max_pass = do_shrinker_shrink(shrinker, shrink, 0);
+               if (max_pass <= 0)
+                       continue;
+
                /*
                 * copy the current shrinker scan count into a local variable
                 * and zero it so that other concurrent shrinker invocations
                 * don't also do this scanning work.
                 */
-               do {
-                       nr = shrinker->nr;
-               } while (cmpxchg(&shrinker->nr, nr, 0) != nr);
+               nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);
 
                total_scan = nr;
-               max_pass = do_shrinker_shrink(shrinker, shrink, 0);
                delta = (4 * nr_pages_scanned) / shrinker->seeks;
                delta *= max_pass;
                do_div(delta, lru_pages + 1);
@@ -325,12 +326,11 @@ unsigned long shrink_slab(struct shrink_control *shrink,
                 * manner that handles concurrent updates. If we exhausted the
                 * scan, there is no need to do an update.
                 */
-               do {
-                       nr = shrinker->nr;
-                       new_nr = total_scan + nr;
-                       if (total_scan <= 0)
-                               break;
-               } while (cmpxchg(&shrinker->nr, nr, new_nr) != nr);
+               if (total_scan > 0)
+                       new_nr = atomic_long_add_return(total_scan,
+                                       &shrinker->nr_in_batch);
+               else
+                       new_nr = atomic_long_read(&shrinker->nr_in_batch);
 
                trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
        }
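
The shrinker bookkeeping above replaces an open-coded cmpxchg() retry loop with single atomic operations: one exchange to claim the deferred count and zero it, and one add to return whatever was not scanned. A C11-atomics sketch of the same claim/return pattern (names are hypothetical, not the kernel API):

#include <stdatomic.h>

/* Hypothetical stand-in for shrinker->nr_in_batch. */
static atomic_long nr_in_batch;

/* Claim the whole deferred count for this invocation and zero it, so a
 * concurrent caller racing with us reads 0 and does no duplicate work.
 * One unconditional exchange replaces the old cmpxchg() retry loop. */
static long claim_batch(void)
{
	return atomic_exchange(&nr_in_batch, 0);
}

/* Hand unscanned work back for the next invocation to pick up. */
static void return_batch(long leftover)
{
	if (leftover > 0)
		atomic_fetch_add(&nr_in_batch, leftover);
}
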
index 5471628..efea35b 100644 (file)
@@ -51,27 +51,6 @@ const char vlan_version[] = DRV_VERSION;
 
 /* End of global variables definitions. */
 
-static void vlan_group_free(struct vlan_group *grp)
-{
-       int i;
-
-       for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
-               kfree(grp->vlan_devices_arrays[i]);
-       kfree(grp);
-}
-
-static struct vlan_group *vlan_group_alloc(struct net_device *real_dev)
-{
-       struct vlan_group *grp;
-
-       grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL);
-       if (!grp)
-               return NULL;
-
-       grp->real_dev = real_dev;
-       return grp;
-}
-
 static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id)
 {
        struct net_device **array;
@@ -92,32 +71,29 @@ static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id)
        return 0;
 }
 
-static void vlan_rcu_free(struct rcu_head *rcu)
-{
-       vlan_group_free(container_of(rcu, struct vlan_group, rcu));
-}
-
 void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct net_device *real_dev = vlan->real_dev;
-       const struct net_device_ops *ops = real_dev->netdev_ops;
+       struct vlan_info *vlan_info;
        struct vlan_group *grp;
        u16 vlan_id = vlan->vlan_id;
 
        ASSERT_RTNL();
 
-       grp = rtnl_dereference(real_dev->vlgrp);
-       BUG_ON(!grp);
+       vlan_info = rtnl_dereference(real_dev->vlan_info);
+       BUG_ON(!vlan_info);
+
+       grp = &vlan_info->grp;
 
        /* Take it out of our own structures, but be sure to interlock with
         * HW accelerating devices or SW vlan input packet processing if
         * VLAN is not 0 (leave it there for 802.1p).
         */
-       if (vlan_id && (real_dev->features & NETIF_F_HW_VLAN_FILTER))
-               ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id);
+       if (vlan_id)
+               vlan_vid_del(real_dev, vlan_id);
 
-       grp->nr_vlans--;
+       grp->nr_vlan_devs--;
 
        if (vlan->flags & VLAN_FLAG_GVRP)
                vlan_gvrp_request_leave(dev);
@@ -129,16 +105,9 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
         */
        unregister_netdevice_queue(dev, head);
 
-       /* If the group is now empty, kill off the group. */
-       if (grp->nr_vlans == 0) {
+       if (grp->nr_vlan_devs == 0)
                vlan_gvrp_uninit_applicant(real_dev);
 
-               RCU_INIT_POINTER(real_dev->vlgrp, NULL);
-
-               /* Free the group, after all cpu's are done. */
-               call_rcu(&grp->rcu, vlan_rcu_free);
-       }
-
        /* Get rid of the vlan's reference to real_dev */
        dev_put(real_dev);
 }
@@ -167,21 +136,26 @@ int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id)
 
 int register_vlan_dev(struct net_device *dev)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct net_device *real_dev = vlan->real_dev;
-       const struct net_device_ops *ops = real_dev->netdev_ops;
        u16 vlan_id = vlan->vlan_id;
-       struct vlan_group *grp, *ngrp = NULL;
+       struct vlan_info *vlan_info;
+       struct vlan_group *grp;
        int err;
 
-       grp = rtnl_dereference(real_dev->vlgrp);
-       if (!grp) {
-               ngrp = grp = vlan_group_alloc(real_dev);
-               if (!grp)
-                       return -ENOBUFS;
+       err = vlan_vid_add(real_dev, vlan_id);
+       if (err)
+               return err;
+
+       vlan_info = rtnl_dereference(real_dev->vlan_info);
+       /* vlan_info should be there now. vlan_vid_add took care of it */
+       BUG_ON(!vlan_info);
+
+       grp = &vlan_info->grp;
+       if (grp->nr_vlan_devs == 0) {
                err = vlan_gvrp_init_applicant(real_dev);
                if (err < 0)
-                       goto out_free_group;
+                       goto out_vid_del;
        }
 
        err = vlan_group_prealloc_vid(grp, vlan_id);
@@ -192,7 +166,7 @@ int register_vlan_dev(struct net_device *dev)
        if (err < 0)
                goto out_uninit_applicant;
 
-       /* Account for reference in struct vlan_dev_info */
+       /* Account for reference in struct vlan_dev_priv */
        dev_hold(real_dev);
 
        netif_stacked_transfer_operstate(real_dev, dev);
@@ -202,24 +176,15 @@ int register_vlan_dev(struct net_device *dev)
         * it into our local structure.
         */
        vlan_group_set_device(grp, vlan_id, dev);
-       grp->nr_vlans++;
-
-       if (ngrp) {
-               rcu_assign_pointer(real_dev->vlgrp, ngrp);
-       }
-       if (real_dev->features & NETIF_F_HW_VLAN_FILTER)
-               ops->ndo_vlan_rx_add_vid(real_dev, vlan_id);
+       grp->nr_vlan_devs++;
 
        return 0;
 
 out_uninit_applicant:
-       if (ngrp)
+       if (grp->nr_vlan_devs == 0)
                vlan_gvrp_uninit_applicant(real_dev);
-out_free_group:
-       if (ngrp) {
-               /* Free the group, after all cpu's are done. */
-               call_rcu(&ngrp->rcu, vlan_rcu_free);
-       }
+out_vid_del:
+       vlan_vid_del(real_dev, vlan_id);
        return err;
 }
 
@@ -267,7 +232,7 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
                snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
        }
 
-       new_dev = alloc_netdev(sizeof(struct vlan_dev_info), name, vlan_setup);
+       new_dev = alloc_netdev(sizeof(struct vlan_dev_priv), name, vlan_setup);
 
        if (new_dev == NULL)
                return -ENOBUFS;
@@ -278,10 +243,10 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
         */
        new_dev->mtu = real_dev->mtu;
 
-       vlan_dev_info(new_dev)->vlan_id = vlan_id;
-       vlan_dev_info(new_dev)->real_dev = real_dev;
-       vlan_dev_info(new_dev)->dent = NULL;
-       vlan_dev_info(new_dev)->flags = VLAN_FLAG_REORDER_HDR;
+       vlan_dev_priv(new_dev)->vlan_id = vlan_id;
+       vlan_dev_priv(new_dev)->real_dev = real_dev;
+       vlan_dev_priv(new_dev)->dent = NULL;
+       vlan_dev_priv(new_dev)->flags = VLAN_FLAG_REORDER_HDR;
 
        new_dev->rtnl_link_ops = &vlan_link_ops;
        err = register_vlan_dev(new_dev);
@@ -298,7 +263,7 @@ out_free_newdev:
 static void vlan_sync_address(struct net_device *dev,
                              struct net_device *vlandev)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(vlandev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
 
        /* May be called without an actual change */
        if (!compare_ether_addr(vlan->real_dev_addr, dev->dev_addr))
@@ -360,25 +325,26 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 {
        struct net_device *dev = ptr;
        struct vlan_group *grp;
+       struct vlan_info *vlan_info;
        int i, flgs;
        struct net_device *vlandev;
-       struct vlan_dev_info *vlan;
+       struct vlan_dev_priv *vlan;
        LIST_HEAD(list);
 
        if (is_vlan_dev(dev))
                __vlan_device_event(dev, event);
 
        if ((event == NETDEV_UP) &&
-           (dev->features & NETIF_F_HW_VLAN_FILTER) &&
-           dev->netdev_ops->ndo_vlan_rx_add_vid) {
+           (dev->features & NETIF_F_HW_VLAN_FILTER)) {
                pr_info("adding VLAN 0 to HW filter on device %s\n",
                        dev->name);
-               dev->netdev_ops->ndo_vlan_rx_add_vid(dev, 0);
+               vlan_vid_add(dev, 0);
        }
 
-       grp = rtnl_dereference(dev->vlgrp);
-       if (!grp)
+       vlan_info = rtnl_dereference(dev->vlan_info);
+       if (!vlan_info)
                goto out;
+       grp = &vlan_info->grp;
 
        /* It is OK that we do not hold the group lock right now,
         * as we run under the RTNL lock.
@@ -447,7 +413,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                        if (!(flgs & IFF_UP))
                                continue;
 
-                       vlan = vlan_dev_info(vlandev);
+                       vlan = vlan_dev_priv(vlandev);
                        if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
                                dev_change_flags(vlandev, flgs & ~IFF_UP);
                        netif_stacked_transfer_operstate(dev, vlandev);
@@ -465,7 +431,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                        if (flgs & IFF_UP)
                                continue;
 
-                       vlan = vlan_dev_info(vlandev);
+                       vlan = vlan_dev_priv(vlandev);
                        if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
                                dev_change_flags(vlandev, flgs | IFF_UP);
                        netif_stacked_transfer_operstate(dev, vlandev);
@@ -482,9 +448,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                        if (!vlandev)
                                continue;
 
-                       /* unregistration of last vlan destroys group, abort
+                       /* removal of last vid destroys vlan_info, abort
                         * afterwards */
-                       if (grp->nr_vlans == 1)
+                       if (vlan_info->nr_vids == 1)
                                i = VLAN_N_VID;
 
                        unregister_vlan_dev(vlandev, &list);
index 9fd45f3..a4886d9 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/if_vlan.h>
 #include <linux/u64_stats_sync.h>
+#include <linux/list.h>
 
 
 /**
@@ -40,8 +41,10 @@ struct vlan_pcpu_stats {
        u32                     tx_dropped;
 };
 
+struct netpoll;
+
 /**
- *     struct vlan_dev_info - VLAN private device data
+ *     struct vlan_dev_priv - VLAN private device data
  *     @nr_ingress_mappings: number of ingress priority mappings
  *     @ingress_priority_map: ingress priority mappings
  *     @nr_egress_mappings: number of egress priority mappings
@@ -53,7 +56,7 @@ struct vlan_pcpu_stats {
  *     @dent: proc dir entry
  *     @vlan_pcpu_stats: ptr to percpu rx stats
  */
-struct vlan_dev_info {
+struct vlan_dev_priv {
        unsigned int                            nr_ingress_mappings;
        u32                                     ingress_priority_map[8];
        unsigned int                            nr_egress_mappings;
@@ -67,13 +70,39 @@ struct vlan_dev_info {
 
        struct proc_dir_entry                   *dent;
        struct vlan_pcpu_stats __percpu         *vlan_pcpu_stats;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       struct netpoll                          *netpoll;
+#endif
 };
 
-static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
+static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
 {
        return netdev_priv(dev);
 }
 
+/* If this changes, the algorithm will have to be reworked because it
+ * depends on completely exhausting the VLAN identifier space.  Thus
+ * it gives constant-time look-up, but in many cases it wastes memory.
+ */
+#define VLAN_GROUP_ARRAY_SPLIT_PARTS  8
+#define VLAN_GROUP_ARRAY_PART_LEN     (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS)
+
+struct vlan_group {
+       unsigned int            nr_vlan_devs;
+       struct hlist_node       hlist;  /* linked list */
+       struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS];
+};
+
+struct vlan_info {
+       struct net_device       *real_dev; /* The ethernet(like) device
+                                           * the vlan is attached to.
+                                           */
+       struct vlan_group       grp;
+       struct list_head        vid_list;
+       unsigned int            nr_vids;
+       struct rcu_head         rcu;
+};
+
 static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
                                                       u16 vlan_id)
 {
@@ -97,10 +126,10 @@ static inline void vlan_group_set_device(struct vlan_group *vg,
 static inline struct net_device *vlan_find_dev(struct net_device *real_dev,
                                               u16 vlan_id)
 {
-       struct vlan_group *grp = rcu_dereference_rtnl(real_dev->vlgrp);
+       struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info);
 
-       if (grp)
-               return vlan_group_get_device(grp, vlan_id);
+       if (vlan_info)
+               return vlan_group_get_device(&vlan_info->grp, vlan_id);
 
        return NULL;
 }
@@ -121,7 +150,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
 static inline u32 vlan_get_ingress_priority(struct net_device *dev,
                                            u16 vlan_tci)
 {
-       struct vlan_dev_info *vip = vlan_dev_info(dev);
+       struct vlan_dev_priv *vip = vlan_dev_priv(dev);
 
        return vip->ingress_priority_map[(vlan_tci >> VLAN_PRIO_SHIFT) & 0x7];
 }
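
The split device arrays declared earlier in this header keep VLAN look-up constant time while allocating memory lazily: a vid selects one of VLAN_GROUP_ARRAY_SPLIT_PARTS sub-arrays, each VLAN_GROUP_ARRAY_PART_LEN pointers long, and vlan_group_prealloc_vid() only allocates the sub-array a vid actually falls into. A stand-alone sketch of the index arithmetic (VLAN_N_VID is 4096 in this tree):

#include <stdio.h>

#define VLAN_N_VID			4096
#define VLAN_GROUP_ARRAY_SPLIT_PARTS	8
#define VLAN_GROUP_ARRAY_PART_LEN	(VLAN_N_VID / VLAN_GROUP_ARRAY_SPLIT_PARTS)

int main(void)
{
	unsigned int vid = 1234;	/* example VLAN id */

	/* Which sub-array holds this vid, and at which slot within it.
	 * Only the sub-arrays that are actually touched get allocated. */
	unsigned int part = vid / VLAN_GROUP_ARRAY_PART_LEN;	/* 1234 / 512 = 2 */
	unsigned int slot = vid % VLAN_GROUP_ARRAY_PART_LEN;	/* 1234 % 512 = 210 */
	printf("vid %u -> part %u, slot %u\n", vid, part, slot);
	return 0;
}
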
index 9c95e8e..4d39d80 100644 (file)
@@ -36,7 +36,7 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
                        skb->pkt_type = PACKET_HOST;
        }
 
-       if (!(vlan_dev_info(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
+       if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
                unsigned int offset = skb->data - skb_mac_header(skb);
 
                /*
@@ -55,7 +55,7 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
        skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
        skb->vlan_tci = 0;
 
-       rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_pcpu_stats);
+       rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);
 
        u64_stats_update_begin(&rx_stats->syncp);
        rx_stats->rx_packets++;
@@ -71,10 +71,10 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
 struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
                                        u16 vlan_id)
 {
-       struct vlan_group *grp = rcu_dereference_rtnl(real_dev->vlgrp);
+       struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info);
 
-       if (grp) {
-               return vlan_group_get_device(grp, vlan_id);
+       if (vlan_info) {
+               return vlan_group_get_device(&vlan_info->grp, vlan_id);
        } else {
                /*
                 * Bonding slaves do not have grp assigned to themselves.
@@ -90,13 +90,13 @@ EXPORT_SYMBOL(__vlan_find_dev_deep);
 
 struct net_device *vlan_dev_real_dev(const struct net_device *dev)
 {
-       return vlan_dev_info(dev)->real_dev;
+       return vlan_dev_priv(dev)->real_dev;
 }
 EXPORT_SYMBOL(vlan_dev_real_dev);
 
 u16 vlan_dev_vlan_id(const struct net_device *dev)
 {
-       return vlan_dev_info(dev)->vlan_id;
+       return vlan_dev_priv(dev)->vlan_id;
 }
 EXPORT_SYMBOL(vlan_dev_vlan_id);
 
@@ -146,3 +146,226 @@ err_free:
        kfree_skb(skb);
        return NULL;
 }
+
+
+/*
+ * vlan info and vid list
+ */
+
+static void vlan_group_free(struct vlan_group *grp)
+{
+       int i;
+
+       for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
+               kfree(grp->vlan_devices_arrays[i]);
+}
+
+static void vlan_info_free(struct vlan_info *vlan_info)
+{
+       vlan_group_free(&vlan_info->grp);
+       kfree(vlan_info);
+}
+
+static void vlan_info_rcu_free(struct rcu_head *rcu)
+{
+       vlan_info_free(container_of(rcu, struct vlan_info, rcu));
+}
+
+static struct vlan_info *vlan_info_alloc(struct net_device *dev)
+{
+       struct vlan_info *vlan_info;
+
+       vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
+       if (!vlan_info)
+               return NULL;
+
+       vlan_info->real_dev = dev;
+       INIT_LIST_HEAD(&vlan_info->vid_list);
+       return vlan_info;
+}
+
+struct vlan_vid_info {
+       struct list_head list;
+       unsigned short vid;
+       int refcount;
+};
+
+static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
+                                              unsigned short vid)
+{
+       struct vlan_vid_info *vid_info;
+
+       list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
+               if (vid_info->vid == vid)
+                       return vid_info;
+       }
+       return NULL;
+}
+
+static struct vlan_vid_info *vlan_vid_info_alloc(unsigned short vid)
+{
+       struct vlan_vid_info *vid_info;
+
+       vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
+       if (!vid_info)
+               return NULL;
+       vid_info->vid = vid;
+
+       return vid_info;
+}
+
+static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid,
+                         struct vlan_vid_info **pvid_info)
+{
+       struct net_device *dev = vlan_info->real_dev;
+       const struct net_device_ops *ops = dev->netdev_ops;
+       struct vlan_vid_info *vid_info;
+       int err;
+
+       vid_info = vlan_vid_info_alloc(vid);
+       if (!vid_info)
+               return -ENOMEM;
+
+       if ((dev->features & NETIF_F_HW_VLAN_FILTER) &&
+           ops->ndo_vlan_rx_add_vid) {
+               err =  ops->ndo_vlan_rx_add_vid(dev, vid);
+               if (err) {
+                       kfree(vid_info);
+                       return err;
+               }
+       }
+       list_add(&vid_info->list, &vlan_info->vid_list);
+       vlan_info->nr_vids++;
+       *pvid_info = vid_info;
+       return 0;
+}
+
+int vlan_vid_add(struct net_device *dev, unsigned short vid)
+{
+       struct vlan_info *vlan_info;
+       struct vlan_vid_info *vid_info;
+       bool vlan_info_created = false;
+       int err;
+
+       ASSERT_RTNL();
+
+       vlan_info = rtnl_dereference(dev->vlan_info);
+       if (!vlan_info) {
+               vlan_info = vlan_info_alloc(dev);
+               if (!vlan_info)
+                       return -ENOMEM;
+               vlan_info_created = true;
+       }
+       vid_info = vlan_vid_info_get(vlan_info, vid);
+       if (!vid_info) {
+               err = __vlan_vid_add(vlan_info, vid, &vid_info);
+               if (err)
+                       goto out_free_vlan_info;
+       }
+       vid_info->refcount++;
+
+       if (vlan_info_created)
+               rcu_assign_pointer(dev->vlan_info, vlan_info);
+
+       return 0;
+
+out_free_vlan_info:
+       if (vlan_info_created)
+               kfree(vlan_info);
+       return err;
+}
+EXPORT_SYMBOL(vlan_vid_add);
+
+static void __vlan_vid_del(struct vlan_info *vlan_info,
+                          struct vlan_vid_info *vid_info)
+{
+       struct net_device *dev = vlan_info->real_dev;
+       const struct net_device_ops *ops = dev->netdev_ops;
+       unsigned short vid = vid_info->vid;
+       int err;
+
+       if ((dev->features & NETIF_F_HW_VLAN_FILTER) &&
+            ops->ndo_vlan_rx_kill_vid) {
+               err = ops->ndo_vlan_rx_kill_vid(dev, vid);
+               if (err) {
+                       pr_warn("failed to kill vid %d for device %s\n",
+                               vid, dev->name);
+               }
+       }
+       list_del(&vid_info->list);
+       kfree(vid_info);
+       vlan_info->nr_vids--;
+}
+
+void vlan_vid_del(struct net_device *dev, unsigned short vid)
+{
+       struct vlan_info *vlan_info;
+       struct vlan_vid_info *vid_info;
+
+       ASSERT_RTNL();
+
+       vlan_info = rtnl_dereference(dev->vlan_info);
+       if (!vlan_info)
+               return;
+
+       vid_info = vlan_vid_info_get(vlan_info, vid);
+       if (!vid_info)
+               return;
+       vid_info->refcount--;
+       if (vid_info->refcount == 0) {
+               __vlan_vid_del(vlan_info, vid_info);
+               if (vlan_info->nr_vids == 0) {
+                       RCU_INIT_POINTER(dev->vlan_info, NULL);
+                       call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
+               }
+       }
+}
+EXPORT_SYMBOL(vlan_vid_del);
+
+int vlan_vids_add_by_dev(struct net_device *dev,
+                        const struct net_device *by_dev)
+{
+       struct vlan_vid_info *vid_info;
+       struct vlan_info *vlan_info;
+       int err;
+
+       ASSERT_RTNL();
+
+       vlan_info = rtnl_dereference(by_dev->vlan_info);
+       if (!vlan_info)
+               return 0;
+
+       list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
+               err = vlan_vid_add(dev, vid_info->vid);
+               if (err)
+                       goto unwind;
+       }
+       return 0;
+
+unwind:
+       list_for_each_entry_continue_reverse(vid_info,
+                                            &vlan_info->vid_list,
+                                            list) {
+               vlan_vid_del(dev, vid_info->vid);
+       }
+
+       return err;
+}
+EXPORT_SYMBOL(vlan_vids_add_by_dev);
+
+void vlan_vids_del_by_dev(struct net_device *dev,
+                         const struct net_device *by_dev)
+{
+       struct vlan_vid_info *vid_info;
+       struct vlan_info *vlan_info;
+
+       ASSERT_RTNL();
+
+       vlan_info = rtnl_dereference(by_dev->vlan_info);
+       if (!vlan_info)
+               return;
+
+       list_for_each_entry(vid_info, &vlan_info->vid_list, list)
+               vlan_vid_del(dev, vid_info->vid);
+}
+EXPORT_SYMBOL(vlan_vids_del_by_dev);
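
The exported helpers above give stacked drivers a refcounted way to mirror VLAN filters onto a lower device: vlan_vid_add() creates the per-device vlan_info on first use, vlan_vid_del() frees it via RCU once the last vid is gone, and the *_by_dev variants copy or drop a whole vid list under RTNL. A minimal usage sketch (the enslave/release functions and device names are hypothetical; only the vlan_vid* calls come from the code above, and the sketch assumes the usual netdevice headers rather than compiling stand-alone):

/* Sketch: a stacked device mirroring its upper device's VLAN filter
 * onto a newly attached lower device. */
static int example_enslave(struct net_device *upper_dev,
			   struct net_device *lower_dev)
{
	ASSERT_RTNL();

	/* Copy every vid configured on upper_dev to lower_dev; on failure
	 * the helper unwinds the vids it added itself. */
	return vlan_vids_add_by_dev(lower_dev, upper_dev);
}

static void example_release(struct net_device *upper_dev,
			    struct net_device *lower_dev)
{
	ASSERT_RTNL();

	/* Drop the references taken at attach time; the lower device's
	 * vlan_info is freed via RCU once its last vid goes away. */
	vlan_vids_del_by_dev(lower_dev, upper_dev);
}
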
index 2b5fcde..9988d4a 100644 (file)
@@ -33,6 +33,7 @@
 #include "vlan.h"
 #include "vlanproc.h"
 #include <linux/if_vlan.h>
+#include <linux/netpoll.h>
 
 /*
  *     Rebuild the Ethernet MAC header. This is called after an ARP
@@ -72,7 +73,7 @@ vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb)
 {
        struct vlan_priority_tci_mapping *mp;
 
-       mp = vlan_dev_info(dev)->egress_priority_map[(skb->priority & 0xF)];
+       mp = vlan_dev_priv(dev)->egress_priority_map[(skb->priority & 0xF)];
        while (mp) {
                if (mp->priority == skb->priority) {
                        return mp->vlan_qos; /* This should already be shifted
@@ -103,10 +104,10 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
        u16 vlan_tci = 0;
        int rc;
 
-       if (!(vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR)) {
+       if (!(vlan_dev_priv(dev)->flags & VLAN_FLAG_REORDER_HDR)) {
                vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);
 
-               vlan_tci = vlan_dev_info(dev)->vlan_id;
+               vlan_tci = vlan_dev_priv(dev)->vlan_id;
                vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
                vhdr->h_vlan_TCI = htons(vlan_tci);
 
@@ -129,7 +130,7 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
                saddr = dev->dev_addr;
 
        /* Now make the underlying real hard header */
-       dev = vlan_dev_info(dev)->real_dev;
+       dev = vlan_dev_priv(dev)->real_dev;
        rc = dev_hard_header(skb, dev, type, daddr, saddr, len + vhdrlen);
        if (rc > 0)
                rc += vhdrlen;
@@ -149,27 +150,29 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
         * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
         */
        if (veth->h_vlan_proto != htons(ETH_P_8021Q) ||
-           vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR) {
+           vlan_dev_priv(dev)->flags & VLAN_FLAG_REORDER_HDR) {
                u16 vlan_tci;
-               vlan_tci = vlan_dev_info(dev)->vlan_id;
+               vlan_tci = vlan_dev_priv(dev)->vlan_id;
                vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
                skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
        }
 
-       skb_set_dev(skb, vlan_dev_info(dev)->real_dev);
+       skb_set_dev(skb, vlan_dev_priv(dev)->real_dev);
        len = skb->len;
+       if (netpoll_tx_running(dev))
+               return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev);
        ret = dev_queue_xmit(skb);
 
        if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
                struct vlan_pcpu_stats *stats;
 
-               stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats);
+               stats = this_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats);
                u64_stats_update_begin(&stats->syncp);
                stats->tx_packets++;
                stats->tx_bytes += len;
                u64_stats_update_end(&stats->syncp);
        } else {
-               this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped);
+               this_cpu_inc(vlan_dev_priv(dev)->vlan_pcpu_stats->tx_dropped);
        }
 
        return ret;
@@ -180,7 +183,7 @@ static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
        /* TODO: gotta make sure the underlying layer can handle it,
         * maybe an IFF_VLAN_CAPABLE flag for devices?
         */
-       if (vlan_dev_info(dev)->real_dev->mtu < new_mtu)
+       if (vlan_dev_priv(dev)->real_dev->mtu < new_mtu)
                return -ERANGE;
 
        dev->mtu = new_mtu;
@@ -191,7 +194,7 @@ static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
 void vlan_dev_set_ingress_priority(const struct net_device *dev,
                                   u32 skb_prio, u16 vlan_prio)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 
        if (vlan->ingress_priority_map[vlan_prio & 0x7] && !skb_prio)
                vlan->nr_ingress_mappings--;
@@ -204,7 +207,7 @@ void vlan_dev_set_ingress_priority(const struct net_device *dev,
 int vlan_dev_set_egress_priority(const struct net_device *dev,
                                 u32 skb_prio, u16 vlan_prio)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct vlan_priority_tci_mapping *mp = NULL;
        struct vlan_priority_tci_mapping *np;
        u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
@@ -241,7 +244,7 @@ int vlan_dev_set_egress_priority(const struct net_device *dev,
 /* Flags are defined in the vlan_flags enum in include/linux/if_vlan.h file. */
 int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        u32 old_flags = vlan->flags;
 
        if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
@@ -261,12 +264,12 @@ int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
 
 void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
 {
-       strncpy(result, vlan_dev_info(dev)->real_dev->name, 23);
+       strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23);
 }
 
 static int vlan_dev_open(struct net_device *dev)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct net_device *real_dev = vlan->real_dev;
        int err;
 
@@ -313,7 +316,7 @@ out:
 
 static int vlan_dev_stop(struct net_device *dev)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct net_device *real_dev = vlan->real_dev;
 
        dev_mc_unsync(real_dev, dev);
@@ -332,7 +335,7 @@ static int vlan_dev_stop(struct net_device *dev)
 
 static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        struct sockaddr *addr = p;
        int err;
 
@@ -358,7 +361,7 @@ out:
 
 static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        struct ifreq ifrr;
        int err = -EOPNOTSUPP;
@@ -383,7 +386,7 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 
 static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        int err = 0;
 
@@ -397,7 +400,7 @@ static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
 static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,
                                   struct scatterlist *sgl, unsigned int sgc)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        int rc = 0;
 
@@ -409,7 +412,7 @@ static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,
 
 static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        int len = 0;
 
@@ -421,7 +424,7 @@ static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid)
 
 static int vlan_dev_fcoe_enable(struct net_device *dev)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        int rc = -EINVAL;
 
@@ -432,7 +435,7 @@ static int vlan_dev_fcoe_enable(struct net_device *dev)
 
 static int vlan_dev_fcoe_disable(struct net_device *dev)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        int rc = -EINVAL;
 
@@ -443,7 +446,7 @@ static int vlan_dev_fcoe_disable(struct net_device *dev)
 
 static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        int rc = -EINVAL;
 
@@ -455,7 +458,7 @@ static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
 static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
                                    struct scatterlist *sgl, unsigned int sgc)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        int rc = 0;
 
@@ -468,7 +471,7 @@ static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
 
 static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
 
        if (dev->flags & IFF_UP) {
                if (change & IFF_ALLMULTI)
@@ -480,8 +483,8 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
 
 static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
 {
-       dev_mc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
-       dev_uc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
+       dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+       dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
 }
 
 /*
@@ -519,7 +522,7 @@ static const struct net_device_ops vlan_netdev_ops;
 
 static int vlan_dev_init(struct net_device *dev)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        int subclass = 0;
 
        netif_carrier_off(dev);
@@ -568,8 +571,8 @@ static int vlan_dev_init(struct net_device *dev)
 
        vlan_dev_set_lockdep_class(dev, subclass);
 
-       vlan_dev_info(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
-       if (!vlan_dev_info(dev)->vlan_pcpu_stats)
+       vlan_dev_priv(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
+       if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
                return -ENOMEM;
 
        return 0;
@@ -578,7 +581,7 @@ static int vlan_dev_init(struct net_device *dev)
 static void vlan_dev_uninit(struct net_device *dev)
 {
        struct vlan_priority_tci_mapping *pm;
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        int i;
 
        free_percpu(vlan->vlan_pcpu_stats);
@@ -594,7 +597,7 @@ static void vlan_dev_uninit(struct net_device *dev)
 static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
        netdev_features_t features)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        u32 old_features = features;
 
        features &= real_dev->vlan_features;
@@ -610,7 +613,7 @@ static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
 static int vlan_ethtool_get_settings(struct net_device *dev,
                                     struct ethtool_cmd *cmd)
 {
-       const struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 
        return __ethtool_get_settings(vlan->real_dev, cmd);
 }
@@ -626,7 +629,7 @@ static void vlan_ethtool_get_drvinfo(struct net_device *dev,
 static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
 
-       if (vlan_dev_info(dev)->vlan_pcpu_stats) {
+       if (vlan_dev_priv(dev)->vlan_pcpu_stats) {
                struct vlan_pcpu_stats *p;
                u32 rx_errors = 0, tx_dropped = 0;
                int i;
@@ -635,7 +638,7 @@ static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, st
                        u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
                        unsigned int start;
 
-                       p = per_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats, i);
+                       p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
                        do {
                                start = u64_stats_fetch_begin_bh(&p->syncp);
                                rxpackets       = p->rx_packets;
@@ -660,6 +663,57 @@ static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, st
        return stats;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void vlan_dev_poll_controller(struct net_device *dev)
+{
+       return;
+}
+
+static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo)
+{
+       struct vlan_dev_priv *info = vlan_dev_priv(dev);
+       struct net_device *real_dev = info->real_dev;
+       struct netpoll *netpoll;
+       int err = 0;
+
+       netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
+       err = -ENOMEM;
+       if (!netpoll)
+               goto out;
+
+       netpoll->dev = real_dev;
+       strlcpy(netpoll->dev_name, real_dev->name, IFNAMSIZ);
+
+       err = __netpoll_setup(netpoll);
+       if (err) {
+               kfree(netpoll);
+               goto out;
+       }
+
+       info->netpoll = netpoll;
+
+out:
+       return err;
+}
+
+static void vlan_dev_netpoll_cleanup(struct net_device *dev)
+{
+       struct vlan_dev_priv *info = vlan_dev_priv(dev);
+       struct netpoll *netpoll = info->netpoll;
+
+       if (!netpoll)
+               return;
+
+       info->netpoll = NULL;
+
+       /* Wait for transmitting packets to finish before freeing. */
+       synchronize_rcu_bh();
+
+       __netpoll_cleanup(netpoll);
+       kfree(netpoll);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
 static const struct ethtool_ops vlan_ethtool_ops = {
        .get_settings           = vlan_ethtool_get_settings,
        .get_drvinfo            = vlan_ethtool_get_drvinfo,
@@ -687,6 +741,11 @@ static const struct net_device_ops vlan_netdev_ops = {
        .ndo_fcoe_disable       = vlan_dev_fcoe_disable,
        .ndo_fcoe_get_wwn       = vlan_dev_fcoe_get_wwn,
        .ndo_fcoe_ddp_target    = vlan_dev_fcoe_ddp_target,
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = vlan_dev_poll_controller,
+       .ndo_netpoll_setup      = vlan_dev_netpoll_setup,
+       .ndo_netpoll_cleanup    = vlan_dev_netpoll_cleanup,
 #endif
        .ndo_fix_features       = vlan_dev_fix_features,
 };
index 061cece..6f97553 100644 (file)
@@ -29,7 +29,7 @@ static struct garp_application vlan_gvrp_app __read_mostly = {
 
 int vlan_gvrp_request_join(const struct net_device *dev)
 {
-       const struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        __be16 vlan_id = htons(vlan->vlan_id);
 
        return garp_request_join(vlan->real_dev, &vlan_gvrp_app,
@@ -38,7 +38,7 @@ int vlan_gvrp_request_join(const struct net_device *dev)
 
 void vlan_gvrp_request_leave(const struct net_device *dev)
 {
-       const struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        __be16 vlan_id = htons(vlan->vlan_id);
 
        garp_request_leave(vlan->real_dev, &vlan_gvrp_app,
index 235c219..5071136 100644 (file)
@@ -105,7 +105,7 @@ static int vlan_changelink(struct net_device *dev,
 static int vlan_newlink(struct net *src_net, struct net_device *dev,
                        struct nlattr *tb[], struct nlattr *data[])
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct net_device *real_dev;
        int err;
 
@@ -149,7 +149,7 @@ static inline size_t vlan_qos_map_size(unsigned int n)
 
 static size_t vlan_get_size(const struct net_device *dev)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 
        return nla_total_size(2) +      /* IFLA_VLAN_ID */
               sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */
@@ -159,14 +159,14 @@ static size_t vlan_get_size(const struct net_device *dev)
 
 static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct vlan_priority_tci_mapping *pm;
        struct ifla_vlan_flags f;
        struct ifla_vlan_qos_mapping m;
        struct nlattr *nest;
        unsigned int i;
 
-       NLA_PUT_U16(skb, IFLA_VLAN_ID, vlan_dev_info(dev)->vlan_id);
+       NLA_PUT_U16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id);
        if (vlan->flags) {
                f.flags = vlan->flags;
                f.mask  = ~0;
@@ -218,7 +218,7 @@ struct rtnl_link_ops vlan_link_ops __read_mostly = {
        .kind           = "vlan",
        .maxtype        = IFLA_VLAN_MAX,
        .policy         = vlan_policy,
-       .priv_size      = sizeof(struct vlan_dev_info),
+       .priv_size      = sizeof(struct vlan_dev_priv),
        .setup          = vlan_setup,
        .validate       = vlan_validate,
        .newlink        = vlan_newlink,
index d34b6da..c718fd3 100644 (file)
@@ -168,13 +168,13 @@ err:
 
 int vlan_proc_add_dev(struct net_device *vlandev)
 {
-       struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
        struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id);
 
-       dev_info->dent =
+       vlan->dent =
                proc_create_data(vlandev->name, S_IFREG|S_IRUSR|S_IWUSR,
                                 vn->proc_vlan_dir, &vlandev_fops, vlandev);
-       if (!dev_info->dent)
+       if (!vlan->dent)
                return -ENOBUFS;
        return 0;
 }
@@ -187,10 +187,10 @@ int vlan_proc_rem_dev(struct net_device *vlandev)
        struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id);
 
        /** NOTE:  This will consume the memory pointed to by dent, it seems. */
-       if (vlan_dev_info(vlandev)->dent) {
-               remove_proc_entry(vlan_dev_info(vlandev)->dent->name,
+       if (vlan_dev_priv(vlandev)->dent) {
+               remove_proc_entry(vlan_dev_priv(vlandev)->dent->name,
                                  vn->proc_vlan_dir);
-               vlan_dev_info(vlandev)->dent = NULL;
+               vlan_dev_priv(vlandev)->dent = NULL;
        }
        return 0;
 }
@@ -268,10 +268,10 @@ static int vlan_seq_show(struct seq_file *seq, void *v)
                           nmtype ? nmtype :  "UNKNOWN");
        } else {
                const struct net_device *vlandev = v;
-               const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
+               const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
 
                seq_printf(seq, "%-15s| %d  | %s\n",  vlandev->name,
-                          dev_info->vlan_id,    dev_info->real_dev->name);
+                          vlan->vlan_id,    vlan->real_dev->name);
        }
        return 0;
 }
@@ -279,7 +279,7 @@ static int vlan_seq_show(struct seq_file *seq, void *v)
 static int vlandev_seq_show(struct seq_file *seq, void *offset)
 {
        struct net_device *vlandev = (struct net_device *) seq->private;
-       const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
+       const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
        struct rtnl_link_stats64 temp;
        const struct rtnl_link_stats64 *stats;
        static const char fmt64[] = "%30s %12llu\n";
@@ -291,8 +291,8 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
        stats = dev_get_stats(vlandev, &temp);
        seq_printf(seq,
                   "%s  VID: %d  REORDER_HDR: %i  dev->priv_flags: %hx\n",
-                  vlandev->name, dev_info->vlan_id,
-                  (int)(dev_info->flags & 1), vlandev->priv_flags);
+                  vlandev->name, vlan->vlan_id,
+                  (int)(vlan->flags & 1), vlandev->priv_flags);
 
        seq_printf(seq, fmt64, "total frames received", stats->rx_packets);
        seq_printf(seq, fmt64, "total bytes received", stats->rx_bytes);
@@ -300,23 +300,23 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
        seq_puts(seq, "\n");
        seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets);
        seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes);
-       seq_printf(seq, "Device: %s", dev_info->real_dev->name);
+       seq_printf(seq, "Device: %s", vlan->real_dev->name);
        /* now show all PRIORITY mappings relating to this VLAN */
        seq_printf(seq, "\nINGRESS priority mappings: "
                        "0:%u  1:%u  2:%u  3:%u  4:%u  5:%u  6:%u 7:%u\n",
-                  dev_info->ingress_priority_map[0],
-                  dev_info->ingress_priority_map[1],
-                  dev_info->ingress_priority_map[2],
-                  dev_info->ingress_priority_map[3],
-                  dev_info->ingress_priority_map[4],
-                  dev_info->ingress_priority_map[5],
-                  dev_info->ingress_priority_map[6],
-                  dev_info->ingress_priority_map[7]);
+                  vlan->ingress_priority_map[0],
+                  vlan->ingress_priority_map[1],
+                  vlan->ingress_priority_map[2],
+                  vlan->ingress_priority_map[3],
+                  vlan->ingress_priority_map[4],
+                  vlan->ingress_priority_map[5],
+                  vlan->ingress_priority_map[6],
+                  vlan->ingress_priority_map[7]);
 
        seq_printf(seq, " EGRESS priority mappings: ");
        for (i = 0; i < 16; i++) {
                const struct vlan_priority_tci_mapping *mp
-                       = dev_info->egress_priority_map[i];
+                       = vlan->egress_priority_map[i];
                while (mp) {
                        seq_printf(seq, "%u:%hu ",
                                   mp->priority, ((mp->vlan_qos >> 13) & 0x7));
index c84ce7f..c12c258 100644 (file)
@@ -338,7 +338,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
                dev->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }
-       n = dst_get_neighbour(dst);
+       n = dst_get_neighbour_noref(dst);
        if (!n) {
                pr_err("NO NEIGHBOUR !\n");
                dev_kfree_skb(skb);
index 9373a14..24403a7 100644 (file)
@@ -695,7 +695,7 @@ bool gw_out_of_range(struct bat_priv *bat_priv,
        }
 
        neigh_old = find_router(bat_priv, orig_dst_node, NULL);
-       if (!!neigh_old)
+       if (!neigh_old)
                goto out;
 
        if (curr_tq_avg - neigh_old->tq_avg > GW_THRESHOLD)
index ac3520e..d9c1e7b 100644 (file)
@@ -136,10 +136,9 @@ static ssize_t bat_socket_read(struct file *file, char __user *buf,
 
        spin_unlock_bh(&socket_client->lock);
 
-       error = __copy_to_user(buf, &socket_packet->icmp_packet,
-                              socket_packet->icmp_len);
+       packet_len = min(count, socket_packet->icmp_len);
+       error = copy_to_user(buf, &socket_packet->icmp_packet, packet_len);
 
-       packet_len = socket_packet->icmp_len;
        kfree(socket_packet);
 
        if (error)
@@ -187,12 +186,7 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
        skb_reserve(skb, sizeof(struct ethhdr));
        icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len);
 
-       if (!access_ok(VERIFY_READ, buff, packet_len)) {
-               len = -EFAULT;
-               goto free_skb;
-       }
-
-       if (__copy_from_user(icmp_packet, buff, packet_len)) {
+       if (copy_from_user(icmp_packet, buff, packet_len)) {
                len = -EFAULT;
                goto free_skb;
        }
@@ -217,7 +211,7 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
 
        if (icmp_packet->version != COMPAT_VERSION) {
                icmp_packet->msg_type = PARAMETER_PROBLEM;
-               icmp_packet->ttl = COMPAT_VERSION;
+               icmp_packet->version = COMPAT_VERSION;
                bat_socket_add_packet(socket_client, icmp_packet, packet_len);
                goto free_skb;
        }
index ef24a72..773e606 100644 (file)
@@ -627,8 +627,7 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
 
                        /* Ensure we have all the claimed data */
                        if (unlikely(skb_headlen(skb) <
-                                       sizeof(struct tt_query_packet) +
-                                       tt_len))
+                                    sizeof(struct tt_query_packet) + tt_len))
                                goto out;
 
                        handle_tt_response(bat_priv, tt_query);
index 45297c8..987c75a 100644 (file)
@@ -874,7 +874,7 @@ unreg_debugfs:
 unreg_sysfs:
        sysfs_del_meshif(soft_iface);
 unreg_soft_iface:
-       unregister_netdev(soft_iface);
+       unregister_netdevice(soft_iface);
        return NULL;
 
 free_soft_iface:
index 78b9528..ab8dea8 100644 (file)
@@ -36,18 +36,9 @@ static void _tt_global_del(struct bat_priv *bat_priv,
 static void tt_purge(struct work_struct *work);
 
 /* returns 1 if they are the same mac addr */
-static int compare_ltt(const struct hlist_node *node, const void *data2)
+static int compare_tt(const struct hlist_node *node, const void *data2)
 {
-       const void *data1 = container_of(node, struct tt_local_entry,
-                                        hash_entry);
-
-       return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
-}
-
-/* returns 1 if they are the same mac addr */
-static int compare_gtt(const struct hlist_node *node, const void *data2)
-{
-       const void *data1 = container_of(node, struct tt_global_entry,
+       const void *data1 = container_of(node, struct tt_common_entry,
                                         hash_entry);
 
        return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
@@ -60,13 +51,12 @@ static void tt_start_timer(struct bat_priv *bat_priv)
                           msecs_to_jiffies(5000));
 }
 
-static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
-                                                const void *data)
+static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash,
+                                           const void *data)
 {
-       struct hashtable_t *hash = bat_priv->tt_local_hash;
        struct hlist_head *head;
        struct hlist_node *node;
-       struct tt_local_entry *tt_local_entry, *tt_local_entry_tmp = NULL;
+       struct tt_common_entry *tt_common_entry, *tt_common_entry_tmp = NULL;
        uint32_t index;
 
        if (!hash)
@@ -76,51 +66,46 @@ static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
        head = &hash->table[index];
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(tt_local_entry, node, head, hash_entry) {
-               if (!compare_eth(tt_local_entry, data))
+       hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
+               if (!compare_eth(tt_common_entry, data))
                        continue;
 
-               if (!atomic_inc_not_zero(&tt_local_entry->refcount))
+               if (!atomic_inc_not_zero(&tt_common_entry->refcount))
                        continue;
 
-               tt_local_entry_tmp = tt_local_entry;
+               tt_common_entry_tmp = tt_common_entry;
                break;
        }
        rcu_read_unlock();
 
-       return tt_local_entry_tmp;
+       return tt_common_entry_tmp;
 }
 
-static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
-                                                  const void *data)
+static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
+                                                const void *data)
 {
-       struct hashtable_t *hash = bat_priv->tt_global_hash;
-       struct hlist_head *head;
-       struct hlist_node *node;
-       struct tt_global_entry *tt_global_entry;
-       struct tt_global_entry *tt_global_entry_tmp = NULL;
-       uint32_t index;
-
-       if (!hash)
-               return NULL;
+       struct tt_common_entry *tt_common_entry;
+       struct tt_local_entry *tt_local_entry = NULL;
 
-       index = choose_orig(data, hash->size);
-       head = &hash->table[index];
+       tt_common_entry = tt_hash_find(bat_priv->tt_local_hash, data);
+       if (tt_common_entry)
+               tt_local_entry = container_of(tt_common_entry,
+                                             struct tt_local_entry, common);
+       return tt_local_entry;
+}
 
-       rcu_read_lock();
-       hlist_for_each_entry_rcu(tt_global_entry, node, head, hash_entry) {
-               if (!compare_eth(tt_global_entry, data))
-                       continue;
+static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
+                                                  const void *data)
+{
+       struct tt_common_entry *tt_common_entry;
+       struct tt_global_entry *tt_global_entry = NULL;
 
-               if (!atomic_inc_not_zero(&tt_global_entry->refcount))
-                       continue;
+       tt_common_entry = tt_hash_find(bat_priv->tt_global_hash, data);
+       if (tt_common_entry)
+               tt_global_entry = container_of(tt_common_entry,
+                                              struct tt_global_entry, common);
+       return tt_global_entry;
 
-               tt_global_entry_tmp = tt_global_entry;
-               break;
-       }
-       rcu_read_unlock();
-
-       return tt_global_entry_tmp;
 }
 
 static bool is_out_of_time(unsigned long starting_time, unsigned long timeout)
@@ -133,15 +118,18 @@ static bool is_out_of_time(unsigned long starting_time, unsigned long timeout)
 
 static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
 {
-       if (atomic_dec_and_test(&tt_local_entry->refcount))
-               kfree_rcu(tt_local_entry, rcu);
+       if (atomic_dec_and_test(&tt_local_entry->common.refcount))
+               kfree_rcu(tt_local_entry, common.rcu);
 }
 
 static void tt_global_entry_free_rcu(struct rcu_head *rcu)
 {
+       struct tt_common_entry *tt_common_entry;
        struct tt_global_entry *tt_global_entry;
 
-       tt_global_entry = container_of(rcu, struct tt_global_entry, rcu);
+       tt_common_entry = container_of(rcu, struct tt_common_entry, rcu);
+       tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
+                                      common);
 
        if (tt_global_entry->orig_node)
                orig_node_free_ref(tt_global_entry->orig_node);
@@ -151,8 +139,9 @@ static void tt_global_entry_free_rcu(struct rcu_head *rcu)
 
 static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
 {
-       if (atomic_dec_and_test(&tt_global_entry->refcount))
-               call_rcu(&tt_global_entry->rcu, tt_global_entry_free_rcu);
+       if (atomic_dec_and_test(&tt_global_entry->common.refcount))
+               call_rcu(&tt_global_entry->common.rcu,
+                        tt_global_entry_free_rcu);
 }
 
 static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
@@ -201,6 +190,7 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
        struct bat_priv *bat_priv = netdev_priv(soft_iface);
        struct tt_local_entry *tt_local_entry = NULL;
        struct tt_global_entry *tt_global_entry = NULL;
+       int hash_added;
 
        tt_local_entry = tt_local_hash_find(bat_priv, addr);
 
@@ -217,26 +207,33 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
                "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
                (uint8_t)atomic_read(&bat_priv->ttvn));
 
-       memcpy(tt_local_entry->addr, addr, ETH_ALEN);
-       tt_local_entry->last_seen = jiffies;
-       tt_local_entry->flags = NO_FLAGS;
+       memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
+       tt_local_entry->common.flags = NO_FLAGS;
        if (is_wifi_iface(ifindex))
-               tt_local_entry->flags |= TT_CLIENT_WIFI;
-       atomic_set(&tt_local_entry->refcount, 2);
+               tt_local_entry->common.flags |= TT_CLIENT_WIFI;
+       atomic_set(&tt_local_entry->common.refcount, 2);
+       tt_local_entry->last_seen = jiffies;
 
        /* the batman interface mac address should never be purged */
        if (compare_eth(addr, soft_iface->dev_addr))
-               tt_local_entry->flags |= TT_CLIENT_NOPURGE;
+               tt_local_entry->common.flags |= TT_CLIENT_NOPURGE;
 
-       tt_local_event(bat_priv, addr, tt_local_entry->flags);
+       hash_added = hash_add(bat_priv->tt_local_hash, compare_tt, choose_orig,
+                        &tt_local_entry->common,
+                        &tt_local_entry->common.hash_entry);
+
+       if (unlikely(hash_added != 0)) {
+               /* remove the reference for the hash */
+               tt_local_entry_free_ref(tt_local_entry);
+               goto out;
+       }
+
+       tt_local_event(bat_priv, addr, tt_local_entry->common.flags);
 
        /* The local entry has to be marked as NEW to avoid to send it in
         * a full table response going out before the next ttvn increment
         * (consistency check) */
-       tt_local_entry->flags |= TT_CLIENT_NEW;
-
-       hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig,
-                tt_local_entry, &tt_local_entry->hash_entry);
+       tt_local_entry->common.flags |= TT_CLIENT_NEW;
 
        /* remove address from global hash if present */
        tt_global_entry = tt_global_hash_find(bat_priv, addr);
@@ -245,10 +242,11 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
        if (tt_global_entry) {
                /* This node is probably going to update its tt table */
                tt_global_entry->orig_node->tt_poss_change = true;
-               /* The global entry has to be marked as PENDING and has to be
+               /* The global entry has to be marked as ROAMING and has to be
                 * kept for consistency purpose */
-               tt_global_entry->flags |= TT_CLIENT_PENDING;
-               send_roam_adv(bat_priv, tt_global_entry->addr,
+               tt_global_entry->common.flags |= TT_CLIENT_ROAM;
+               tt_global_entry->roam_at = jiffies;
+               send_roam_adv(bat_priv, tt_global_entry->common.addr,
                              tt_global_entry->orig_node);
        }
 out:
@@ -310,7 +308,7 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct bat_priv *bat_priv = netdev_priv(net_dev);
        struct hashtable_t *hash = bat_priv->tt_local_hash;
-       struct tt_local_entry *tt_local_entry;
+       struct tt_common_entry *tt_common_entry;
        struct hard_iface *primary_if;
        struct hlist_node *node;
        struct hlist_head *head;
@@ -340,19 +338,19 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(tt_local_entry, node,
+               hlist_for_each_entry_rcu(tt_common_entry, node,
                                         head, hash_entry) {
                        seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
-                                       tt_local_entry->addr,
-                                       (tt_local_entry->flags &
+                                       tt_common_entry->addr,
+                                       (tt_common_entry->flags &
                                         TT_CLIENT_ROAM ? 'R' : '.'),
-                                       (tt_local_entry->flags &
+                                       (tt_common_entry->flags &
                                         TT_CLIENT_NOPURGE ? 'P' : '.'),
-                                       (tt_local_entry->flags &
+                                       (tt_common_entry->flags &
                                         TT_CLIENT_NEW ? 'N' : '.'),
-                                       (tt_local_entry->flags &
+                                       (tt_common_entry->flags &
                                         TT_CLIENT_PENDING ? 'X' : '.'),
-                                       (tt_local_entry->flags &
+                                       (tt_common_entry->flags &
                                         TT_CLIENT_WIFI ? 'W' : '.'));
                }
                rcu_read_unlock();
@@ -367,13 +365,13 @@ static void tt_local_set_pending(struct bat_priv *bat_priv,
                                 struct tt_local_entry *tt_local_entry,
                                 uint16_t flags)
 {
-       tt_local_event(bat_priv, tt_local_entry->addr,
-                      tt_local_entry->flags | flags);
+       tt_local_event(bat_priv, tt_local_entry->common.addr,
+                      tt_local_entry->common.flags | flags);
 
        /* The local client has to be marked as "pending to be removed" but has
         * to be kept in the table in order to send it in a full table
         * response issued before the net ttvn increment (consistency check) */
-       tt_local_entry->flags |= TT_CLIENT_PENDING;
+       tt_local_entry->common.flags |= TT_CLIENT_PENDING;
 }
 
 void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
@@ -389,7 +387,7 @@ void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
                             (roaming ? TT_CLIENT_ROAM : NO_FLAGS));
 
        bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) pending to be removed: "
-               "%s\n", tt_local_entry->addr, message);
+               "%s\n", tt_local_entry->common.addr, message);
 out:
        if (tt_local_entry)
                tt_local_entry_free_ref(tt_local_entry);
@@ -399,6 +397,7 @@ static void tt_local_purge(struct bat_priv *bat_priv)
 {
        struct hashtable_t *hash = bat_priv->tt_local_hash;
        struct tt_local_entry *tt_local_entry;
+       struct tt_common_entry *tt_common_entry;
        struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
        spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -409,13 +408,16 @@ static void tt_local_purge(struct bat_priv *bat_priv)
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
+               hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
                                          head, hash_entry) {
-                       if (tt_local_entry->flags & TT_CLIENT_NOPURGE)
+                       tt_local_entry = container_of(tt_common_entry,
+                                                     struct tt_local_entry,
+                                                     common);
+                       if (tt_local_entry->common.flags & TT_CLIENT_NOPURGE)
                                continue;
 
                        /* entry already marked for deletion */
-                       if (tt_local_entry->flags & TT_CLIENT_PENDING)
+                       if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
                                continue;
 
                        if (!is_out_of_time(tt_local_entry->last_seen,
@@ -426,7 +428,7 @@ static void tt_local_purge(struct bat_priv *bat_priv)
                                             TT_CLIENT_DEL);
                        bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) "
                                "pending to be removed: timed out\n",
-                               tt_local_entry->addr);
+                               tt_local_entry->common.addr);
                }
                spin_unlock_bh(list_lock);
        }
@@ -437,6 +439,7 @@ static void tt_local_table_free(struct bat_priv *bat_priv)
 {
        struct hashtable_t *hash;
        spinlock_t *list_lock; /* protects write access to the hash lists */
+       struct tt_common_entry *tt_common_entry;
        struct tt_local_entry *tt_local_entry;
        struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
@@ -452,9 +455,12 @@ static void tt_local_table_free(struct bat_priv *bat_priv)
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
+               hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
                                          head, hash_entry) {
                        hlist_del_rcu(node);
+                       tt_local_entry = container_of(tt_common_entry,
+                                                     struct tt_local_entry,
+                                                     common);
                        tt_local_entry_free_ref(tt_local_entry);
                }
                spin_unlock_bh(list_lock);
@@ -502,6 +508,7 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
        struct tt_global_entry *tt_global_entry;
        struct orig_node *orig_node_tmp;
        int ret = 0;
+       int hash_added;
 
        tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);
 
@@ -512,18 +519,24 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
                if (!tt_global_entry)
                        goto out;
 
-               memcpy(tt_global_entry->addr, tt_addr, ETH_ALEN);
+               memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN);
+               tt_global_entry->common.flags = NO_FLAGS;
+               atomic_set(&tt_global_entry->common.refcount, 2);
                /* Assign the new orig_node */
                atomic_inc(&orig_node->refcount);
                tt_global_entry->orig_node = orig_node;
                tt_global_entry->ttvn = ttvn;
-               tt_global_entry->flags = NO_FLAGS;
                tt_global_entry->roam_at = 0;
-               atomic_set(&tt_global_entry->refcount, 2);
 
-               hash_add(bat_priv->tt_global_hash, compare_gtt,
-                        choose_orig, tt_global_entry,
-                        &tt_global_entry->hash_entry);
+               hash_added = hash_add(bat_priv->tt_global_hash, compare_tt,
+                                choose_orig, &tt_global_entry->common,
+                                &tt_global_entry->common.hash_entry);
+
+               if (unlikely(hash_added != 0)) {
+                       /* remove the reference for the hash */
+                       tt_global_entry_free_ref(tt_global_entry);
+                       goto out_remove;
+               }
                atomic_inc(&orig_node->tt_size);
        } else {
                if (tt_global_entry->orig_node != orig_node) {
@@ -534,20 +547,21 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
                        orig_node_free_ref(orig_node_tmp);
                        atomic_inc(&orig_node->tt_size);
                }
+               tt_global_entry->common.flags = NO_FLAGS;
                tt_global_entry->ttvn = ttvn;
-               tt_global_entry->flags = NO_FLAGS;
                tt_global_entry->roam_at = 0;
        }
 
        if (wifi)
-               tt_global_entry->flags |= TT_CLIENT_WIFI;
+               tt_global_entry->common.flags |= TT_CLIENT_WIFI;
 
        bat_dbg(DBG_TT, bat_priv,
                "Creating new global tt entry: %pM (via %pM)\n",
-               tt_global_entry->addr, orig_node->orig);
+               tt_global_entry->common.addr, orig_node->orig);
 
+out_remove:
        /* remove address from local hash if present */
-       tt_local_remove(bat_priv, tt_global_entry->addr,
+       tt_local_remove(bat_priv, tt_global_entry->common.addr,
                        "global tt received", roaming);
        ret = 1;
 out:
@@ -561,6 +575,7 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct bat_priv *bat_priv = netdev_priv(net_dev);
        struct hashtable_t *hash = bat_priv->tt_global_hash;
+       struct tt_common_entry *tt_common_entry;
        struct tt_global_entry *tt_global_entry;
        struct hard_iface *primary_if;
        struct hlist_node *node;
@@ -593,20 +608,24 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(tt_global_entry, node,
+               hlist_for_each_entry_rcu(tt_common_entry, node,
                                         head, hash_entry) {
+                       tt_global_entry = container_of(tt_common_entry,
+                                                      struct tt_global_entry,
+                                                      common);
                        seq_printf(seq, " * %pM  (%3u) via %pM     (%3u)   "
-                                       "[%c%c%c]\n", tt_global_entry->addr,
+                                       "[%c%c%c]\n",
+                                       tt_global_entry->common.addr,
                                        tt_global_entry->ttvn,
                                        tt_global_entry->orig_node->orig,
                                        (uint8_t) atomic_read(
                                                &tt_global_entry->orig_node->
                                                last_ttvn),
-                                       (tt_global_entry->flags &
+                                       (tt_global_entry->common.flags &
                                         TT_CLIENT_ROAM ? 'R' : '.'),
-                                       (tt_global_entry->flags &
+                                       (tt_global_entry->common.flags &
                                         TT_CLIENT_PENDING ? 'X' : '.'),
-                                       (tt_global_entry->flags &
+                                       (tt_global_entry->common.flags &
                                         TT_CLIENT_WIFI ? 'W' : '.'));
                }
                rcu_read_unlock();
@@ -626,13 +645,13 @@ static void _tt_global_del(struct bat_priv *bat_priv,
 
        bat_dbg(DBG_TT, bat_priv,
                "Deleting global tt entry %pM (via %pM): %s\n",
-               tt_global_entry->addr, tt_global_entry->orig_node->orig,
+               tt_global_entry->common.addr, tt_global_entry->orig_node->orig,
                message);
 
        atomic_dec(&tt_global_entry->orig_node->tt_size);
 
-       hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig,
-                   tt_global_entry->addr);
+       hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig,
+                   tt_global_entry->common.addr);
 out:
        if (tt_global_entry)
                tt_global_entry_free_ref(tt_global_entry);
@@ -643,6 +662,7 @@ void tt_global_del(struct bat_priv *bat_priv,
                   const char *message, bool roaming)
 {
        struct tt_global_entry *tt_global_entry = NULL;
+       struct tt_local_entry *tt_local_entry = NULL;
 
        tt_global_entry = tt_global_hash_find(bat_priv, addr);
        if (!tt_global_entry)
@@ -650,21 +670,36 @@ void tt_global_del(struct bat_priv *bat_priv,
 
        if (tt_global_entry->orig_node == orig_node) {
                if (roaming) {
-                       tt_global_entry->flags |= TT_CLIENT_ROAM;
-                       tt_global_entry->roam_at = jiffies;
-                       goto out;
+                       /* if we are deleting a global entry due to a roam
+                        * event, there are two possibilities:
+                        * 1) the client roamed from node A to node B => we mark
+                        *    it with TT_CLIENT_ROAM, we start a timer and we
+                        *    wait for node B to claim it. In case of timeout
+                        *    the entry is purged.
+                        * 2) the client roamed to us => we can directly delete
+                        *    the global entry, since it is useless now. */
+                       tt_local_entry = tt_local_hash_find(bat_priv,
+                                                           tt_global_entry->common.addr);
+                       if (!tt_local_entry) {
+                               tt_global_entry->common.flags |= TT_CLIENT_ROAM;
+                               tt_global_entry->roam_at = jiffies;
+                               goto out;
+                       }
                }
                _tt_global_del(bat_priv, tt_global_entry, message);
        }
 out:
        if (tt_global_entry)
                tt_global_entry_free_ref(tt_global_entry);
+       if (tt_local_entry)
+               tt_local_entry_free_ref(tt_local_entry);
 }
 
 void tt_global_del_orig(struct bat_priv *bat_priv,
                        struct orig_node *orig_node, const char *message)
 {
        struct tt_global_entry *tt_global_entry;
+       struct tt_common_entry *tt_common_entry;
        uint32_t i;
        struct hashtable_t *hash = bat_priv->tt_global_hash;
        struct hlist_node *node, *safe;
@@ -679,13 +714,16 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               hlist_for_each_entry_safe(tt_global_entry, node, safe,
+               hlist_for_each_entry_safe(tt_common_entry, node, safe,
                                         head, hash_entry) {
+                       tt_global_entry = container_of(tt_common_entry,
+                                                      struct tt_global_entry,
+                                                      common);
                        if (tt_global_entry->orig_node == orig_node) {
                                bat_dbg(DBG_TT, bat_priv,
                                        "Deleting global tt entry %pM "
                                        "(via %pM): %s\n",
-                                       tt_global_entry->addr,
+                                       tt_global_entry->common.addr,
                                        tt_global_entry->orig_node->orig,
                                        message);
                                hlist_del_rcu(node);
@@ -700,6 +738,7 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
 static void tt_global_roam_purge(struct bat_priv *bat_priv)
 {
        struct hashtable_t *hash = bat_priv->tt_global_hash;
+       struct tt_common_entry *tt_common_entry;
        struct tt_global_entry *tt_global_entry;
        struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
@@ -711,9 +750,12 @@ static void tt_global_roam_purge(struct bat_priv *bat_priv)
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
+               hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
                                          head, hash_entry) {
-                       if (!(tt_global_entry->flags & TT_CLIENT_ROAM))
+                       tt_global_entry = container_of(tt_common_entry,
+                                                      struct tt_global_entry,
+                                                      common);
+                       if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM))
                                continue;
                        if (!is_out_of_time(tt_global_entry->roam_at,
                                            TT_CLIENT_ROAM_TIMEOUT * 1000))
@@ -721,7 +763,7 @@ static void tt_global_roam_purge(struct bat_priv *bat_priv)
 
                        bat_dbg(DBG_TT, bat_priv, "Deleting global "
                                "tt entry (%pM): Roaming timeout\n",
-                               tt_global_entry->addr);
+                               tt_global_entry->common.addr);
                        atomic_dec(&tt_global_entry->orig_node->tt_size);
                        hlist_del_rcu(node);
                        tt_global_entry_free_ref(tt_global_entry);
@@ -735,6 +777,7 @@ static void tt_global_table_free(struct bat_priv *bat_priv)
 {
        struct hashtable_t *hash;
        spinlock_t *list_lock; /* protects write access to the hash lists */
+       struct tt_common_entry *tt_common_entry;
        struct tt_global_entry *tt_global_entry;
        struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
@@ -750,9 +793,12 @@ static void tt_global_table_free(struct bat_priv *bat_priv)
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
+               hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
                                          head, hash_entry) {
                        hlist_del_rcu(node);
+                       tt_global_entry = container_of(tt_common_entry,
+                                                      struct tt_global_entry,
+                                                      common);
                        tt_global_entry_free_ref(tt_global_entry);
                }
                spin_unlock_bh(list_lock);
@@ -768,8 +814,8 @@ static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry,
 {
        bool ret = false;
 
-       if (tt_local_entry->flags & TT_CLIENT_WIFI &&
-           tt_global_entry->flags & TT_CLIENT_WIFI)
+       if (tt_local_entry->common.flags & TT_CLIENT_WIFI &&
+           tt_global_entry->common.flags & TT_CLIENT_WIFI)
                ret = true;
 
        return ret;
@@ -802,7 +848,7 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv,
 
        /* A global client marked as PENDING has already moved from that
         * originator */
-       if (tt_global_entry->flags & TT_CLIENT_PENDING)
+       if (tt_global_entry->common.flags & TT_CLIENT_PENDING)
                goto out;
 
        orig_node = tt_global_entry->orig_node;
@@ -821,6 +867,7 @@ uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node)
 {
        uint16_t total = 0, total_one;
        struct hashtable_t *hash = bat_priv->tt_global_hash;
+       struct tt_common_entry *tt_common_entry;
        struct tt_global_entry *tt_global_entry;
        struct hlist_node *node;
        struct hlist_head *head;
@@ -831,20 +878,23 @@ uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node)
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(tt_global_entry, node,
+               hlist_for_each_entry_rcu(tt_common_entry, node,
                                         head, hash_entry) {
+                       tt_global_entry = container_of(tt_common_entry,
+                                                      struct tt_global_entry,
+                                                      common);
                        if (compare_eth(tt_global_entry->orig_node,
                                        orig_node)) {
                                /* Roaming clients are in the global table for
                                 * consistency only. They don't have to be
                                 * taken into account while computing the
                                 * global crc */
-                               if (tt_global_entry->flags & TT_CLIENT_ROAM)
+                               if (tt_common_entry->flags & TT_CLIENT_ROAM)
                                        continue;
                                total_one = 0;
                                for (j = 0; j < ETH_ALEN; j++)
                                        total_one = crc16_byte(total_one,
-                                               tt_global_entry->addr[j]);
+                                               tt_common_entry->addr[j]);
                                total ^= total_one;
                        }
                }
@@ -859,7 +909,7 @@ uint16_t tt_local_crc(struct bat_priv *bat_priv)
 {
        uint16_t total = 0, total_one;
        struct hashtable_t *hash = bat_priv->tt_local_hash;
-       struct tt_local_entry *tt_local_entry;
+       struct tt_common_entry *tt_common_entry;
        struct hlist_node *node;
        struct hlist_head *head;
        uint32_t i;
@@ -869,16 +919,16 @@ uint16_t tt_local_crc(struct bat_priv *bat_priv)
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(tt_local_entry, node,
+               hlist_for_each_entry_rcu(tt_common_entry, node,
                                         head, hash_entry) {
                        /* not yet committed clients have not to be taken into
                         * account while computing the CRC */
-                       if (tt_local_entry->flags & TT_CLIENT_NEW)
+                       if (tt_common_entry->flags & TT_CLIENT_NEW)
                                continue;
                        total_one = 0;
                        for (j = 0; j < ETH_ALEN; j++)
                                total_one = crc16_byte(total_one,
-                                                  tt_local_entry->addr[j]);
+                                                  tt_common_entry->addr[j]);
                        total ^= total_one;
                }
                rcu_read_unlock();
@@ -967,21 +1017,25 @@ unlock:
 /* data_ptr is useless here, but has to be kept to respect the prototype */
 static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr)
 {
-       const struct tt_local_entry *tt_local_entry = entry_ptr;
+       const struct tt_common_entry *tt_common_entry = entry_ptr;
 
-       if (tt_local_entry->flags & TT_CLIENT_NEW)
+       if (tt_common_entry->flags & TT_CLIENT_NEW)
                return 0;
        return 1;
 }
 
 static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
 {
-       const struct tt_global_entry *tt_global_entry = entry_ptr;
+       const struct tt_common_entry *tt_common_entry = entry_ptr;
+       const struct tt_global_entry *tt_global_entry;
        const struct orig_node *orig_node = data_ptr;
 
-       if (tt_global_entry->flags & TT_CLIENT_ROAM)
+       if (tt_common_entry->flags & TT_CLIENT_ROAM)
                return 0;
 
+       tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
+                                      common);
+
        return (tt_global_entry->orig_node == orig_node);
 }
 
@@ -992,7 +1046,7 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
                                                              const void *),
                                              void *cb_data)
 {
-       struct tt_local_entry *tt_local_entry;
+       struct tt_common_entry *tt_common_entry;
        struct tt_query_packet *tt_response;
        struct tt_change *tt_change;
        struct hlist_node *node;
@@ -1024,15 +1078,16 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry_rcu(tt_local_entry, node,
+               hlist_for_each_entry_rcu(tt_common_entry, node,
                                         head, hash_entry) {
                        if (tt_count == tt_tot)
                                break;
 
-                       if ((valid_cb) && (!valid_cb(tt_local_entry, cb_data)))
+                       if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
                                continue;
 
-                       memcpy(tt_change->addr, tt_local_entry->addr, ETH_ALEN);
+                       memcpy(tt_change->addr, tt_common_entry->addr,
+                              ETH_ALEN);
                        tt_change->flags = NO_FLAGS;
 
                        tt_count++;
@@ -1449,7 +1504,7 @@ bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
                goto out;
        /* Check if the client has been logically deleted (but is kept for
         * consistency purpose) */
-       if (tt_local_entry->flags & TT_CLIENT_PENDING)
+       if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
                goto out;
        ret = true;
 out:
@@ -1672,40 +1727,48 @@ void tt_free(struct bat_priv *bat_priv)
        kfree(bat_priv->tt_buff);
 }
 
-/* This function will reset the specified flags from all the entries in
- * the given hash table and will increment num_local_tt for each involved
- * entry */
-static void tt_local_reset_flags(struct bat_priv *bat_priv, uint16_t flags)
+/* This function enables or disables the specified flags for all the entries
+ * in the given hash table and returns the number of modified entries */
+static uint16_t tt_set_flags(struct hashtable_t *hash, uint16_t flags,
+                            bool enable)
 {
        uint32_t i;
-       struct hashtable_t *hash = bat_priv->tt_local_hash;
+       uint16_t changed_num = 0;
        struct hlist_head *head;
        struct hlist_node *node;
-       struct tt_local_entry *tt_local_entry;
+       struct tt_common_entry *tt_common_entry;
 
        if (!hash)
-               return;
+               goto out;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(tt_local_entry, node,
+               hlist_for_each_entry_rcu(tt_common_entry, node,
                                         head, hash_entry) {
-                       if (!(tt_local_entry->flags & flags))
-                               continue;
-                       tt_local_entry->flags &= ~flags;
-                       atomic_inc(&bat_priv->num_local_tt);
+                       if (enable) {
+                               if ((tt_common_entry->flags & flags) == flags)
+                                       continue;
+                               tt_common_entry->flags |= flags;
+                       } else {
+                               if (!(tt_common_entry->flags & flags))
+                                       continue;
+                               tt_common_entry->flags &= ~flags;
+                       }
+                       changed_num++;
                }
                rcu_read_unlock();
        }
-
+out:
+       return changed_num;
 }
 
 /* Purge out all the tt local entries marked with TT_CLIENT_PENDING */
 static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
 {
        struct hashtable_t *hash = bat_priv->tt_local_hash;
+       struct tt_common_entry *tt_common_entry;
        struct tt_local_entry *tt_local_entry;
        struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
@@ -1720,16 +1783,19 @@ static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
+               hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
                                          head, hash_entry) {
-                       if (!(tt_local_entry->flags & TT_CLIENT_PENDING))
+                       if (!(tt_common_entry->flags & TT_CLIENT_PENDING))
                                continue;
 
                        bat_dbg(DBG_TT, bat_priv, "Deleting local tt entry "
-                               "(%pM): pending\n", tt_local_entry->addr);
+                               "(%pM): pending\n", tt_common_entry->addr);
 
                        atomic_dec(&bat_priv->num_local_tt);
                        hlist_del_rcu(node);
+                       tt_local_entry = container_of(tt_common_entry,
+                                                     struct tt_local_entry,
+                                                     common);
                        tt_local_entry_free_ref(tt_local_entry);
                }
                spin_unlock_bh(list_lock);
@@ -1739,7 +1805,11 @@ static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
 
 void tt_commit_changes(struct bat_priv *bat_priv)
 {
-       tt_local_reset_flags(bat_priv, TT_CLIENT_NEW);
+       uint16_t changed_num = tt_set_flags(bat_priv->tt_local_hash,
+                                           TT_CLIENT_NEW, false);
+       /* all the reset entries now have to be counted as local
+        * entries */
+       atomic_add(changed_num, &bat_priv->num_local_tt);
        tt_local_purge_pending_clients(bat_priv);
 
        /* Increment the TTVN only once per OGM interval */
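tt_set_flags() is symmetric: with enable set it ORs the given flags in, otherwise it clears them, and in both directions only entries whose flag state actually changes contribute to the returned count. A purely illustrative caller (mark_all_roaming() and this use of TT_CLIENT_ROAM are not part of the patch):

static uint16_t mark_all_roaming(struct bat_priv *bat_priv)
{
	/* returns how many global entries were newly flagged;
	 * entries already carrying TT_CLIENT_ROAM are skipped */
	return tt_set_flags(bat_priv->tt_global_hash, TT_CLIENT_ROAM, true);
}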
index ab8d0fe..e9eb043 100644 (file)
@@ -222,24 +222,24 @@ struct socket_packet {
        struct icmp_packet_rr icmp_packet;
 };
 
-struct tt_local_entry {
+struct tt_common_entry {
        uint8_t addr[ETH_ALEN];
        struct hlist_node hash_entry;
-       unsigned long last_seen;
        uint16_t flags;
        atomic_t refcount;
        struct rcu_head rcu;
 };
 
+struct tt_local_entry {
+       struct tt_common_entry common;
+       unsigned long last_seen;
+};
+
 struct tt_global_entry {
-       uint8_t addr[ETH_ALEN];
-       struct hlist_node hash_entry; /* entry in the global table */
+       struct tt_common_entry common;
        struct orig_node *orig_node;
        uint8_t ttvn;
-       uint16_t flags; /* only TT_GLOBAL_ROAM is used */
        unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
-       atomic_t refcount;
-       struct rcu_head rcu;
 };
 
 struct tt_change_node {
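The shared fields now live in struct tt_common_entry, embedded as the first member of both specialised types, so the hash tables link and look up tt_common_entry objects while callers recover the outer structure with container_of(). A minimal illustration of that round trip (touch_last_seen() is hypothetical, not part of the patch):

static void touch_last_seen(struct tt_common_entry *tt_common_entry)
{
	struct tt_local_entry *tt_local_entry;

	/* valid because 'common' is embedded in struct tt_local_entry */
	tt_local_entry = container_of(tt_common_entry,
				      struct tt_local_entry, common);
	tt_local_entry->last_seen = jiffies;
}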
index 7445413..cc3b9f2 100644 (file)
@@ -609,7 +609,7 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
        struct vis_info *info = bat_priv->my_vis_info;
        struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
        struct vis_info_entry *entry;
-       struct tt_local_entry *tt_local_entry;
+       struct tt_common_entry *tt_common_entry;
        int best_tq = -1;
        uint32_t i;
 
@@ -672,13 +672,13 @@ next:
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(tt_local_entry, node, head,
+               hlist_for_each_entry_rcu(tt_common_entry, node, head,
                                         hash_entry) {
                        entry = (struct vis_info_entry *)
                                        skb_put(info->skb_packet,
                                                sizeof(*entry));
                        memset(entry->src, 0, ETH_ALEN);
-                       memcpy(entry->dest, tt_local_entry->addr, ETH_ALEN);
+                       memcpy(entry->dest, tt_common_entry->addr, ETH_ALEN);
                        entry->quality = 0; /* 0 means TT */
                        packet->entries++;
 
index a6cd856..a779ec7 100644 (file)
@@ -56,8 +56,8 @@
 
 #define VERSION "1.3"
 
-static int compress_src = 1;
-static int compress_dst = 1;
+static bool compress_src = true;
+static bool compress_dst = true;
 
 static LIST_HEAD(bnep_session_list);
 static DECLARE_RWSEM(bnep_session_sem);
@@ -77,17 +77,12 @@ static struct bnep_session *__bnep_get_session(u8 *dst)
 
 static void __bnep_link_session(struct bnep_session *s)
 {
-       /* It's safe to call __module_get() here because sessions are added
-          by the socket layer which has to hold the reference to this module.
-        */
-       __module_get(THIS_MODULE);
        list_add(&s->list, &bnep_session_list);
 }
 
 static void __bnep_unlink_session(struct bnep_session *s)
 {
        list_del(&s->list);
-       module_put(THIS_MODULE);
 }
 
 static int bnep_send(struct bnep_session *s, void *data, size_t len)
@@ -528,6 +523,7 @@ static int bnep_session(void *arg)
 
        up_write(&bnep_session_sem);
        free_netdev(dev);
+       module_put_and_exit(0);
        return 0;
 }
 
@@ -614,9 +610,11 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
 
        __bnep_link_session(s);
 
+       __module_get(THIS_MODULE);
        s->task = kthread_run(bnep_session, s, "kbnepd %s", dev->name);
        if (IS_ERR(s->task)) {
                /* Session thread start failed, gotta cleanup. */
+               module_put(THIS_MODULE);
                unregister_netdev(dev);
                __bnep_unlink_session(s);
                err = PTR_ERR(s->task);
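The bnep (and, below, cmtp) hunks tie the module reference to the session kthread instead of the link/unlink helpers: the reference is taken just before kthread_run(), dropped again if the thread cannot be started, and otherwise released by the thread itself through module_put_and_exit(). A stripped-down sketch of that lifetime rule, with struct session, session_thread() and start_session() as illustrative names only:

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>

struct session {
	struct task_struct *task;
};

static int session_thread(void *arg)
{
	/* ... serve the session, clean up ... */
	module_put_and_exit(0);	/* drops the ref and calls do_exit() */
	return 0;		/* never reached */
}

static int start_session(struct session *s)
{
	__module_get(THIS_MODULE);	/* reference handed over to the thread */
	s->task = kthread_run(session_thread, s, "ksessiond");
	if (IS_ERR(s->task)) {
		module_put(THIS_MODULE);	/* thread never ran, take it back */
		return PTR_ERR(s->task);
	}
	return 0;
}

Releasing the reference via module_put_and_exit() inside the thread avoids dropping the last module reference while the thread is still executing module code.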
index 9e8940b..6c9c1fd 100644 (file)
@@ -65,14 +65,12 @@ static struct cmtp_session *__cmtp_get_session(bdaddr_t *bdaddr)
 
 static void __cmtp_link_session(struct cmtp_session *session)
 {
-       __module_get(THIS_MODULE);
        list_add(&session->list, &cmtp_session_list);
 }
 
 static void __cmtp_unlink_session(struct cmtp_session *session)
 {
        list_del(&session->list);
-       module_put(THIS_MODULE);
 }
 
 static void __cmtp_copy_session(struct cmtp_session *session, struct cmtp_conninfo *ci)
@@ -325,6 +323,7 @@ static int cmtp_session(void *arg)
        up_write(&cmtp_session_sem);
 
        kfree(session);
+       module_put_and_exit(0);
        return 0;
 }
 
@@ -374,9 +373,11 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
 
        __cmtp_link_session(session);
 
+       __module_get(THIS_MODULE);
        session->task = kthread_run(cmtp_session, session, "kcmtpd_ctr_%d",
                                                                session->num);
        if (IS_ERR(session->task)) {
+               module_put(THIS_MODULE);
                err = PTR_ERR(session->task);
                goto unlink;
        }
index de0b93e..3fa08dd 100644 (file)
@@ -123,7 +123,7 @@ static void hci_acl_connect_cancel(struct hci_conn *conn)
 
        BT_DBG("%p", conn);
 
-       if (conn->hdev->hci_ver < 2)
+       if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;
 
        bacpy(&cp.bdaddr, &conn->dst);
@@ -674,7 +674,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
                goto encrypt;
 
 auth:
-       if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
+       if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
                return 0;
 
        if (!hci_conn_auth(conn, sec_level, auth_type))
index fb3feeb..ce3727e 100644 (file)
@@ -54,6 +54,8 @@
 
 #define AUTO_OFF_TIMEOUT 2000
 
+int enable_hs;
+
 static void hci_cmd_task(unsigned long arg);
 static void hci_rx_task(unsigned long arg);
 static void hci_tx_task(unsigned long arg);
@@ -228,18 +230,6 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
 
-#if 0
-       /* Host buffer size */
-       {
-               struct hci_cp_host_buffer_size cp;
-               cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
-               cp.sco_mtu = HCI_MAX_SCO_SIZE;
-               cp.acl_max_pkt = cpu_to_le16(0xffff);
-               cp.sco_max_pkt = cpu_to_le16(0xffff);
-               hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
-       }
-#endif
-
        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
 
@@ -521,8 +511,9 @@ int hci_dev_open(__u16 dev)
        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);
 
-       /* Treat all non BR/EDR controllers as raw devices for now */
-       if (hdev->dev_type != HCI_BREDR)
+       /* Treat all non BR/EDR controllers as raw devices if
+          enable_hs is not set */
+       if (hdev->dev_type != HCI_BREDR && !enable_hs)
                set_bit(HCI_RAW, &hdev->flags);
 
        if (hdev->open(hdev)) {
@@ -1336,14 +1327,12 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
 {
        struct bdaddr_list *entry;
 
-       if (bacmp(bdaddr, BDADDR_ANY) == 0) {
+       if (bacmp(bdaddr, BDADDR_ANY) == 0)
                return hci_blacklist_clear(hdev);
-       }
 
        entry = hci_blacklist_lookup(hdev, bdaddr);
-       if (!entry) {
+       if (!entry)
                return -ENOENT;
-       }
 
        list_del(&entry->list);
        kfree(entry);
@@ -1451,12 +1440,13 @@ int hci_register_dev(struct hci_dev *hdev)
 
        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
-       list_add(&hdev->list, head);
+       list_add_tail(&hdev->list, head);
 
        atomic_set(&hdev->refcnt, 1);
        spin_lock_init(&hdev->lock);
 
        hdev->flags = 0;
+       hdev->dev_flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
@@ -2614,3 +2604,6 @@ int hci_cancel_inquiry(struct hci_dev *hdev)
 
        return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
 }
+
+module_param(enable_hs, bool, 0644);
+MODULE_PARM_DESC(enable_hs, "Enable High Speed");
index a89cf1f..918dc09 100644 (file)
@@ -45,7 +45,7 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
-static int enable_le;
+static bool enable_le;
 
 /* Handle HCI Event packets */
 
@@ -55,8 +55,12 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
 
        BT_DBG("%s status 0x%x", hdev->name, status);
 
-       if (status)
+       if (status) {
+               hci_dev_lock(hdev);
+               mgmt_stop_discovery_failed(hdev, status);
+               hci_dev_unlock(hdev);
                return;
+       }
 
        clear_bit(HCI_INQUIRY, &hdev->flags);
 
@@ -190,6 +194,8 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
        clear_bit(HCI_RESET, &hdev->flags);
 
        hci_req_complete(hdev, HCI_OP_RESET, status);
+
+       hdev->dev_flags = 0;
 }
 
 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -494,7 +500,7 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
 
        /* CSR 1.1 dongles does not accept any bitfield so don't try to set
         * any event mask for pre 1.2 devices */
-       if (hdev->lmp_ver <= 1)
+       if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;
 
        events[4] |= 0x01; /* Flow Specification Complete */
@@ -558,7 +564,7 @@ static void hci_setup(struct hci_dev *hdev)
 {
        hci_setup_event_mask(hdev);
 
-       if (hdev->lmp_ver > 1)
+       if (hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
 
        if (hdev->features[6] & LMP_SIMPLE_PAIR) {
@@ -713,6 +719,21 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
        hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
 }
 
+static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
+                                               struct sk_buff *skb)
+{
+       struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
+
+       BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+       if (rp->status)
+               return;
+
+       hdev->flow_ctl_mode = rp->mode;
+
+       hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
+}
+
 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_rp_read_buffer_size *rp = (void *) skb->data;
@@ -927,6 +948,37 @@ static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
        hci_dev_unlock(hdev);
 }
 
+static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
+
+       BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+       hci_dev_lock(hdev);
+
+       if (test_bit(HCI_MGMT, &hdev->flags))
+               mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr,
+                                                               rp->status);
+
+       hci_dev_unlock(hdev);
+}
+
+static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
+                                                       struct sk_buff *skb)
+{
+       struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
+
+       BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+       hci_dev_lock(hdev);
+
+       if (test_bit(HCI_MGMT, &hdev->flags))
+               mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
+                                                               rp->status);
+
+       hci_dev_unlock(hdev);
+}
+
 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
                                                        struct sk_buff *skb)
 {
@@ -940,6 +992,13 @@ static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
        hci_dev_unlock(hdev);
 }
 
+static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       __u8 status = *((__u8 *) skb->data);
+
+       BT_DBG("%s status 0x%x", hdev->name, status);
+}
+
 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
                                        struct sk_buff *skb)
 {
@@ -956,12 +1015,16 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
                return;
 
        if (cp->enable == 0x01) {
+               set_bit(HCI_LE_SCAN, &hdev->dev_flags);
+
                del_timer(&hdev->adv_timer);
 
                hci_dev_lock(hdev);
                hci_adv_entries_clear(hdev);
                hci_dev_unlock(hdev);
        } else if (cp->enable == 0x00) {
+               clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
+
                mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT);
        }
 }
@@ -1014,7 +1077,7 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
                hci_conn_check_pending(hdev);
                hci_dev_lock(hdev);
                if (test_bit(HCI_MGMT, &hdev->flags))
-                       mgmt_inquiry_failed(hdev, status);
+                       mgmt_start_discovery_failed(hdev, status);
                hci_dev_unlock(hdev);
                return;
        }
@@ -1437,7 +1500,7 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
                data.rssi               = 0x00;
                data.ssp_mode           = 0x00;
                hci_inquiry_cache_update(hdev, &data);
-               mgmt_device_found(hdev, &info->bdaddr, ACL_LINK,
+               mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
                                                info->dev_class, 0, NULL);
        }
 
@@ -1472,7 +1535,8 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
                        conn->state = BT_CONFIG;
                        hci_conn_hold(conn);
                        conn->disc_timeout = HCI_DISCONN_TIMEOUT;
-                       mgmt_connected(hdev, &ev->bdaddr, conn->type);
+                       mgmt_connected(hdev, &ev->bdaddr, conn->type,
+                                                       conn->dst_type);
                } else
                        conn->state = BT_CONNECTED;
 
@@ -1494,7 +1558,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
                }
 
                /* Set packet type for incoming connection */
-               if (!conn->out && hdev->hci_ver < 3) {
+               if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
                        struct hci_cp_change_conn_ptype cp;
                        cp.handle = ev->handle;
                        cp.pkt_type = cpu_to_le16(conn->pkt_type);
@@ -1505,7 +1569,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
                conn->state = BT_CLOSED;
                if (conn->type == ACL_LINK)
                        mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
-                                                               ev->status);
+                                               conn->dst_type, ev->status);
        }
 
        if (conn->type == ACL_LINK)
@@ -1604,26 +1668,27 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
 
        BT_DBG("%s status %d", hdev->name, ev->status);
 
-       if (ev->status) {
-               hci_dev_lock(hdev);
-               mgmt_disconnect_failed(hdev);
-               hci_dev_unlock(hdev);
-               return;
-       }
-
        hci_dev_lock(hdev);
 
        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
        if (!conn)
                goto unlock;
 
-       conn->state = BT_CLOSED;
+       if (ev->status == 0)
+               conn->state = BT_CLOSED;
 
-       if (conn->type == ACL_LINK || conn->type == LE_LINK)
-               mgmt_disconnected(hdev, &conn->dst, conn->type);
+       if (conn->type == ACL_LINK || conn->type == LE_LINK) {
+               if (ev->status != 0)
+                       mgmt_disconnect_failed(hdev, &conn->dst, ev->status);
+               else
+                       mgmt_disconnected(hdev, &conn->dst, conn->type,
+                                                       conn->dst_type);
+       }
 
-       hci_proto_disconn_cfm(conn, ev->reason);
-       hci_conn_del(conn);
+       if (ev->status == 0) {
+               hci_proto_disconn_cfm(conn, ev->reason);
+               hci_conn_del(conn);
+       }
 
 unlock:
        hci_dev_unlock(hdev);
@@ -1961,6 +2026,10 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
                hci_cc_write_ca_timeout(hdev, skb);
                break;
 
+       case HCI_OP_READ_FLOW_CONTROL_MODE:
+               hci_cc_read_flow_control_mode(hdev, skb);
+               break;
+
        case HCI_OP_READ_LOCAL_AMP_INFO:
                hci_cc_read_local_amp_info(hdev, skb);
                break;
@@ -2009,6 +2078,18 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
                hci_cc_user_confirm_neg_reply(hdev, skb);
                break;
 
+       case HCI_OP_USER_PASSKEY_REPLY:
+               hci_cc_user_passkey_reply(hdev, skb);
+               break;
+
+       case HCI_OP_USER_PASSKEY_NEG_REPLY:
+               hci_cc_user_passkey_neg_reply(hdev, skb);
+               break;
+
+       case HCI_OP_LE_SET_SCAN_PARAM:
+               hci_cc_le_set_scan_param(hdev, skb);
+               break;
+
        case HCI_OP_LE_SET_SCAN_ENABLE:
                hci_cc_le_set_scan_enable(hdev, skb);
                break;
@@ -2096,7 +2176,7 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
 
        case HCI_OP_DISCONNECT:
                if (ev->status != 0)
-                       mgmt_disconnect_failed(hdev);
+                       mgmt_disconnect_failed(hdev, NULL, ev->status);
                break;
 
        case HCI_OP_LE_CREATE_CONN:
@@ -2444,7 +2524,7 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
                        data.rssi               = info->rssi;
                        data.ssp_mode           = 0x00;
                        hci_inquiry_cache_update(hdev, &data);
-                       mgmt_device_found(hdev, &info->bdaddr, ACL_LINK,
+                       mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
                                                info->dev_class, info->rssi,
                                                NULL);
                }
@@ -2461,7 +2541,7 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
                        data.rssi               = info->rssi;
                        data.ssp_mode           = 0x00;
                        hci_inquiry_cache_update(hdev, &data);
-                       mgmt_device_found(hdev, &info->bdaddr, ACL_LINK,
+                       mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
                                                info->dev_class, info->rssi,
                                                NULL);
                }
@@ -2604,7 +2684,7 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
                data.rssi               = info->rssi;
                data.ssp_mode           = 0x01;
                hci_inquiry_cache_update(hdev, &data);
-               mgmt_device_found(hdev, &info->bdaddr, ACL_LINK,
+               mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
                                info->dev_class, info->rssi, info->data);
        }
 
@@ -2768,6 +2848,21 @@ unlock:
        hci_dev_unlock(hdev);
 }
 
+static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
+                                                       struct sk_buff *skb)
+{
+       struct hci_ev_user_passkey_req *ev = (void *) skb->data;
+
+       BT_DBG("%s", hdev->name);
+
+       hci_dev_lock(hdev);
+
+       if (test_bit(HCI_MGMT, &hdev->flags))
+               mgmt_user_passkey_request(hdev, &ev->bdaddr);
+
+       hci_dev_unlock(hdev);
+}
+
 static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
@@ -2868,14 +2963,15 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
        }
 
        if (ev->status) {
-               mgmt_connect_failed(hdev, &ev->bdaddr, conn->type, ev->status);
+               mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
+                                               conn->dst_type, ev->status);
                hci_proto_connect_cfm(conn, ev->status);
                conn->state = BT_CLOSED;
                hci_conn_del(conn);
                goto unlock;
        }
 
-       mgmt_connected(hdev, &ev->bdaddr, conn->type);
+       mgmt_connected(hdev, &ev->bdaddr, conn->type, conn->dst_type);
 
        conn->sec_level = BT_SECURITY_LOW;
        conn->handle = __le16_to_cpu(ev->handle);
@@ -3106,6 +3202,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
                hci_user_confirm_request_evt(hdev, skb);
                break;
 
+       case HCI_EV_USER_PASSKEY_REQUEST:
+               hci_user_passkey_request_evt(hdev, skb);
+               break;
+
        case HCI_EV_SIMPLE_PAIR_COMPLETE:
                hci_simple_pair_complete_evt(hdev, skb);
                break;
index f6afe3d..78746cf 100644 (file)
@@ -49,7 +49,7 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
-static int enable_mgmt;
+static bool enable_mgmt;
 
 /* ----- HCI socket interface ----- */
 
index e8a6837..9bc22e4 100644 (file)
@@ -56,8 +56,7 @@
 #include <net/bluetooth/l2cap.h>
 #include <net/bluetooth/smp.h>
 
-int disable_ertm;
-int enable_hs;
+bool disable_ertm;
 
 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
@@ -97,7 +96,6 @@ static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16
                        return c;
        }
        return NULL;
-
 }
 
 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
@@ -154,12 +152,9 @@ static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
 
        list_for_each_entry(c, &chan_list, global_l) {
                if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
-                       goto found;
+                       return c;
        }
-
-       c = NULL;
-found:
-       return c;
+       return NULL;
 }
 
 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
@@ -234,8 +229,37 @@ static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
                chan_put(chan);
 }
 
+static char *state_to_string(int state)
+{
+       switch (state) {
+       case BT_CONNECTED:
+               return "BT_CONNECTED";
+       case BT_OPEN:
+               return "BT_OPEN";
+       case BT_BOUND:
+               return "BT_BOUND";
+       case BT_LISTEN:
+               return "BT_LISTEN";
+       case BT_CONNECT:
+               return "BT_CONNECT";
+       case BT_CONNECT2:
+               return "BT_CONNECT2";
+       case BT_CONFIG:
+               return "BT_CONFIG";
+       case BT_DISCONN:
+               return "BT_DISCONN";
+       case BT_CLOSED:
+               return "BT_CLOSED";
+       }
+
+       return "invalid state";
+}
+
 static void l2cap_state_change(struct l2cap_chan *chan, int state)
 {
+       BT_DBG("%p %s -> %s", chan, state_to_string(chan->state),
+                                               state_to_string(state));
+
        chan->state = state;
        chan->ops->state_change(chan->data, state);
 }
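
The new state_to_string() helper and the BT_DBG() call added to l2cap_state_change() make every channel state transition visible in the debug log. A minimal userspace sketch of the same pattern follows; the enum values and function names are made up for illustration and are not the kernel's BT_* constants:

    #include <stdio.h>

    enum chan_state { ST_CLOSED, ST_CONNECT, ST_CONFIG, ST_CONNECTED };

    static const char *state_to_name(enum chan_state state)
    {
            switch (state) {
            case ST_CLOSED:         return "ST_CLOSED";
            case ST_CONNECT:        return "ST_CONNECT";
            case ST_CONFIG:         return "ST_CONFIG";
            case ST_CONNECTED:      return "ST_CONNECTED";
            }
            return "invalid state";
    }

    struct chan {
            enum chan_state state;
    };

    /* Log the old and new state before committing the change, mirroring
     * the BT_DBG() call added to l2cap_state_change() above. */
    static void chan_state_change(struct chan *c, enum chan_state state)
    {
            printf("%p %s -> %s\n", (void *) c,
                   state_to_name(c->state), state_to_name(state));
            c->state = state;
    }

    int main(void)
    {
            struct chan c = { ST_CLOSED };

            chan_state_change(&c, ST_CONNECT);
            chan_state_change(&c, ST_CONNECTED);
            return 0;
    }
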
@@ -518,7 +542,7 @@ static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
 }
 
 /* Service level security */
-static inline int l2cap_check_security(struct l2cap_chan *chan)
+int l2cap_chan_check_security(struct l2cap_chan *chan)
 {
        struct l2cap_conn *conn = chan->conn;
        __u8 auth_type;
@@ -664,7 +688,7 @@ static void l2cap_do_start(struct l2cap_chan *chan)
                if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
                        return;
 
-               if (l2cap_check_security(chan) &&
+               if (l2cap_chan_check_security(chan) &&
                                __l2cap_no_conn_pending(chan)) {
                        struct l2cap_conn_req req;
                        req.scid = cpu_to_le16(chan->scid);
@@ -754,7 +778,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
                if (chan->state == BT_CONNECT) {
                        struct l2cap_conn_req req;
 
-                       if (!l2cap_check_security(chan) ||
+                       if (!l2cap_chan_check_security(chan) ||
                                        !__l2cap_no_conn_pending(chan)) {
                                bh_unlock_sock(sk);
                                continue;
@@ -787,7 +811,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
                        rsp.scid = cpu_to_le16(chan->dcid);
                        rsp.dcid = cpu_to_le16(chan->scid);
 
-                       if (l2cap_check_security(chan)) {
+                       if (l2cap_chan_check_security(chan)) {
                                if (bt_sk(sk)->defer_setup) {
                                        struct sock *parent = bt_sk(sk)->parent;
                                        rsp.result = cpu_to_le16(L2CAP_CR_PEND);
@@ -1181,7 +1205,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan)
        if (hcon->state == BT_CONNECTED) {
                if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
                        __clear_chan_timer(chan);
-                       if (l2cap_check_security(chan))
+                       if (l2cap_chan_check_security(chan))
                                l2cap_state_change(chan, BT_CONNECTED);
                } else
                        l2cap_do_start(chan);
@@ -1318,14 +1342,12 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
        if (!skb)
                return;
 
-       do {
-               if (bt_cb(skb)->tx_seq == tx_seq)
-                       break;
-
+       while (bt_cb(skb)->tx_seq != tx_seq) {
                if (skb_queue_is_last(&chan->tx_q, skb))
                        return;
 
-       } while ((skb = skb_queue_next(&chan->tx_q, skb)));
+               skb = skb_queue_next(&chan->tx_q, skb);
+       }
 
        if (chan->remote_max_tx &&
                        bt_cb(skb)->retries == chan->remote_max_tx) {
@@ -1906,7 +1928,7 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
 {
        struct l2cap_conf_efs efs;
 
-       switch(chan->mode) {
+       switch (chan->mode) {
        case L2CAP_MODE_ERTM:
                efs.id          = chan->local_id;
                efs.stype       = chan->local_stype;
@@ -2350,7 +2372,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
        void *ptr = req->data;
        int type, olen;
        unsigned long val;
-       struct l2cap_conf_rfc rfc;
+       struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
        struct l2cap_conf_efs efs;
 
        BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
@@ -2500,6 +2522,16 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
                }
        }
 
+       /* Use sane default values in case a misbehaving remote device
+        * did not send an RFC option.
+        */
+       rfc.mode = chan->mode;
+       rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+       rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
+       rfc.max_pdu_size = cpu_to_le16(chan->imtu);
+
+       BT_ERR("Expected RFC option was not found, using defaults");
+
 done:
        switch (rfc.mode) {
        case L2CAP_MODE_ERTM:
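
The block added above fills in sane defaults when a misbehaving remote never sent an RFC option, so the code after the done: label no longer reads an uninitialised structure. A small sketch of the defaults-then-override shape; the struct fields and timeout values are illustrative only:

    #include <stdio.h>

    /* Hypothetical configuration record; names are illustrative only. */
    struct rfc_opts {
            int mode;
            int retrans_timeout;
            int monitor_timeout;
            int max_pdu_size;
    };

    #define MODE_BASIC       0
    #define DEFAULT_RETRANS  2000
    #define DEFAULT_MONITOR 12000

    /* Start from defaults, then let the (possibly absent) received option
     * override them; warn when the peer never sent one. */
    static void apply_rfc(struct rfc_opts *out, const struct rfc_opts *received,
                          int local_mtu)
    {
            out->mode = MODE_BASIC;
            out->retrans_timeout = DEFAULT_RETRANS;
            out->monitor_timeout = DEFAULT_MONITOR;
            out->max_pdu_size = local_mtu;

            if (received)
                    *out = *received;
            else
                    fprintf(stderr, "Expected RFC option not found, using defaults\n");
    }

    int main(void)
    {
            struct rfc_opts opts;

            apply_rfc(&opts, NULL, 672);    /* remote sent no RFC option */
            printf("mode=%d retrans=%d\n", opts.mode, opts.retrans_timeout);
            return 0;
    }
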
@@ -2606,7 +2638,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
        chan->ident = cmd->ident;
 
        if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
-               if (l2cap_check_security(chan)) {
+               if (l2cap_chan_check_security(chan)) {
                        if (bt_sk(sk)->defer_setup) {
                                l2cap_state_change(chan, BT_CONNECT2);
                                result = L2CAP_CR_PEND;
@@ -3019,7 +3051,7 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
 
        /* don't delete l2cap channel if sk is owned by user */
        if (sock_owned_by_user(sk)) {
-               l2cap_state_change(chan,BT_DISCONN);
+               l2cap_state_change(chan, BT_DISCONN);
                __clear_chan_timer(chan);
                __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
                bh_unlock_sock(sk);
@@ -3562,14 +3594,10 @@ static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb,
        bt_cb(skb)->sar = sar;
 
        next_skb = skb_peek(&chan->srej_q);
-       if (!next_skb) {
-               __skb_queue_tail(&chan->srej_q, skb);
-               return 0;
-       }
 
        tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
 
-       do {
+       while (next_skb) {
                if (bt_cb(next_skb)->tx_seq == tx_seq)
                        return -EINVAL;
 
@@ -3582,9 +3610,10 @@ static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb,
                }
 
                if (skb_queue_is_last(&chan->srej_q, next_skb))
-                       break;
-
-       } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
+                       next_skb = NULL;
+               else
+                       next_skb = skb_queue_next(&chan->srej_q, next_skb);
+       }
 
        __skb_queue_tail(&chan->srej_q, skb);
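
The rewritten loop keeps srej_q ordered by each frame's sequence offset and rejects duplicates, handling the previously special-cased empty queue in the same while loop. A standalone sketch of the ordered-insert-with-duplicate-check logic over a plain array; the sequence-space size and helper names are invented for illustration:

    #include <stdio.h>
    #include <string.h>

    #define SEQ_MOD 64      /* hypothetical sequence space */

    /* Offset of seq from base in a modular sequence space. */
    static int seq_offset(int seq, int base)
    {
            return (seq - base + SEQ_MOD) % SEQ_MOD;
    }

    /* Insert seq into q (kept ordered by offset from base).
     * Returns -1 for duplicates or a full queue, 0 otherwise. */
    static int srej_insert(int *q, int *len, int cap, int seq, int base)
    {
            int off = seq_offset(seq, base);
            int i;

            if (*len >= cap)
                    return -1;

            for (i = 0; i < *len; i++) {
                    if (q[i] == seq)
                            return -1;              /* duplicate frame */
                    if (seq_offset(q[i], base) > off)
                            break;                  /* insert before q[i] */
            }

            memmove(&q[i + 1], &q[i], (*len - i) * sizeof(q[0]));
            q[i] = seq;
            (*len)++;
            return 0;
    }

    int main(void)
    {
            int q[8], len = 0, i;

            srej_insert(q, &len, 8, 5, 3);
            srej_insert(q, &len, 8, 4, 3);
            srej_insert(q, &len, 8, 5, 3);  /* duplicate, rejected */
            for (i = 0; i < len; i++)
                    printf("%d ", q[i]);    /* prints: 4 5 */
            printf("\n");
            return 0;
    }
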
 
@@ -3788,7 +3817,7 @@ static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
        }
 }
 
-static void l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
+static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
 {
        struct srej_list *new;
        u32 control;
@@ -3799,6 +3828,9 @@ static void l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
                l2cap_send_sframe(chan, control);
 
                new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
+               if (!new)
+                       return -ENOMEM;
+
                new->tx_seq = chan->expected_tx_seq;
 
                chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
@@ -3807,6 +3839,8 @@ static void l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
        }
 
        chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
+
+       return 0;
 }
 
 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
@@ -3877,7 +3911,12 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_cont
                                        return 0;
                                }
                        }
-                       l2cap_send_srejframe(chan, tx_seq);
+
+                       err = l2cap_send_srejframe(chan, tx_seq);
+                       if (err < 0) {
+                               l2cap_send_disconn_req(chan->conn, chan, -err);
+                               return err;
+                       }
                }
        } else {
                expected_tx_seq_offset = __seq_offset(chan,
@@ -3899,7 +3938,11 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_cont
 
                set_bit(CONN_SEND_PBIT, &chan->conn_state);
 
-               l2cap_send_srejframe(chan, tx_seq);
+               err = l2cap_send_srejframe(chan, tx_seq);
+               if (err < 0) {
+                       l2cap_send_disconn_req(chan->conn, chan, -err);
+                       return err;
+               }
 
                __clear_ack_timer(chan);
        }
@@ -3928,11 +3971,12 @@ expected:
                        l2cap_retransmit_frames(chan);
        }
 
-       __set_ack_timer(chan);
 
        chan->num_acked = (chan->num_acked + 1) % num_to_ack;
        if (chan->num_acked == num_to_ack - 1)
                l2cap_send_ack(chan);
+       else
+               __set_ack_timer(chan);
 
        return 0;
 
@@ -4768,6 +4812,3 @@ void l2cap_exit(void)
 
 module_param(disable_ertm, bool, 0644);
 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
-
-module_param(enable_hs, bool, 0644);
-MODULE_PARM_DESC(enable_hs, "Enable High Speed");
index e2e785c..f737043 100644 (file)
@@ -626,8 +626,13 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
 
                chan->sec_level = sec.level;
 
+               if (!chan->conn)
+                       break;
+
                conn = chan->conn;
-               if (conn && chan->scid == L2CAP_CID_LE_DATA) {
+
+               /* change security for LE channels */
+               if (chan->scid == L2CAP_CID_LE_DATA) {
                        if (!conn->hcon->out) {
                                err = -EINVAL;
                                break;
@@ -635,9 +640,14 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
 
                        if (smp_conn_security(conn, sec.level))
                                break;
-
-                       err = 0;
                        sk->sk_state = BT_CONFIG;
+
+               /* or for an ACL link during the defer_setup window */
+               } else if (sk->sk_state == BT_CONNECT2 &&
+                                       bt_sk(sk)->defer_setup) {
+                       err = l2cap_chan_check_security(chan);
+               } else {
+                       err = -EINVAL;
                }
                break;
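
With this change a BT_SECURITY setsockopt can raise the security level not only on an LE channel but also on an ACL link that is still in its defer_setup window; any other case now fails with -EINVAL instead of silently succeeding. A hedged userspace fragment showing how such a request is typically issued, assuming the BlueZ library headers and an already-open L2CAP socket descriptor:

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <bluetooth/bluetooth.h>
    #include <bluetooth/l2cap.h>

    /* Ask the kernel to raise the security level of an existing
     * L2CAP socket; fd is assumed to be an open BTPROTO_L2CAP socket. */
    static int raise_security(int fd)
    {
            struct bt_security sec;

            memset(&sec, 0, sizeof(sec));
            sec.level = BT_SECURITY_MEDIUM;

            if (setsockopt(fd, SOL_BLUETOOTH, BT_SECURITY,
                           &sec, sizeof(sec)) < 0) {
                    perror("setsockopt(BT_SECURITY)");
                    return -1;
            }
            return 0;
    }
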
 
index 94739d3..1ce549b 100644 (file)
@@ -22,6 +22,7 @@
 
 /* Bluetooth HCI Management interface */
 
+#include <linux/kernel.h>
 #include <linux/uaccess.h>
 #include <linux/module.h>
 #include <asm/unaligned.h>
@@ -44,6 +45,79 @@ struct pending_cmd {
        void *user_data;
 };
 
+/* HCI to MGMT error code conversion table */
+static u8 mgmt_status_table[] = {
+       MGMT_STATUS_SUCCESS,
+       MGMT_STATUS_UNKNOWN_COMMAND,    /* Unknown Command */
+       MGMT_STATUS_NOT_CONNECTED,      /* No Connection */
+       MGMT_STATUS_FAILED,             /* Hardware Failure */
+       MGMT_STATUS_CONNECT_FAILED,     /* Page Timeout */
+       MGMT_STATUS_AUTH_FAILED,        /* Authentication Failed */
+       MGMT_STATUS_NOT_PAIRED,         /* PIN or Key Missing */
+       MGMT_STATUS_NO_RESOURCES,       /* Memory Full */
+       MGMT_STATUS_TIMEOUT,            /* Connection Timeout */
+       MGMT_STATUS_NO_RESOURCES,       /* Max Number of Connections */
+       MGMT_STATUS_NO_RESOURCES,       /* Max Number of SCO Connections */
+       MGMT_STATUS_ALREADY_CONNECTED,  /* ACL Connection Exists */
+       MGMT_STATUS_BUSY,               /* Command Disallowed */
+       MGMT_STATUS_NO_RESOURCES,       /* Rejected Limited Resources */
+       MGMT_STATUS_REJECTED,           /* Rejected Security */
+       MGMT_STATUS_REJECTED,           /* Rejected Personal */
+       MGMT_STATUS_TIMEOUT,            /* Host Timeout */
+       MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported Feature */
+       MGMT_STATUS_INVALID_PARAMS,     /* Invalid Parameters */
+       MGMT_STATUS_DISCONNECTED,       /* OE User Ended Connection */
+       MGMT_STATUS_NO_RESOURCES,       /* OE Low Resources */
+       MGMT_STATUS_DISCONNECTED,       /* OE Power Off */
+       MGMT_STATUS_DISCONNECTED,       /* Connection Terminated */
+       MGMT_STATUS_BUSY,               /* Repeated Attempts */
+       MGMT_STATUS_REJECTED,           /* Pairing Not Allowed */
+       MGMT_STATUS_FAILED,             /* Unknown LMP PDU */
+       MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported Remote Feature */
+       MGMT_STATUS_REJECTED,           /* SCO Offset Rejected */
+       MGMT_STATUS_REJECTED,           /* SCO Interval Rejected */
+       MGMT_STATUS_REJECTED,           /* Air Mode Rejected */
+       MGMT_STATUS_INVALID_PARAMS,     /* Invalid LMP Parameters */
+       MGMT_STATUS_FAILED,             /* Unspecified Error */
+       MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported LMP Parameter Value */
+       MGMT_STATUS_FAILED,             /* Role Change Not Allowed */
+       MGMT_STATUS_TIMEOUT,            /* LMP Response Timeout */
+       MGMT_STATUS_FAILED,             /* LMP Error Transaction Collision */
+       MGMT_STATUS_FAILED,             /* LMP PDU Not Allowed */
+       MGMT_STATUS_REJECTED,           /* Encryption Mode Not Accepted */
+       MGMT_STATUS_FAILED,             /* Unit Link Key Used */
+       MGMT_STATUS_NOT_SUPPORTED,      /* QoS Not Supported */
+       MGMT_STATUS_TIMEOUT,            /* Instant Passed */
+       MGMT_STATUS_NOT_SUPPORTED,      /* Pairing Not Supported */
+       MGMT_STATUS_FAILED,             /* Transaction Collision */
+       MGMT_STATUS_INVALID_PARAMS,     /* Unacceptable Parameter */
+       MGMT_STATUS_REJECTED,           /* QoS Rejected */
+       MGMT_STATUS_NOT_SUPPORTED,      /* Classification Not Supported */
+       MGMT_STATUS_REJECTED,           /* Insufficient Security */
+       MGMT_STATUS_INVALID_PARAMS,     /* Parameter Out Of Range */
+       MGMT_STATUS_BUSY,               /* Role Switch Pending */
+       MGMT_STATUS_FAILED,             /* Slot Violation */
+       MGMT_STATUS_FAILED,             /* Role Switch Failed */
+       MGMT_STATUS_INVALID_PARAMS,     /* EIR Too Large */
+       MGMT_STATUS_NOT_SUPPORTED,      /* Simple Pairing Not Supported */
+       MGMT_STATUS_BUSY,               /* Host Busy Pairing */
+       MGMT_STATUS_REJECTED,           /* Rejected, No Suitable Channel */
+       MGMT_STATUS_BUSY,               /* Controller Busy */
+       MGMT_STATUS_INVALID_PARAMS,     /* Unsuitable Connection Interval */
+       MGMT_STATUS_TIMEOUT,            /* Directed Advertising Timeout */
+       MGMT_STATUS_AUTH_FAILED,        /* Terminated Due to MIC Failure */
+       MGMT_STATUS_CONNECT_FAILED,     /* Connection Establishment Failed */
+       MGMT_STATUS_CONNECT_FAILED,     /* MAC Connection Failed */
+};
+
+static u8 mgmt_status(u8 hci_status)
+{
+       if (hci_status < ARRAY_SIZE(mgmt_status_table))
+               return mgmt_status_table[hci_status];
+
+       return MGMT_STATUS_FAILED;
+}
+
 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
 {
        struct sk_buff *skb;
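
The table added above translates raw HCI status codes into MGMT_STATUS_* values, and mgmt_status() collapses anything past the end of the table into a generic failure instead of indexing out of bounds. A self-contained sketch of the same bounds-checked lookup, with made-up status names and only a few entries:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    enum {
            STATUS_SUCCESS,
            STATUS_UNKNOWN_COMMAND,
            STATUS_NOT_CONNECTED,
            STATUS_FAILED,
    };

    /* Index is the raw controller status; value is the translated code. */
    static const unsigned char status_table[] = {
            STATUS_SUCCESS,         /* 0x00 */
            STATUS_UNKNOWN_COMMAND, /* 0x01 */
            STATUS_NOT_CONNECTED,   /* 0x02 */
    };

    /* Anything outside the table collapses to a generic failure,
     * just like the mgmt_status() helper above. */
    static unsigned char translate_status(unsigned char raw)
    {
            if (raw < ARRAY_SIZE(status_table))
                    return status_table[raw];

            return STATUS_FAILED;
    }

    int main(void)
    {
            printf("%u %u\n", translate_status(0x01), translate_status(0x7f));
            return 0;
    }
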
@@ -178,7 +252,8 @@ static int read_controller_info(struct sock *sk, u16 index)
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_READ_INFO, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_READ_INFO,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
                cancel_delayed_work_sync(&hdev->power_off);
@@ -291,6 +366,15 @@ static void mgmt_pending_remove(struct pending_cmd *cmd)
        mgmt_pending_free(cmd);
 }
 
+static int send_mode_rsp(struct sock *sk, u16 opcode, u16 index, u8 val)
+{
+       struct mgmt_mode rp;
+
+       rp.val = val;
+
+       return cmd_complete(sk, index, opcode, &rp, sizeof(rp));
+}
+
 static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
 {
        struct mgmt_mode *cp;
@@ -303,22 +387,25 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
        BT_DBG("request for hci%u", index);
 
        if (len != sizeof(*cp))
-               return cmd_status(sk, index, MGMT_OP_SET_POWERED, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_SET_POWERED,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_SET_POWERED,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
        up = test_bit(HCI_UP, &hdev->flags);
        if ((cp->val && up) || (!cp->val && !up)) {
-               err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EALREADY);
+               err = send_mode_rsp(sk, index, MGMT_OP_SET_POWERED, cp->val);
                goto failed;
        }
 
        if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
-               err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EBUSY);
+               err = cmd_status(sk, index, MGMT_OP_SET_POWERED,
+                                                       MGMT_STATUS_BUSY);
                goto failed;
        }
 
@@ -355,28 +442,33 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
        BT_DBG("request for hci%u", index);
 
        if (len != sizeof(*cp))
-               return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
        if (!test_bit(HCI_UP, &hdev->flags)) {
-               err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN);
+               err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE,
+                                               MGMT_STATUS_NOT_POWERED);
                goto failed;
        }
 
        if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
                        mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
-               err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EBUSY);
+               err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE,
+                                                       MGMT_STATUS_BUSY);
                goto failed;
        }
 
        if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) &&
                                        test_bit(HCI_PSCAN, &hdev->flags)) {
-               err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EALREADY);
+               err = send_mode_rsp(sk, index, MGMT_OP_SET_DISCOVERABLE,
+                                                               cp->val);
                goto failed;
        }
 
@@ -421,27 +513,32 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
        BT_DBG("request for hci%u", index);
 
        if (len != sizeof(*cp))
-               return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
        if (!test_bit(HCI_UP, &hdev->flags)) {
-               err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN);
+               err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
+                                               MGMT_STATUS_NOT_POWERED);
                goto failed;
        }
 
        if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
                        mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
-               err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EBUSY);
+               err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
+                                                       MGMT_STATUS_BUSY);
                goto failed;
        }
 
        if (cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
-               err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EALREADY);
+               err = send_mode_rsp(sk, index, MGMT_OP_SET_CONNECTABLE,
+                                                               cp->val);
                goto failed;
        }
 
@@ -496,15 +593,6 @@ static int mgmt_event(u16 event, struct hci_dev *hdev, void *data,
        return 0;
 }
 
-static int send_mode_rsp(struct sock *sk, u16 opcode, u16 index, u8 val)
-{
-       struct mgmt_mode rp;
-
-       rp.val = val;
-
-       return cmd_complete(sk, index, opcode, &rp, sizeof(rp));
-}
-
 static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
                                                                        u16 len)
 {
@@ -517,11 +605,13 @@ static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
        BT_DBG("request for hci%u", index);
 
        if (len != sizeof(*cp))
-               return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
@@ -730,11 +820,13 @@ static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
        BT_DBG("request for hci%u", index);
 
        if (len != sizeof(*cp))
-               return cmd_status(sk, index, MGMT_OP_ADD_UUID, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_ADD_UUID,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_ADD_UUID,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
@@ -779,11 +871,13 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
        BT_DBG("request for hci%u", index);
 
        if (len != sizeof(*cp))
-               return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_REMOVE_UUID,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_REMOVE_UUID,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
@@ -805,7 +899,8 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
        }
 
        if (found == 0) {
-               err = cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENOENT);
+               err = cmd_status(sk, index, MGMT_OP_REMOVE_UUID,
+                                               MGMT_STATUS_INVALID_PARAMS);
                goto unlock;
        }
 
@@ -838,11 +933,13 @@ static int set_dev_class(struct sock *sk, u16 index, unsigned char *data,
        BT_DBG("request for hci%u", index);
 
        if (len != sizeof(*cp))
-               return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
@@ -870,11 +967,13 @@ static int set_service_cache(struct sock *sk, u16 index,  unsigned char *data,
        cp = (void *) data;
 
        if (len != sizeof(*cp))
-               return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
@@ -914,7 +1013,8 @@ static int load_link_keys(struct sock *sk, u16 index, unsigned char *data,
        cp = (void *) data;
 
        if (len < sizeof(*cp))
-               return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        key_count = get_unaligned_le16(&cp->key_count);
 
@@ -923,12 +1023,14 @@ static int load_link_keys(struct sock *sk, u16 index, unsigned char *data,
        if (expected_len != len) {
                BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
                                                        len, expected_len);
-               return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS,
+                                               MGMT_STATUS_INVALID_PARAMS);
        }
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys,
                                                                key_count);
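
The length check above guards a variable-length command: the message is rejected unless it is exactly one fixed header plus key_count entries, which prevents reading past the buffer when walking the keys. A small sketch of that validation, using a hypothetical message layout rather than the real mgmt_cp_load_link_keys structure:

    #include <stdint.h>
    #include <stdio.h>

    struct key_entry {
            uint8_t addr[6];
            uint8_t val[16];
    };

    struct load_keys_msg {
            uint16_t key_count;
            struct key_entry keys[];
    };

    /* Reject the message unless its length matches exactly
     * header + key_count * sizeof(entry), as the hunk above does. */
    static int validate_len(const struct load_keys_msg *msg, size_t len)
    {
            size_t expected;

            if (len < sizeof(*msg))
                    return -1;

            expected = sizeof(*msg) + msg->key_count * sizeof(struct key_entry);
            if (expected != len) {
                    fprintf(stderr, "expected %zu bytes, got %zu bytes\n",
                            expected, len);
                    return -1;
            }

            return 0;
    }

    int main(void)
    {
            struct load_keys_msg msg = { .key_count = 2 };

            /* A two-key payload must be exactly 2 + 2 * 22 = 46 bytes. */
            printf("%d\n", validate_len(&msg,
                           sizeof(msg) + 2 * sizeof(struct key_entry)));
            return 0;
    }
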
@@ -951,6 +1053,8 @@ static int load_link_keys(struct sock *sk, u16 index, unsigned char *data,
                                                                key->pin_len);
        }
 
+       cmd_complete(sk, index, MGMT_OP_LOAD_LINK_KEYS, NULL, 0);
+
        hci_dev_unlock_bh(hdev);
        hci_dev_put(hdev);
 
@@ -962,41 +1066,64 @@ static int remove_keys(struct sock *sk, u16 index, unsigned char *data,
 {
        struct hci_dev *hdev;
        struct mgmt_cp_remove_keys *cp;
+       struct mgmt_rp_remove_keys rp;
+       struct hci_cp_disconnect dc;
+       struct pending_cmd *cmd;
        struct hci_conn *conn;
        int err;
 
        cp = (void *) data;
 
        if (len != sizeof(*cp))
-               return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
+       memset(&rp, 0, sizeof(rp));
+       bacpy(&rp.bdaddr, &cp->bdaddr);
+       rp.status = MGMT_STATUS_FAILED;
+
        err = hci_remove_link_key(hdev, &cp->bdaddr);
        if (err < 0) {
-               err = cmd_status(sk, index, MGMT_OP_REMOVE_KEYS, -err);
+               rp.status = MGMT_STATUS_NOT_PAIRED;
                goto unlock;
        }
 
-       err = 0;
-
-       if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect)
+       if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect) {
+               err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp,
+                                                               sizeof(rp));
                goto unlock;
+       }
 
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
-       if (conn) {
-               struct hci_cp_disconnect dc;
+       if (!conn) {
+               err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp,
+                                                               sizeof(rp));
+               goto unlock;
+       }
 
-               put_unaligned_le16(conn->handle, &dc.handle);
-               dc.reason = 0x13; /* Remote User Terminated Connection */
-               err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+       cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_KEYS, hdev, cp, sizeof(*cp));
+       if (!cmd) {
+               err = -ENOMEM;
+               goto unlock;
        }
 
+       put_unaligned_le16(conn->handle, &dc.handle);
+       dc.reason = 0x13; /* Remote User Terminated Connection */
+       err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+       if (err < 0)
+               mgmt_pending_remove(cmd);
+
 unlock:
+       if (err < 0)
+               err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp,
+                                                               sizeof(rp));
        hci_dev_unlock_bh(hdev);
        hci_dev_put(hdev);
 
@@ -1017,21 +1144,25 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
        cp = (void *) data;
 
        if (len != sizeof(*cp))
-               return cmd_status(sk, index, MGMT_OP_DISCONNECT, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_DISCONNECT,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_DISCONNECT,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
        if (!test_bit(HCI_UP, &hdev->flags)) {
-               err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN);
+               err = cmd_status(sk, index, MGMT_OP_DISCONNECT,
+                                               MGMT_STATUS_NOT_POWERED);
                goto failed;
        }
 
        if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
-               err = cmd_status(sk, index, MGMT_OP_DISCONNECT, EBUSY);
+               err = cmd_status(sk, index, MGMT_OP_DISCONNECT,
+                                                       MGMT_STATUS_BUSY);
                goto failed;
        }
 
@@ -1040,7 +1171,8 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
                conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
 
        if (!conn) {
-               err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENOTCONN);
+               err = cmd_status(sk, index, MGMT_OP_DISCONNECT,
+                                               MGMT_STATUS_NOT_CONNECTED);
                goto failed;
        }
 
@@ -1064,11 +1196,18 @@ failed:
        return err;
 }
 
-static u8 link_to_mgmt(u8 link_type)
+static u8 link_to_mgmt(u8 link_type, u8 addr_type)
 {
        switch (link_type) {
        case LE_LINK:
-               return MGMT_ADDR_LE;
+               switch (addr_type) {
+               case ADDR_LE_DEV_PUBLIC:
+                       return MGMT_ADDR_LE_PUBLIC;
+               case ADDR_LE_DEV_RANDOM:
+                       return MGMT_ADDR_LE_RANDOM;
+               default:
+                       return MGMT_ADDR_INVALID;
+               }
        case ACL_LINK:
                return MGMT_ADDR_BREDR;
        default:
@@ -1090,7 +1229,8 @@ static int get_connections(struct sock *sk, u16 index)
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
@@ -1111,7 +1251,7 @@ static int get_connections(struct sock *sk, u16 index)
        i = 0;
        list_for_each_entry(c, &hdev->conn_hash.list, list) {
                bacpy(&rp->addr[i].bdaddr, &c->dst);
-               rp->addr[i].type = link_to_mgmt(c->type);
+               rp->addr[i].type = link_to_mgmt(c->type, c->dst_type);
                if (rp->addr[i].type == MGMT_ADDR_INVALID)
                        continue;
                i++;
@@ -1164,22 +1304,26 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
        cp = (void *) data;
 
        if (len != sizeof(*cp))
-               return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
        if (!test_bit(HCI_UP, &hdev->flags)) {
-               err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN);
+               err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
+                                               MGMT_STATUS_NOT_POWERED);
                goto failed;
        }
 
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
        if (!conn) {
-               err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENOTCONN);
+               err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
+                                               MGMT_STATUS_NOT_CONNECTED);
                goto failed;
        }
 
@@ -1191,7 +1335,7 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
                err = send_pin_code_neg_reply(sk, index, hdev, &ncp);
                if (err >= 0)
                        err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
-                                                               EINVAL);
+                                               MGMT_STATUS_INVALID_PARAMS);
 
                goto failed;
        }
@@ -1230,18 +1374,18 @@ static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
 
        if (len != sizeof(*cp))
                return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
-                                                                       EINVAL);
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
                return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
-                                                                       ENODEV);
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
        if (!test_bit(HCI_UP, &hdev->flags)) {
                err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
-                                                               ENETDOWN);
+                                               MGMT_STATUS_NOT_POWERED);
                goto failed;
        }
 
@@ -1265,11 +1409,13 @@ static int set_io_capability(struct sock *sk, u16 index, unsigned char *data,
        cp = (void *) data;
 
        if (len != sizeof(*cp))
-               return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
@@ -1307,7 +1453,8 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status)
        struct mgmt_rp_pair_device rp;
        struct hci_conn *conn = cmd->user_data;
 
-       bacpy(&rp.bdaddr, &conn->dst);
+       bacpy(&rp.addr.bdaddr, &conn->dst);
+       rp.addr.type = link_to_mgmt(conn->type, conn->dst_type);
        rp.status = status;
 
        cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, &rp, sizeof(rp));
@@ -1325,27 +1472,22 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status)
 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
 {
        struct pending_cmd *cmd;
-       struct hci_dev *hdev = conn->hdev;
 
        BT_DBG("status %u", status);
 
-       hci_dev_lock_bh(hdev);
-
        cmd = find_pairing(conn);
        if (!cmd)
                BT_DBG("Unable to find a pending command");
        else
                pairing_complete(cmd, status);
-
-       hci_dev_unlock_bh(hdev);
 }
 
 static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
 {
        struct hci_dev *hdev;
        struct mgmt_cp_pair_device *cp;
+       struct mgmt_rp_pair_device rp;
        struct pending_cmd *cmd;
-       struct adv_entry *entry;
        u8 sec_level, auth_type;
        struct hci_conn *conn;
        int err;
@@ -1355,11 +1497,13 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
        cp = (void *) data;
 
        if (len != sizeof(*cp))
-               return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
@@ -1369,22 +1513,29 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
        else
                auth_type = HCI_AT_DEDICATED_BONDING_MITM;
 
-       entry = hci_find_adv_entry(hdev, &cp->bdaddr);
-       if (entry)
-               conn = hci_connect(hdev, LE_LINK, &cp->bdaddr, sec_level,
+       if (cp->addr.type == MGMT_ADDR_BREDR)
+               conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr, sec_level,
                                                                auth_type);
        else
-               conn = hci_connect(hdev, ACL_LINK, &cp->bdaddr, sec_level,
+               conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr, sec_level,
                                                                auth_type);
 
+       memset(&rp, 0, sizeof(rp));
+       bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
+       rp.addr.type = cp->addr.type;
+
        if (IS_ERR(conn)) {
-               err = PTR_ERR(conn);
+               rp.status = -PTR_ERR(conn);
+               err = cmd_complete(sk, index, MGMT_OP_PAIR_DEVICE,
+                                                       &rp, sizeof(rp));
                goto unlock;
        }
 
        if (conn->connect_cfm_cb) {
                hci_conn_put(conn);
-               err = cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EBUSY);
+               rp.status = EBUSY;
+               err = cmd_complete(sk, index, MGMT_OP_PAIR_DEVICE,
+                                                       &rp, sizeof(rp));
                goto unlock;
        }
 
@@ -1396,7 +1547,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
        }
 
        /* For LE, just connecting isn't a proof that the pairing finished */
-       if (!entry)
+       if (cp->addr.type == MGMT_ADDR_BREDR)
                conn->connect_cfm_cb = pairing_complete_cb;
 
        conn->security_cfm_cb = pairing_complete_cb;
@@ -1417,56 +1568,138 @@ unlock:
        return err;
 }
 
-static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data,
-                                                       u16 len, int success)
+static int user_pairing_resp(struct sock *sk, u16 index, bdaddr_t *bdaddr,
+                                       u16 mgmt_op, u16 hci_op, __le32 passkey)
 {
-       struct mgmt_cp_user_confirm_reply *cp = (void *) data;
-       u16 mgmt_op, hci_op;
        struct pending_cmd *cmd;
        struct hci_dev *hdev;
+       struct hci_conn *conn;
        int err;
 
-       BT_DBG("");
-
-       if (success) {
-               mgmt_op = MGMT_OP_USER_CONFIRM_REPLY;
-               hci_op = HCI_OP_USER_CONFIRM_REPLY;
-       } else {
-               mgmt_op = MGMT_OP_USER_CONFIRM_NEG_REPLY;
-               hci_op = HCI_OP_USER_CONFIRM_NEG_REPLY;
-       }
-
-       if (len != sizeof(*cp))
-               return cmd_status(sk, index, mgmt_op, EINVAL);
-
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, mgmt_op, ENODEV);
+               return cmd_status(sk, index, mgmt_op,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
        if (!test_bit(HCI_UP, &hdev->flags)) {
-               err = cmd_status(sk, index, mgmt_op, ENETDOWN);
-               goto failed;
+               err = cmd_status(sk, index, mgmt_op, MGMT_STATUS_NOT_POWERED);
+               goto done;
        }
 
-       cmd = mgmt_pending_add(sk, mgmt_op, hdev, data, len);
+       /*
+        * Check for an existing ACL link, if present pair via
+        * HCI commands.
+        *
+        * If no ACL link is present, check for an LE link and if
+        * present, pair via the SMP engine.
+        *
+        * If neither ACL nor LE links are present, fail with error.
+        */
+       conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
+       if (!conn) {
+               conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
+               if (!conn) {
+                       err = cmd_status(sk, index, mgmt_op,
+                                               MGMT_STATUS_NOT_CONNECTED);
+                       goto done;
+               }
+
+               /* Continue with pairing via SMP */
+
+               err = cmd_status(sk, index, mgmt_op, MGMT_STATUS_SUCCESS);
+               goto done;
+       }
+
+       cmd = mgmt_pending_add(sk, mgmt_op, hdev, bdaddr, sizeof(*bdaddr));
        if (!cmd) {
                err = -ENOMEM;
-               goto failed;
+               goto done;
        }
 
-       err = hci_send_cmd(hdev, hci_op, sizeof(cp->bdaddr), &cp->bdaddr);
+       /* Continue with pairing via HCI */
+       if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
+               struct hci_cp_user_passkey_reply cp;
+
+               bacpy(&cp.bdaddr, bdaddr);
+               cp.passkey = passkey;
+               err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
+       } else
+               err = hci_send_cmd(hdev, hci_op, sizeof(*bdaddr), bdaddr);
+
        if (err < 0)
                mgmt_pending_remove(cmd);
 
-failed:
+done:
        hci_dev_unlock_bh(hdev);
        hci_dev_put(hdev);
 
        return err;
 }
 
+static int user_confirm_reply(struct sock *sk, u16 index, void *data, u16 len)
+{
+       struct mgmt_cp_user_confirm_reply *cp = (void *) data;
+
+       BT_DBG("");
+
+       if (len != sizeof(*cp))
+               return cmd_status(sk, index, MGMT_OP_USER_CONFIRM_REPLY,
+                                               MGMT_STATUS_INVALID_PARAMS);
+
+       return user_pairing_resp(sk, index, &cp->bdaddr,
+                       MGMT_OP_USER_CONFIRM_REPLY,
+                       HCI_OP_USER_CONFIRM_REPLY, 0);
+}
+
+static int user_confirm_neg_reply(struct sock *sk, u16 index, void *data,
+                                                                       u16 len)
+{
+       struct mgmt_cp_user_confirm_reply *cp = (void *) data;
+
+       BT_DBG("");
+
+       if (len != sizeof(*cp))
+               return cmd_status(sk, index, MGMT_OP_USER_CONFIRM_NEG_REPLY,
+                                               MGMT_STATUS_INVALID_PARAMS);
+
+       return user_pairing_resp(sk, index, &cp->bdaddr,
+                       MGMT_OP_USER_CONFIRM_NEG_REPLY,
+                       HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
+}
+
+static int user_passkey_reply(struct sock *sk, u16 index, void *data, u16 len)
+{
+       struct mgmt_cp_user_passkey_reply *cp = (void *) data;
+
+       BT_DBG("");
+
+       if (len != sizeof(*cp))
+               return cmd_status(sk, index, MGMT_OP_USER_PASSKEY_REPLY,
+                                                                       EINVAL);
+
+       return user_pairing_resp(sk, index, &cp->bdaddr,
+                       MGMT_OP_USER_PASSKEY_REPLY,
+                       HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
+}
+
+static int user_passkey_neg_reply(struct sock *sk, u16 index, void *data,
+                                                                       u16 len)
+{
+       struct mgmt_cp_user_passkey_neg_reply *cp = (void *) data;
+
+       BT_DBG("");
+
+       if (len != sizeof(*cp))
+               return cmd_status(sk, index, MGMT_OP_USER_PASSKEY_NEG_REPLY,
+                                                                       EINVAL);
+
+       return user_pairing_resp(sk, index, &cp->bdaddr,
+                       MGMT_OP_USER_PASSKEY_NEG_REPLY,
+                       HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
+}
+
 static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
                                                                u16 len)
 {
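
The new user_pairing_resp() helper above now backs all four confirm/passkey replies: it first looks for an ACL connection and answers over HCI, otherwise falls back to an LE connection (left to the SMP engine), and reports "not connected" when neither link exists. A toy sketch of that lookup-with-fallback flow, using a hypothetical in-memory connection table:

    #include <stdio.h>
    #include <string.h>

    struct conn {
            char addr[18];
            int type;               /* 0 = ACL, 1 = LE */
    };

    static struct conn conns[] = {
            { "AA:BB:CC:DD:EE:FF", 1 },
    };

    static struct conn *lookup(const char *addr, int type)
    {
            size_t i;

            for (i = 0; i < sizeof(conns) / sizeof(conns[0]); i++)
                    if (conns[i].type == type && !strcmp(conns[i].addr, addr))
                            return &conns[i];

            return NULL;
    }

    /* Prefer the ACL link (pair via HCI); fall back to LE (pair via SMP);
     * report "not connected" when neither link exists. */
    static int pairing_resp(const char *addr)
    {
            struct conn *c = lookup(addr, 0);

            if (c) {
                    printf("%s: pairing via HCI\n", addr);
                    return 0;
            }

            c = lookup(addr, 1);
            if (!c) {
                    printf("%s: not connected\n", addr);
                    return -1;
            }

            printf("%s: pairing via SMP\n", addr);
            return 0;
    }

    int main(void)
    {
            pairing_resp("AA:BB:CC:DD:EE:FF");      /* LE link -> SMP */
            pairing_resp("11:22:33:44:55:66");      /* no link        */
            return 0;
    }
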
@@ -1479,11 +1712,13 @@ static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
        BT_DBG("");
 
        if (len != sizeof(*mgmt_cp))
-               return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
@@ -1517,24 +1752,25 @@ static int read_local_oob_data(struct sock *sk, u16 index)
        hdev = hci_dev_get(index);
        if (!hdev)
                return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
-                                                                       ENODEV);
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
        if (!test_bit(HCI_UP, &hdev->flags)) {
                err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
-                                                               ENETDOWN);
+                                               MGMT_STATUS_NOT_POWERED);
                goto unlock;
        }
 
        if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
                err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
-                                                               EOPNOTSUPP);
+                                               MGMT_STATUS_NOT_SUPPORTED);
                goto unlock;
        }
 
        if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
-               err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, EBUSY);
+               err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
+                                                       MGMT_STATUS_BUSY);
                goto unlock;
        }
 
@@ -1566,19 +1802,20 @@ static int add_remote_oob_data(struct sock *sk, u16 index, unsigned char *data,
 
        if (len != sizeof(*cp))
                return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
-                                                                       EINVAL);
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
                return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
-                                                                       ENODEV);
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
        err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash,
                                                                cp->randomizer);
        if (err < 0)
-               err = cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, -err);
+               err = cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
+                                                       MGMT_STATUS_FAILED);
        else
                err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL,
                                                                        0);
@@ -1600,19 +1837,19 @@ static int remove_remote_oob_data(struct sock *sk, u16 index,
 
        if (len != sizeof(*cp))
                return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
-                                                                       EINVAL);
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
                return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
-                                                                       ENODEV);
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
        err = hci_remove_remote_oob_data(hdev, &cp->bdaddr);
        if (err < 0)
                err = cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
-                                                                       -err);
+                                               MGMT_STATUS_INVALID_PARAMS);
        else
                err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
                                                                NULL, 0);
@@ -1623,22 +1860,30 @@ static int remove_remote_oob_data(struct sock *sk, u16 index,
        return err;
 }
 
-static int start_discovery(struct sock *sk, u16 index)
+static int start_discovery(struct sock *sk, u16 index,
+                                               unsigned char *data, u16 len)
 {
+       struct mgmt_cp_start_discovery *cp = (void *) data;
        struct pending_cmd *cmd;
        struct hci_dev *hdev;
        int err;
 
        BT_DBG("hci%u", index);
 
+       if (len != sizeof(*cp))
+               return cmd_status(sk, index, MGMT_OP_START_DISCOVERY,
+                                               MGMT_STATUS_INVALID_PARAMS);
+
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_START_DISCOVERY, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_START_DISCOVERY,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
        if (!test_bit(HCI_UP, &hdev->flags)) {
-               err = cmd_status(sk, index, MGMT_OP_START_DISCOVERY, ENETDOWN);
+               err = cmd_status(sk, index, MGMT_OP_START_DISCOVERY,
+                                               MGMT_STATUS_NOT_POWERED);
                goto failed;
        }
 
@@ -1669,7 +1914,8 @@ static int stop_discovery(struct sock *sk, u16 index)
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
@@ -1701,18 +1947,19 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data,
 
        if (len != sizeof(*cp))
                return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
-                                                       EINVAL);
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
                return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
-                                                       ENODEV);
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
        err = hci_blacklist_add(hdev, &cp->bdaddr);
        if (err < 0)
-               err = cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, -err);
+               err = cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
+                                                       MGMT_STATUS_FAILED);
        else
                err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE,
                                                        NULL, 0);
@@ -1734,19 +1981,20 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
 
        if (len != sizeof(*cp))
                return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
-                                                               EINVAL);
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
                return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
-                                                               ENODEV);
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
        err = hci_blacklist_del(hdev, &cp->bdaddr);
 
        if (err < 0)
-               err = cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, -err);
+               err = cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
+                                               MGMT_STATUS_INVALID_PARAMS);
        else
                err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE,
                                                                NULL, 0);
@@ -1770,12 +2018,12 @@ static int set_fast_connectable(struct sock *sk, u16 index,
 
        if (len != sizeof(*cp))
                return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
-                                                               EINVAL);
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
                return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
-                                                               ENODEV);
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock(hdev);
 
@@ -1793,14 +2041,14 @@ static int set_fast_connectable(struct sock *sk, u16 index,
                                                sizeof(acp), &acp);
        if (err < 0) {
                err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
-                                                               -err);
+                                                       MGMT_STATUS_FAILED);
                goto done;
        }
 
        err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
        if (err < 0) {
                err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
-                                                               -err);
+                                                       MGMT_STATUS_FAILED);
                goto done;
        }
 
@@ -1903,10 +2151,18 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
                err = pair_device(sk, index, buf + sizeof(*hdr), len);
                break;
        case MGMT_OP_USER_CONFIRM_REPLY:
-               err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 1);
+               err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len);
                break;
        case MGMT_OP_USER_CONFIRM_NEG_REPLY:
-               err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 0);
+               err = user_confirm_neg_reply(sk, index, buf + sizeof(*hdr),
+                                                                       len);
+               break;
+       case MGMT_OP_USER_PASSKEY_REPLY:
+               err = user_passkey_reply(sk, index, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_USER_PASSKEY_NEG_REPLY:
+               err = user_passkey_neg_reply(sk, index, buf + sizeof(*hdr),
+                                                                       len);
                break;
        case MGMT_OP_SET_LOCAL_NAME:
                err = set_local_name(sk, index, buf + sizeof(*hdr), len);
@@ -1922,7 +2178,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
                                                                        len);
                break;
        case MGMT_OP_START_DISCOVERY:
-               err = start_discovery(sk, index);
+               err = start_discovery(sk, index, buf + sizeof(*hdr), len);
                break;
        case MGMT_OP_STOP_DISCOVERY:
                err = stop_discovery(sk, index);
@@ -1939,7 +2195,8 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
                break;
        default:
                BT_DBG("Unknown op %u", opcode);
-               err = cmd_status(sk, index, opcode, 0x01);
+               err = cmd_status(sk, index, opcode,
+                                               MGMT_STATUS_UNKNOWN_COMMAND);
                break;
        }
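
For reference, every command dispatched by the switch above arrives with a small fixed header carrying the opcode, the controller index and the parameter length; the handlers then receive only the payload that follows it. A minimal sketch of that framing, assuming the layout used by net/bluetooth/mgmt.h in this series (all fields little-endian on the wire):

	/* Sketch of the management command header (assumed layout). */
	struct mgmt_hdr {
		__le16 opcode;	/* MGMT_OP_*, selects the handler above */
		__le16 index;	/* controller index (hdev->id) */
		__le16 len;	/* length of the parameters that follow */
	} __packed;
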
 
@@ -2062,13 +2319,15 @@ int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
 
 int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
 {
+       u8 mgmt_err = mgmt_status(status);
+
        if (scan & SCAN_PAGE)
                mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
-                                               cmd_status_rsp, &status);
+                                               cmd_status_rsp, &mgmt_err);
 
        if (scan & SCAN_INQUIRY)
                mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
-                                               cmd_status_rsp, &status);
+                                               cmd_status_rsp, &mgmt_err);
 
        return 0;
 }
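
The mgmt_status() helper used above converts raw HCI status codes into the MGMT_STATUS_* values exported to user space, so callers no longer leak errno or HCI numbers through cmd_status(). A rough sketch of the idea; the constants are assumed from the MGMT_STATUS_* set added in this series, and the real table covers the full HCI status range:

	static u8 example_mgmt_status(u8 hci_status)
	{
		switch (hci_status) {
		case 0x00:	/* HCI success */
			return MGMT_STATUS_SUCCESS;
		case 0x01:	/* unknown HCI command */
			return MGMT_STATUS_UNKNOWN_COMMAND;
		case 0x02:	/* unknown connection identifier */
			return MGMT_STATUS_NOT_CONNECTED;
		default:
			return MGMT_STATUS_FAILED;
		}
	}
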
@@ -2089,12 +2348,13 @@ int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
        return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
 }
 
-int mgmt_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type)
+int mgmt_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+                                                               u8 addr_type)
 {
        struct mgmt_addr_info ev;
 
        bacpy(&ev.bdaddr, bdaddr);
-       ev.type = link_to_mgmt(link_type);
+       ev.type = link_to_mgmt(link_type, addr_type);
 
        return mgmt_event(MGMT_EV_CONNECTED, hdev, &ev, sizeof(ev), NULL);
 }
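
link_to_mgmt() now takes the HCI link type plus, for LE links, the address type, so events can distinguish BR/EDR from LE public and LE random addresses. A sketch of the mapping, assuming the LE_LINK/ACL_LINK and MGMT_ADDR_* constants of this series:

	static u8 example_link_to_mgmt(u8 link_type, u8 addr_type)
	{
		switch (link_type) {
		case LE_LINK:
			if (addr_type == ADDR_LE_DEV_PUBLIC)
				return MGMT_ADDR_LE_PUBLIC;
			return MGMT_ADDR_LE_RANDOM;
		case ACL_LINK:
			return MGMT_ADDR_BREDR;
		default:
			return MGMT_ADDR_INVALID;
		}
	}
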
@@ -2106,6 +2366,7 @@ static void disconnect_rsp(struct pending_cmd *cmd, void *data)
        struct mgmt_rp_disconnect rp;
 
        bacpy(&rp.bdaddr, &cp->bdaddr);
+       rp.status = 0;
 
        cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, &rp, sizeof(rp));
 
@@ -2115,7 +2376,25 @@ static void disconnect_rsp(struct pending_cmd *cmd, void *data)
        mgmt_pending_remove(cmd);
 }
 
-int mgmt_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
+static void remove_keys_rsp(struct pending_cmd *cmd, void *data)
+{
+       u8 *status = data;
+       struct mgmt_cp_remove_keys *cp = cmd->param;
+       struct mgmt_rp_remove_keys rp;
+
+       memset(&rp, 0, sizeof(rp));
+       bacpy(&rp.bdaddr, &cp->bdaddr);
+       if (status != NULL)
+               rp.status = *status;
+
+       cmd_complete(cmd->sk, cmd->index, MGMT_OP_REMOVE_KEYS, &rp,
+                                                               sizeof(rp));
+
+       mgmt_pending_remove(cmd);
+}
+
+int mgmt_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+                                                               u8 addr_type)
 {
        struct mgmt_addr_info ev;
        struct sock *sk = NULL;
@@ -2124,40 +2403,53 @@ int mgmt_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
        mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
 
        bacpy(&ev.bdaddr, bdaddr);
-       ev.type = link_to_mgmt(type);
+       ev.type = link_to_mgmt(link_type, addr_type);
 
        err = mgmt_event(MGMT_EV_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
 
        if (sk)
                sock_put(sk);
 
+       mgmt_pending_foreach(MGMT_OP_REMOVE_KEYS, hdev, remove_keys_rsp, NULL);
+
        return err;
 }
 
-int mgmt_disconnect_failed(struct hci_dev *hdev)
+int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status)
 {
        struct pending_cmd *cmd;
+       u8 mgmt_err = mgmt_status(status);
        int err;
 
        cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
        if (!cmd)
                return -ENOENT;
 
-       err = cmd_status(cmd->sk, hdev->id, MGMT_OP_DISCONNECT, EIO);
+       if (bdaddr) {
+               struct mgmt_rp_disconnect rp;
+
+               bacpy(&rp.bdaddr, bdaddr);
+               rp.status = status;
+
+               err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
+                                                       &rp, sizeof(rp));
+       } else
+               err = cmd_status(cmd->sk, hdev->id, MGMT_OP_DISCONNECT,
+                                                               mgmt_err);
 
        mgmt_pending_remove(cmd);
 
        return err;
 }
 
-int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type,
-                                                               u8 status)
+int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+                                               u8 addr_type, u8 status)
 {
        struct mgmt_ev_connect_failed ev;
 
        bacpy(&ev.addr.bdaddr, bdaddr);
-       ev.addr.type = link_to_mgmt(type);
-       ev.status = status;
+       ev.addr.type = link_to_mgmt(link_type, addr_type);
+       ev.status = mgmt_status(status);
 
        return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
 }
@@ -2185,7 +2477,7 @@ int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                return -ENOENT;
 
        bacpy(&rp.bdaddr, bdaddr);
-       rp.status = status;
+       rp.status = mgmt_status(status);
 
        err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, &rp,
                                                                sizeof(rp));
@@ -2207,7 +2499,7 @@ int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                return -ENOENT;
 
        bacpy(&rp.bdaddr, bdaddr);
-       rp.status = status;
+       rp.status = mgmt_status(status);
 
        err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY, &rp,
                                                                sizeof(rp));
@@ -2232,7 +2524,19 @@ int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                                                        NULL);
 }
 
-static int confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+       struct mgmt_ev_user_passkey_request ev;
+
+       BT_DBG("%s", hdev->name);
+
+       bacpy(&ev.bdaddr, bdaddr);
+
+       return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
+                                                                       NULL);
+}
+
+static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                                        u8 status, u8 opcode)
 {
        struct pending_cmd *cmd;
@@ -2244,7 +2548,7 @@ static int confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                return -ENOENT;
 
        bacpy(&rp.bdaddr, bdaddr);
-       rp.status = status;
+       rp.status = mgmt_status(status);
        err = cmd_complete(cmd->sk, hdev->id, opcode, &rp, sizeof(rp));
 
        mgmt_pending_remove(cmd);
@@ -2255,23 +2559,37 @@ static int confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                                                u8 status)
 {
-       return confirm_reply_complete(hdev, bdaddr, status,
+       return user_pairing_resp_complete(hdev, bdaddr, status,
                                                MGMT_OP_USER_CONFIRM_REPLY);
 }
 
 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev,
                                                bdaddr_t *bdaddr, u8 status)
 {
-       return confirm_reply_complete(hdev, bdaddr, status,
+       return user_pairing_resp_complete(hdev, bdaddr, status,
                                        MGMT_OP_USER_CONFIRM_NEG_REPLY);
 }
 
+int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                                                               u8 status)
+{
+       return user_pairing_resp_complete(hdev, bdaddr, status,
+                                               MGMT_OP_USER_PASSKEY_REPLY);
+}
+
+int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev,
+                                               bdaddr_t *bdaddr, u8 status)
+{
+       return user_pairing_resp_complete(hdev, bdaddr, status,
+                                       MGMT_OP_USER_PASSKEY_NEG_REPLY);
+}
+
 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status)
 {
        struct mgmt_ev_auth_failed ev;
 
        bacpy(&ev.bdaddr, bdaddr);
-       ev.status = status;
+       ev.status = mgmt_status(status);
 
        return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
 }
@@ -2291,7 +2609,7 @@ int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
 
        if (status) {
                err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
-                                                                       EIO);
+                                                       mgmt_status(status));
                goto failed;
        }
 
@@ -2326,7 +2644,8 @@ int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
 
        if (status) {
                err = cmd_status(cmd->sk, hdev->id,
-                                       MGMT_OP_READ_LOCAL_OOB_DATA, EIO);
+                                               MGMT_OP_READ_LOCAL_OOB_DATA,
+                                               mgmt_status(status));
        } else {
                struct mgmt_rp_read_local_oob_data rp;
 
@@ -2343,15 +2662,15 @@ int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
        return err;
 }
 
-int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type,
-                                       u8 *dev_class, s8 rssi, u8 *eir)
+int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+                               u8 addr_type, u8 *dev_class, s8 rssi, u8 *eir)
 {
        struct mgmt_ev_device_found ev;
 
        memset(&ev, 0, sizeof(ev));
 
        bacpy(&ev.addr.bdaddr, bdaddr);
-       ev.addr.type = link_to_mgmt(type);
+       ev.addr.type = link_to_mgmt(link_type, addr_type);
        ev.rssi = rssi;
 
        if (eir)
@@ -2375,7 +2694,7 @@ int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name)
        return mgmt_event(MGMT_EV_REMOTE_NAME, hdev, &ev, sizeof(ev), NULL);
 }
 
-int mgmt_inquiry_failed(struct hci_dev *hdev, u8 status)
+int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
 {
        struct pending_cmd *cmd;
        int err;
@@ -2384,6 +2703,21 @@ int mgmt_inquiry_failed(struct hci_dev *hdev, u8 status)
        if (!cmd)
                return -ENOENT;
 
+       err = cmd_status(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status));
+       mgmt_pending_remove(cmd);
+
+       return err;
+}
+
+int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
+{
+       struct pending_cmd *cmd;
+       int err;
+
+       cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
+       if (!cmd)
+               return -ENOENT;
+
        err = cmd_status(cmd->sk, hdev->id, cmd->opcode, status);
        mgmt_pending_remove(cmd);
 
index 8743f36..eac849b 100644 (file)
@@ -51,8 +51,8 @@
 
 #define VERSION "1.11"
 
-static int disable_cfc;
-static int l2cap_ertm;
+static bool disable_cfc;
+static bool l2cap_ertm;
 static int channel_mtu = -1;
 static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU;
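
The int-to-bool conversion of disable_cfc and l2cap_ertm matches their module_param() declarations further down in the file: from this kernel onwards, a parameter registered with the "bool" type must be backed by a bool variable or the build warns. A minimal sketch of the pattern (the description string is illustrative, not quoted from the file):

	#include <linux/module.h>

	static bool disable_cfc;

	/* The "bool" parameter type now requires a bool-typed variable. */
	module_param(disable_cfc, bool, 0644);
	MODULE_PARM_DESC(disable_cfc, "Disable credit based flow control");
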
 
@@ -1162,6 +1162,7 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
                        if (list_empty(&s->dlcs)) {
                                s->state = BT_DISCONN;
                                rfcomm_send_disc(s, 0);
+                               rfcomm_session_clear_timer(s);
                        }
 
                        break;
index a324b00..a0d11b8 100644 (file)
@@ -51,7 +51,7 @@
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/sco.h>
 
-static int disable_esco;
+static bool disable_esco;
 
 static const struct proto_ops sco_sock_ops;
 
index 94e94ca..0b96737 100644 (file)
@@ -232,6 +232,18 @@ static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
        return 0;
 }
 
+static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send)
+{
+       if (send)
+               smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
+                                                               &reason);
+
+       clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->pend);
+       mgmt_auth_failed(conn->hcon->hdev, conn->dst, reason);
+       del_timer(&conn->security_timer);
+       smp_chan_destroy(conn);
+}
+
 static void confirm_work(struct work_struct *work)
 {
        struct smp_chan *smp = container_of(work, struct smp_chan, confirm);
@@ -270,8 +282,7 @@ static void confirm_work(struct work_struct *work)
        return;
 
 error:
-       smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), &reason);
-       smp_chan_destroy(conn);
+       smp_failure(conn, reason, 1);
 }
 
 static void random_work(struct work_struct *work)
@@ -354,8 +365,7 @@ static void random_work(struct work_struct *work)
        return;
 
 error:
-       smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), &reason);
-       smp_chan_destroy(conn);
+       smp_failure(conn, reason, 1);
 }
 
 static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
@@ -379,7 +389,15 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
 
 void smp_chan_destroy(struct l2cap_conn *conn)
 {
-       kfree(conn->smp_chan);
+       struct smp_chan *smp = conn->smp_chan;
+
+       clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend);
+
+       if (smp->tfm)
+               crypto_free_blkcipher(smp->tfm);
+
+       kfree(smp);
+       conn->smp_chan = NULL;
        hci_conn_put(conn->hcon);
 }
 
@@ -647,6 +665,7 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
                break;
 
        case SMP_CMD_PAIRING_FAIL:
+               smp_failure(conn, skb->data[0], 0);
                reason = 0;
                err = -EPERM;
                break;
@@ -692,8 +711,7 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
 
 done:
        if (reason)
-               smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
-                                                               &reason);
+               smp_failure(conn, reason, 1);
 
        kfree_skb(skb);
        return err;
index f20c4fd..ba780cc 100644 (file)
@@ -62,7 +62,7 @@ static int __init br_init(void)
 
        brioctl_set(br_ioctl_deviceless_stub);
 
-#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+#if IS_ENABLED(CONFIG_ATM_LANE)
        br_fdb_test_addr_hook = br_fdb_test_addr;
 #endif
 
@@ -93,7 +93,7 @@ static void __exit br_deinit(void)
        rcu_barrier(); /* Wait for completion of call_rcu()'s */
 
        br_netfilter_fini();
-#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+#if IS_ENABLED(CONFIG_ATM_LANE)
        br_fdb_test_addr_hook = NULL;
 #endif
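
IS_ENABLED(CONFIG_FOO) from <linux/kconfig.h> replaces the open-coded defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE) tests throughout these files; it is true when the option is built in or built as a module. A conceptual sketch of what the test reduces to (the real header uses a preprocessor trick, but the effect is the same):

	/* Conceptual equivalent of IS_ENABLED(CONFIG_ATM_LANE): */
	#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
	# define ATM_LANE_AVAILABLE 1
	#else
	# define ATM_LANE_AVAILABLE 0
	#endif
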
 
index a3754ac..71773b0 100644 (file)
@@ -170,8 +170,11 @@ static int br_set_mac_address(struct net_device *dev, void *p)
                return -EINVAL;
 
        spin_lock_bh(&br->lock);
-       memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
-       br_stp_change_bridge_id(br, addr->sa_data);
+       if (compare_ether_addr(dev->dev_addr, addr->sa_data)) {
+               memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+               br_fdb_change_mac_address(br, addr->sa_data);
+               br_stp_change_bridge_id(br, addr->sa_data);
+       }
        br->flags |= BR_SET_MAC_ADDR;
        spin_unlock_bh(&br->lock);
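
compare_ether_addr() returns zero when the two addresses match, so the new guard makes the fdb and STP bridge-id updates run only when user space actually changes the MAC instead of re-writing the current one. A sketch of the helper's semantics as assumed here, modelled on linux/etherdevice.h:

	/* Returns 0 when the two 6-byte Ethernet addresses are equal,
	 * non-zero otherwise. */
	static inline unsigned example_compare_ether_addr(const u8 *a1,
							  const u8 *a2)
	{
		const u16 *a = (const u16 *)a1;
		const u16 *b = (const u16 *)a2;

		return (a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2]);
	}
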
 
index 973813e..f963f6b 100644 (file)
@@ -28,7 +28,8 @@
 static struct kmem_cache *br_fdb_cache __read_mostly;
 static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
                      const unsigned char *addr);
-static void fdb_notify(const struct net_bridge_fdb_entry *, int);
+static void fdb_notify(struct net_bridge *br,
+                      const struct net_bridge_fdb_entry *, int);
 
 static u32 fdb_salt __read_mostly;
 
@@ -80,10 +81,10 @@ static void fdb_rcu_free(struct rcu_head *head)
        kmem_cache_free(br_fdb_cache, ent);
 }
 
-static inline void fdb_delete(struct net_bridge_fdb_entry *f)
+static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
 {
-       fdb_notify(f, RTM_DELNEIGH);
        hlist_del_rcu(&f->hlist);
+       fdb_notify(br, f, RTM_DELNEIGH);
        call_rcu(&f->rcu, fdb_rcu_free);
 }
 
@@ -114,7 +115,7 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
                                }
 
                                /* delete old one */
-                               fdb_delete(f);
+                               fdb_delete(br, f);
                                goto insert;
                        }
                }
@@ -126,6 +127,18 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
        spin_unlock_bh(&br->hash_lock);
 }
 
+void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
+{
+       struct net_bridge_fdb_entry *f;
+
+       /* If old entry was unassociated with any port, then delete it. */
+       f = __br_fdb_get(br, br->dev->dev_addr);
+       if (f && f->is_local && !f->dst)
+               fdb_delete(br, f);
+
+       fdb_insert(br, NULL, newaddr);
+}
+
 void br_fdb_cleanup(unsigned long _data)
 {
        struct net_bridge *br = (struct net_bridge *)_data;
@@ -144,7 +157,7 @@ void br_fdb_cleanup(unsigned long _data)
                                continue;
                        this_timer = f->updated + delay;
                        if (time_before_eq(this_timer, jiffies))
-                               fdb_delete(f);
+                               fdb_delete(br, f);
                        else if (time_before(this_timer, next_timer))
                                next_timer = this_timer;
                }
@@ -165,7 +178,7 @@ void br_fdb_flush(struct net_bridge *br)
                struct hlist_node *h, *n;
                hlist_for_each_entry_safe(f, h, n, &br->hash[i], hlist) {
                        if (!f->is_static)
-                               fdb_delete(f);
+                               fdb_delete(br, f);
                }
        }
        spin_unlock_bh(&br->hash_lock);
@@ -209,7 +222,7 @@ void br_fdb_delete_by_port(struct net_bridge *br,
                                }
                        }
 
-                       fdb_delete(f);
+                       fdb_delete(br, f);
                skip_delete: ;
                }
        }
@@ -234,7 +247,7 @@ struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
        return NULL;
 }
 
-#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+#if IS_ENABLED(CONFIG_ATM_LANE)
 /* Interface used by ATM LANE hook to test
  * if an addr is on some other bridge port */
 int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
@@ -249,7 +262,7 @@ int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
                ret = 0;
        else {
                fdb = __br_fdb_get(port->br, addr);
-               ret = fdb && fdb->dst->dev != dev &&
+               ret = fdb && fdb->dst && fdb->dst->dev != dev &&
                        fdb->dst->state == BR_STATE_FORWARDING;
        }
        rcu_read_unlock();
@@ -281,6 +294,10 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
                        if (has_expired(br, f))
                                continue;
 
+                       /* ignore pseudo entry for local MAC address */
+                       if (!f->dst)
+                               continue;
+
                        if (skip) {
                                --skip;
                                continue;
@@ -347,7 +364,6 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
                fdb->is_static = 0;
                fdb->updated = fdb->used = jiffies;
                hlist_add_head_rcu(&fdb->hlist, head);
-               fdb_notify(fdb, RTM_NEWNEIGH);
        }
        return fdb;
 }
@@ -371,7 +387,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
                br_warn(br, "adding interface %s with same address "
                       "as a received packet\n",
                       source->dev->name);
-               fdb_delete(fdb);
+               fdb_delete(br, fdb);
        }
 
        fdb = fdb_create(head, source, addr);
@@ -379,6 +395,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
                return -ENOMEM;
 
        fdb->is_local = fdb->is_static = 1;
+       fdb_notify(br, fdb, RTM_NEWNEIGH);
        return 0;
 }
 
@@ -424,9 +441,11 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
                }
        } else {
                spin_lock(&br->hash_lock);
-               if (likely(!fdb_find(head, addr)))
-                       fdb_create(head, source, addr);
-
+               if (likely(!fdb_find(head, addr))) {
+                       fdb = fdb_create(head, source, addr);
+                       if (fdb)
+                               fdb_notify(br, fdb, RTM_NEWNEIGH);
+               }
                /* else  we lose race and someone else inserts
                 * it first, don't bother updating
                 */
@@ -446,7 +465,7 @@ static int fdb_to_nud(const struct net_bridge_fdb_entry *fdb)
                return NUD_REACHABLE;
 }
 
-static int fdb_fill_info(struct sk_buff *skb,
+static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
                         const struct net_bridge_fdb_entry *fdb,
                         u32 pid, u32 seq, int type, unsigned int flags)
 {
@@ -459,14 +478,13 @@ static int fdb_fill_info(struct sk_buff *skb,
        if (nlh == NULL)
                return -EMSGSIZE;
 
-
        ndm = nlmsg_data(nlh);
        ndm->ndm_family  = AF_BRIDGE;
        ndm->ndm_pad1    = 0;
        ndm->ndm_pad2    = 0;
        ndm->ndm_flags   = 0;
        ndm->ndm_type    = 0;
-       ndm->ndm_ifindex = fdb->dst->dev->ifindex;
+       ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
        ndm->ndm_state   = fdb_to_nud(fdb);
 
        NLA_PUT(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr);
@@ -491,9 +509,10 @@ static inline size_t fdb_nlmsg_size(void)
                + nla_total_size(sizeof(struct nda_cacheinfo));
 }
 
-static void fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
+static void fdb_notify(struct net_bridge *br,
+                      const struct net_bridge_fdb_entry *fdb, int type)
 {
-       struct net *net = dev_net(fdb->dst->dev);
+       struct net *net = dev_net(br->dev);
        struct sk_buff *skb;
        int err = -ENOBUFS;
 
@@ -501,7 +520,7 @@ static void fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
        if (skb == NULL)
                goto errout;
 
-       err = fdb_fill_info(skb, fdb, 0, 0, type, 0);
+       err = fdb_fill_info(skb, br, fdb, 0, 0, type, 0);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in fdb_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
@@ -538,7 +557,7 @@ int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
                                if (idx < cb->args[0])
                                        goto skip;
 
-                               if (fdb_fill_info(skb, f,
+                               if (fdb_fill_info(skb, br, f,
                                                  NETLINK_CB(cb->skb).pid,
                                                  cb->nlh->nlmsg_seq,
                                                  RTM_NEWNEIGH,
@@ -572,6 +591,7 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
                fdb = fdb_create(head, source, addr);
                if (!fdb)
                        return -ENOMEM;
+               fdb_notify(br, fdb, RTM_NEWNEIGH);
        } else {
                if (flags & NLM_F_EXCL)
                        return -EEXIST;
@@ -587,7 +607,7 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
                        fdb->is_local = fdb->is_static = 0;
 
                fdb->updated = fdb->used = jiffies;
-               fdb_notify(fdb, RTM_NEWNEIGH);
+               fdb_notify(br, fdb, RTM_NEWNEIGH);
        }
 
        return 0;
@@ -667,7 +687,7 @@ static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr)
        if (!fdb)
                return -ENOENT;
 
-       fdb_delete(fdb);
+       fdb_delete(p->br, fdb);
        return 0;
 }
 
index ee64287..61f6534 100644 (file)
@@ -98,7 +98,7 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
 /* called with rcu_read_lock */
 void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 {
-       if (should_deliver(to, skb)) {
+       if (to && should_deliver(to, skb)) {
                __br_deliver(to, skb);
                return;
        }
index 375417e..568d5bf 100644 (file)
@@ -24,7 +24,7 @@
 #include <linux/slab.h>
 #include <linux/timer.h>
 #include <net/ip.h>
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6.h>
 #include <net/mld.h>
 #include <net/addrconf.h>
@@ -36,7 +36,7 @@
 #define mlock_dereference(X, br) \
        rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static inline int ipv6_is_transient_multicast(const struct in6_addr *addr)
 {
        if (ipv6_addr_is_multicast(addr) && IPV6_ADDR_MC_FLAG_TRANSIENT(addr))
@@ -52,7 +52,7 @@ static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
        switch (a->proto) {
        case htons(ETH_P_IP):
                return a->u.ip4 == b->u.ip4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
                return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
 #endif
@@ -65,7 +65,7 @@ static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip)
        return jhash_1word(mdb->secret, (__force u32)ip) & (mdb->max - 1);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
                                const struct in6_addr *ip)
 {
@@ -79,7 +79,7 @@ static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
        switch (ip->proto) {
        case htons(ETH_P_IP):
                return __br_ip4_hash(mdb, ip->u.ip4);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
                return __br_ip6_hash(mdb, &ip->u.ip6);
 #endif
@@ -121,7 +121,7 @@ static struct net_bridge_mdb_entry *br_mdb_ip4_get(
        return br_mdb_ip_get(mdb, &br_dst);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static struct net_bridge_mdb_entry *br_mdb_ip6_get(
        struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst)
 {
@@ -152,7 +152,7 @@ struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
        case htons(ETH_P_IP):
                ip.u.ip4 = ip_hdr(skb)->daddr;
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
                ip.u.ip6 = ipv6_hdr(skb)->daddr;
                break;
@@ -411,7 +411,7 @@ out:
        return skb;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
                                                    const struct in6_addr *group)
 {
@@ -496,7 +496,7 @@ static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
        switch (addr->proto) {
        case htons(ETH_P_IP):
                return br_ip4_multicast_alloc_query(br, addr->u.ip4);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
                return br_ip6_multicast_alloc_query(br, &addr->u.ip6);
 #endif
@@ -773,7 +773,7 @@ static int br_ip4_multicast_add_group(struct net_bridge *br,
        return br_multicast_add_group(br, port, &br_group);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static int br_ip6_multicast_add_group(struct net_bridge *br,
                                      struct net_bridge_port *port,
                                      const struct in6_addr *group)
@@ -845,7 +845,7 @@ static void br_multicast_send_query(struct net_bridge *br,
        br_group.proto = htons(ETH_P_IP);
        __br_multicast_send_query(br, port, &br_group);
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        br_group.proto = htons(ETH_P_IPV6);
        __br_multicast_send_query(br, port, &br_group);
 #endif
@@ -989,7 +989,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
        return err;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static int br_ip6_multicast_mld2_report(struct net_bridge *br,
                                        struct net_bridge_port *port,
                                        struct sk_buff *skb)
@@ -1185,7 +1185,7 @@ out:
        return err;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static int br_ip6_multicast_query(struct net_bridge *br,
                                  struct net_bridge_port *port,
                                  struct sk_buff *skb)
@@ -1334,7 +1334,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
        br_multicast_leave_group(br, port, &br_group);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static void br_ip6_multicast_leave_group(struct net_bridge *br,
                                         struct net_bridge_port *port,
                                         const struct in6_addr *group)
@@ -1449,7 +1449,7 @@ err_out:
        return err;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static int br_multicast_ipv6_rcv(struct net_bridge *br,
                                 struct net_bridge_port *port,
                                 struct sk_buff *skb)
@@ -1596,7 +1596,7 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
        switch (skb->protocol) {
        case htons(ETH_P_IP):
                return br_multicast_ipv4_rcv(br, port, skb);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
                return br_multicast_ipv6_rcv(br, port, skb);
 #endif
index d6ec372..8412247 100644 (file)
@@ -114,12 +114,18 @@ static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst, const vo
        return NULL;
 }
 
+static unsigned int fake_mtu(const struct dst_entry *dst)
+{
+       return dst->dev->mtu;
+}
+
 static struct dst_ops fake_dst_ops = {
        .family =               AF_INET,
        .protocol =             cpu_to_be16(ETH_P_IP),
        .update_pmtu =          fake_update_pmtu,
        .cow_metrics =          fake_cow_metrics,
        .neigh_lookup =         fake_neigh_lookup,
+       .mtu =                  fake_mtu,
 };
 
 /*
@@ -141,7 +147,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
        rt->dst.dev = br->dev;
        rt->dst.path = &rt->dst;
        dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
-       rt->dst.flags   = DST_NOXFRM;
+       rt->dst.flags   = DST_NOXFRM | DST_NOPEER;
        rt->dst.ops = &fake_dst_ops;
 }
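
The new .mtu operation is needed because, in this series, the route MTU is no longer read from the generic metrics: dst_mtu() dispatches straight through the dst_ops, so the bridge's fake rtable must provide one to report a sane MTU for bridged netfilter traffic. A sketch of the caller side, assuming the dst_mtu() of this series:

	/* Assumed shape of dst_mtu() after the dst_ops .mtu conversion. */
	static inline u32 example_dst_mtu(const struct dst_entry *dst)
	{
		return dst->ops->mtu(dst);	/* fake_mtu() -> dst->dev->mtu */
	}
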
 
@@ -356,7 +362,7 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
        if (!skb->dev)
                goto free_skb;
        dst = skb_dst(skb);
-       neigh = dst_get_neighbour(dst);
+       neigh = dst_get_neighbour_noref(dst);
        if (neigh->hh.hh_len) {
                neigh_hh_bridge(&neigh->hh, skb);
                skb->dev = nf_bridge->physindev;
@@ -807,7 +813,7 @@ static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
        return NF_STOLEN;
 }
 
-#if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV4)
 static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 {
        int ret;
index 4027029..0b67a63 100644 (file)
@@ -56,7 +56,7 @@ struct br_ip
 {
        union {
                __be32  ip4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                struct in6_addr ip6;
 #endif
        } u;
@@ -348,6 +348,7 @@ extern void br_fdb_fini(void);
 extern void br_fdb_flush(struct net_bridge *br);
 extern void br_fdb_changeaddr(struct net_bridge_port *p,
                              const unsigned char *newaddr);
+extern void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr);
 extern void br_fdb_cleanup(unsigned long arg);
 extern void br_fdb_delete_by_port(struct net_bridge *br,
                                  const struct net_bridge_port *p, int do_all);
@@ -536,7 +537,7 @@ extern void br_stp_port_timer_init(struct net_bridge_port *p);
 extern unsigned long br_timer_value(const struct timer_list *timer);
 
 /* br.c */
-#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+#if IS_ENABLED(CONFIG_ATM_LANE)
 extern int (*br_fdb_test_addr_hook)(struct net_device *dev, unsigned char *addr);
 #endif
 
index 88d7d1d..f88ee53 100644 (file)
@@ -107,7 +107,7 @@ ebt_log_packet(u_int8_t pf, unsigned int hooknum,
                goto out;
        }
 
-#if defined(CONFIG_BRIDGE_EBT_IP6) || defined(CONFIG_BRIDGE_EBT_IP6_MODULE)
+#if IS_ENABLED(CONFIG_BRIDGE_EBT_IP6)
        if ((bitmask & EBT_LOG_IP6) && eth_hdr(skb)->h_proto ==
           htons(ETH_P_IPV6)) {
                const struct ipv6hdr *ih;
index 529750d..936361e 100644 (file)
@@ -40,3 +40,14 @@ config CAIF_NETDEV
        If you select to build it as a built-in then the main CAIF device must
        also be a built-in.
        If unsure say Y.
+
+config CAIF_USB
+       tristate "CAIF USB support"
+       depends on CAIF
+       default n
+       ---help---
+       Say Y if you are using CAIF over USB CDC NCM.
+       This can be either built-in or a loadable module.
+       If you select to build it as a built-in then the main CAIF device must
+       also be a built-in.
+       If unsure say N.
index ebcd4e7..cc2b511 100644 (file)
@@ -10,5 +10,6 @@ caif-y := caif_dev.o \
 obj-$(CONFIG_CAIF) += caif.o
 obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o
 obj-$(CONFIG_CAIF) += caif_socket.o
+obj-$(CONFIG_CAIF_USB) += caif_usb.o
 
 export-y := caif.o
index f7e8c70..b0ce14f 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/netdevice.h>
 #include <linux/mutex.h>
 #include <linux/module.h>
+#include <linux/spinlock.h>
 #include <net/netns/generic.h>
 #include <net/net_namespace.h>
 #include <net/pkt_sched.h>
@@ -34,6 +35,10 @@ struct caif_device_entry {
        struct list_head list;
        struct net_device *netdev;
        int __percpu *pcpu_refcnt;
+       spinlock_t flow_lock;
+       struct sk_buff *xoff_skb;
+       void (*xoff_skb_dtor)(struct sk_buff *skb);
+       bool xoff;
 };
 
 struct caif_device_entry_list {
@@ -48,11 +53,11 @@ struct caif_net {
 };
 
 static int caif_net_id;
+static int q_high = 50; /* Percent */
 
 struct cfcnfg *get_cfcnfg(struct net *net)
 {
        struct caif_net *caifn;
-       BUG_ON(!net);
        caifn = net_generic(net, caif_net_id);
        if (!caifn)
                return NULL;
@@ -63,7 +68,6 @@ EXPORT_SYMBOL(get_cfcnfg);
 static struct caif_device_entry_list *caif_device_list(struct net *net)
 {
        struct caif_net *caifn;
-       BUG_ON(!net);
        caifn = net_generic(net, caif_net_id);
        if (!caifn)
                return NULL;
@@ -126,17 +130,106 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
        return NULL;
 }
 
+void caif_flow_cb(struct sk_buff *skb)
+{
+       struct caif_device_entry *caifd;
+       void (*dtor)(struct sk_buff *skb) = NULL;
+       bool send_xoff;
+
+       WARN_ON(skb->dev == NULL);
+
+       rcu_read_lock();
+       caifd = caif_get(skb->dev);
+       caifd_hold(caifd);
+       rcu_read_unlock();
+
+       spin_lock_bh(&caifd->flow_lock);
+       send_xoff = caifd->xoff;
+       caifd->xoff = 0;
+       if (!WARN_ON(caifd->xoff_skb_dtor == NULL)) {
+               WARN_ON(caifd->xoff_skb != skb);
+               dtor = caifd->xoff_skb_dtor;
+               caifd->xoff_skb = NULL;
+               caifd->xoff_skb_dtor = NULL;
+       }
+       spin_unlock_bh(&caifd->flow_lock);
+
+       if (dtor)
+               dtor(skb);
+
+       if (send_xoff)
+               caifd->layer.up->
+                       ctrlcmd(caifd->layer.up,
+                               _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
+                               caifd->layer.id);
+       caifd_put(caifd);
+}
+
 static int transmit(struct cflayer *layer, struct cfpkt *pkt)
 {
-       int err;
+       int err, high = 0, qlen = 0;
+       struct caif_dev_common *caifdev;
        struct caif_device_entry *caifd =
            container_of(layer, struct caif_device_entry, layer);
        struct sk_buff *skb;
+       struct netdev_queue *txq;
+
+       rcu_read_lock_bh();
 
        skb = cfpkt_tonative(pkt);
        skb->dev = caifd->netdev;
        skb_reset_network_header(skb);
        skb->protocol = htons(ETH_P_CAIF);
+       caifdev = netdev_priv(caifd->netdev);
+
+       /* Check if we need to handle xoff */
+       if (likely(caifd->netdev->tx_queue_len == 0))
+               goto noxoff;
+
+       if (unlikely(caifd->xoff))
+               goto noxoff;
+
+       if (likely(!netif_queue_stopped(caifd->netdev))) {
+               /* If we run with a TX queue, check if the queue is too long*/
+               txq = netdev_get_tx_queue(skb->dev, 0);
+               qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc));
+
+               if (likely(qlen == 0))
+                       goto noxoff;
+
+               high = (caifd->netdev->tx_queue_len * q_high) / 100;
+               if (likely(qlen < high))
+                       goto noxoff;
+       }
+
+       /* Hold lock while accessing xoff */
+       spin_lock_bh(&caifd->flow_lock);
+       if (caifd->xoff) {
+               spin_unlock_bh(&caifd->flow_lock);
+               goto noxoff;
+       }
+
+       /*
+        * Handle flow off: we do this by temporarily hijacking this
+        * skb's destructor function and replacing it with our own
+        * flow-on callback. The callback will set flow-on and call
+        * the original destructor.
+        */
+
+       pr_debug("queue has stopped(%d) or is full (%d > %d)\n",
+                       netif_queue_stopped(caifd->netdev),
+                       qlen, high);
+       caifd->xoff = 1;
+       caifd->xoff_skb = skb;
+       caifd->xoff_skb_dtor = skb->destructor;
+       skb->destructor = caif_flow_cb;
+       spin_unlock_bh(&caifd->flow_lock);
+
+       caifd->layer.up->ctrlcmd(caifd->layer.up,
+                                       _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
+                                       caifd->layer.id);
+noxoff:
+       rcu_read_unlock_bh();
 
        err = dev_queue_xmit(skb);
        if (err > 0)
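
The flow-control addition above triggers once the qdisc backlog exceeds q_high percent of tx_queue_len (for example, 500 packets for a 1000-packet queue at the default 50). It then temporarily takes over the queued skb's destructor: when the device eventually frees that skb, the substitute callback restores state, runs the original destructor and signals flow-on to the CAIF stack. A stripped-down sketch of the pattern, with no locking or refcounting shown; caif_flow_cb() above is the real callback:

	#include <linux/skbuff.h>

	static struct sk_buff *xoff_skb;
	static void (*xoff_skb_dtor)(struct sk_buff *skb);

	static void example_flow_on(struct sk_buff *skb)
	{
		void (*dtor)(struct sk_buff *skb) = xoff_skb_dtor;

		xoff_skb = NULL;
		xoff_skb_dtor = NULL;
		if (dtor)
			dtor(skb);	/* keep the original accounting intact */
		/* ...then tell the throttled layer it may transmit again. */
	}

	static void example_hijack(struct sk_buff *skb)
	{
		xoff_skb = skb;
		xoff_skb_dtor = skb->destructor;
		skb->destructor = example_flow_on;
	}
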
@@ -232,6 +325,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
        if (!caifd)
                return;
        *layer = &caifd->layer;
+       spin_lock_init(&caifd->flow_lock);
 
        switch (caifdev->link_select) {
        case CAIF_LINK_HIGH_BANDW:
@@ -262,6 +356,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
        if (rcv_func)
                *rcv_func = receive;
 }
+EXPORT_SYMBOL(caif_enroll_dev);
 
 /* notify Caif of device events */
 static int caif_device_notify(struct notifier_block *me, unsigned long what,
@@ -315,6 +410,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
                        break;
                }
 
+               caifd->xoff = 0;
                cfcnfg_set_phy_state(cfg, &caifd->layer, true);
                rcu_read_unlock();
 
@@ -336,6 +432,24 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
                caifd->layer.up->ctrlcmd(caifd->layer.up,
                                         _CAIF_CTRLCMD_PHYIF_DOWN_IND,
                                         caifd->layer.id);
+
+               spin_lock_bh(&caifd->flow_lock);
+
+               /*
+                * Replace our xoff-destructor with original destructor.
+                * We trust that skb->destructor *always* is called before
+                * the skb reference is invalid. The hijacked SKB destructor
+                * takes the flow_lock so manipulating the skb->destructor here
+                * should be safe.
+               */
+               if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
+                       caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;
+
+               caifd->xoff = 0;
+               caifd->xoff_skb_dtor = NULL;
+               caifd->xoff_skb = NULL;
+
+               spin_unlock_bh(&caifd->flow_lock);
                caifd_put(caifd);
                break;
 
@@ -391,15 +505,15 @@ static struct notifier_block caif_device_notifier = {
 static int caif_init_net(struct net *net)
 {
        struct caif_net *caifn = net_generic(net, caif_net_id);
-       BUG_ON(!caifn);
+       if (WARN_ON(!caifn))
+               return -EINVAL;
+
        INIT_LIST_HEAD(&caifn->caifdevs.list);
        mutex_init(&caifn->caifdevs.lock);
 
        caifn->cfg = cfcnfg_create();
-       if (!caifn->cfg) {
-               pr_warn("can't create cfcnfg\n");
+       if (!caifn->cfg)
                return -ENOMEM;
-       }
 
        return 0;
 }
diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
new file mode 100644 (file)
index 0000000..5fc9eca
--- /dev/null
@@ -0,0 +1,208 @@
+/*
+ * CAIF USB handler
+ * Copyright (C) ST-Ericsson AB 2011
+ * Author:     Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/usb/usbnet.h>
+#include <net/netns/generic.h>
+#include <net/caif/caif_dev.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfpkt.h>
+#include <net/caif/cfcnfg.h>
+
+MODULE_LICENSE("GPL");
+
+#define CFUSB_PAD_DESCR_SZ 1   /* Alignment descriptor length */
+#define CFUSB_ALIGNMENT 4      /* Number of bytes to align. */
+#define CFUSB_MAX_HEADLEN (CFUSB_PAD_DESCR_SZ + CFUSB_ALIGNMENT-1)
+#define STE_USB_VID 0x04cc     /* USB Product ID for ST-Ericsson */
+#define STE_USB_PID_CAIF 0x2306        /* Product id for CAIF Modems */
+
+struct cfusbl {
+       struct cflayer layer;
+       u8 tx_eth_hdr[ETH_HLEN];
+};
+
+static bool pack_added;
+
+static int cfusbl_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+       u8 hpad;
+
+       /* Remove padding. */
+       cfpkt_extr_head(pkt, &hpad, 1);
+       cfpkt_extr_head(pkt, NULL, hpad);
+       return layr->up->receive(layr->up, pkt);
+}
+
+static int cfusbl_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+       struct caif_payload_info *info;
+       u8 hpad;
+       u8 zeros[CFUSB_ALIGNMENT];
+       struct sk_buff *skb;
+       struct cfusbl *usbl = container_of(layr, struct cfusbl, layer);
+
+       skb = cfpkt_tonative(pkt);
+
+       skb_reset_network_header(skb);
+       skb->protocol = htons(ETH_P_IP);
+
+       info = cfpkt_info(pkt);
+       hpad = (info->hdr_len + CFUSB_PAD_DESCR_SZ) & (CFUSB_ALIGNMENT - 1);
+
+       if (skb_headroom(skb) < ETH_HLEN + CFUSB_PAD_DESCR_SZ + hpad) {
+               pr_warn("Headroom too small\n");
+               kfree_skb(skb);
+               return -EIO;
+       }
+       memset(zeros, 0, hpad);
+
+       cfpkt_add_head(pkt, zeros, hpad);
+       cfpkt_add_head(pkt, &hpad, 1);
+       cfpkt_add_head(pkt, usbl->tx_eth_hdr, sizeof(usbl->tx_eth_hdr));
+       return layr->dn->transmit(layr->dn, pkt);
+}
+
+static void cfusbl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+                                       int phyid)
+{
+       if (layr->up && layr->up->ctrlcmd)
+               layr->up->ctrlcmd(layr->up, ctrl, layr->id);
+}
+
+struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
+                                       u8 braddr[ETH_ALEN])
+{
+       struct cfusbl *this = kmalloc(sizeof(struct cfusbl), GFP_ATOMIC);
+
+       if (!this) {
+               pr_warn("Out of memory\n");
+               return NULL;
+       }
+       caif_assert(offsetof(struct cfusbl, layer) == 0);
+
+       memset(this, 0, sizeof(struct cflayer));
+       this->layer.receive = cfusbl_receive;
+       this->layer.transmit = cfusbl_transmit;
+       this->layer.ctrlcmd = cfusbl_ctrlcmd;
+       snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "usb%d", phyid);
+       this->layer.id = phyid;
+
+       /*
+        * Construct TX ethernet header:
+        *      0-5     destination address
+        *      6-11    source address
+        *      12-13   protocol type
+        */
+       memcpy(this->tx_eth_hdr, braddr, ETH_ALEN);
+       memcpy(&this->tx_eth_hdr[ETH_ALEN], ethaddr, ETH_ALEN);
+       this->tx_eth_hdr[12] = cpu_to_be16(ETH_P_802_EX1) & 0xff;
+       this->tx_eth_hdr[13] = (cpu_to_be16(ETH_P_802_EX1) >> 8) & 0xff;
+       pr_debug("caif ethernet TX-header dst:%pM src:%pM type:%02x%02x\n",
+                       this->tx_eth_hdr, this->tx_eth_hdr + ETH_ALEN,
+                       this->tx_eth_hdr[12], this->tx_eth_hdr[13]);
+
+       return (struct cflayer *) this;
+}
+
+static struct packet_type caif_usb_type __read_mostly = {
+       .type = cpu_to_be16(ETH_P_802_EX1),
+};
+
+static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
+                             void *arg)
+{
+       struct net_device *dev = arg;
+       struct caif_dev_common common;
+       struct cflayer *layer, *link_support;
+       struct usbnet   *usbnet = netdev_priv(dev);
+       struct usb_device       *usbdev = usbnet->udev;
+       struct ethtool_drvinfo drvinfo;
+
+       /*
+        * Quirks: Hijack ethtool to find out whether we have an NCM
+        * device, and find its VID/PID.
+        */
+       if (dev->ethtool_ops == NULL || dev->ethtool_ops->get_drvinfo == NULL)
+               return 0;
+
+       dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
+       if (strncmp(drvinfo.driver, "cdc_ncm", 7) != 0)
+               return 0;
+
+       pr_debug("USB CDC NCM device VID:0x%4x PID:0x%4x\n",
+               le16_to_cpu(usbdev->descriptor.idVendor),
+               le16_to_cpu(usbdev->descriptor.idProduct));
+
+       /* Check for VID/PID that supports CAIF */
+       if (!(le16_to_cpu(usbdev->descriptor.idVendor) == STE_USB_VID &&
+               le16_to_cpu(usbdev->descriptor.idProduct) == STE_USB_PID_CAIF))
+               return 0;
+
+       if (what == NETDEV_UNREGISTER)
+               module_put(THIS_MODULE);
+
+       if (what != NETDEV_REGISTER)
+               return 0;
+
+       __module_get(THIS_MODULE);
+
+       memset(&common, 0, sizeof(common));
+       common.use_frag = false;
+       common.use_fcs = false;
+       common.use_stx = false;
+       common.link_select = CAIF_LINK_HIGH_BANDW;
+       common.flowctrl = NULL;
+
+       link_support = cfusbl_create(dev->ifindex, dev->dev_addr,
+                                       dev->broadcast);
+
+       if (!link_support)
+               return -ENOMEM;
+
+       if (dev->num_tx_queues > 1)
+               pr_warn("USB device uses more than one tx queue\n");
+
+       caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
+                       &layer, &caif_usb_type.func);
+       if (!pack_added)
+               dev_add_pack(&caif_usb_type);
+       pack_added = true;
+
+       strncpy(layer->name, dev->name,
+                       sizeof(layer->name) - 1);
+       layer->name[sizeof(layer->name) - 1] = 0;
+
+       return 0;
+}
+
+static struct notifier_block caif_device_notifier = {
+       .notifier_call = cfusbl_device_notify,
+       .priority = 0,
+};
+
+static int __init cfusbl_init(void)
+{
+       return register_netdevice_notifier(&caif_device_notifier);
+}
+
+static void __exit cfusbl_exit(void)
+{
+       unregister_netdevice_notifier(&caif_device_notifier);
+       dev_remove_pack(&caif_usb_type);
+}
+
+module_init(cfusbl_init);
+module_exit(cfusbl_exit);
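
For clarity, cfusbl_transmit() above builds each outgoing frame as: Ethernet header, a one-byte pad descriptor holding the pad length, that many zero bytes, then the CAIF packet. A worked example of the padding arithmetic, assuming a CAIF header length of 6 bytes (the value is illustrative):

	u8 hdr_len = 6;	/* assumed info->hdr_len for the example */
	u8 hpad = (hdr_len + CFUSB_PAD_DESCR_SZ) & (CFUSB_ALIGNMENT - 1);
	/* hpad = (6 + 1) & 3 = 3, so the wire frame is:
	 *   [14-byte eth hdr][descr = 3][3 zero bytes][CAIF packet]
	 * and cfusbl_receive() strips the descriptor plus hpad bytes
	 * before handing the packet up.
	 */
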
index de53907..e335ba8 100644 (file)
@@ -63,7 +63,6 @@ static inline struct cfpkt *skb_to_pkt(struct sk_buff *skb)
        return (struct cfpkt *) skb;
 }
 
-
 struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt)
 {
        struct cfpkt *pkt = skb_to_pkt(nativepkt);
@@ -105,14 +104,12 @@ void cfpkt_destroy(struct cfpkt *pkt)
        kfree_skb(skb);
 }
 
-
 inline bool cfpkt_more(struct cfpkt *pkt)
 {
        struct sk_buff *skb = pkt_to_skb(pkt);
        return skb->len > 0;
 }
 
-
 int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len)
 {
        struct sk_buff *skb = pkt_to_skb(pkt);
@@ -148,6 +145,7 @@ int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len)
                memcpy(data, from, len);
        return 0;
 }
+EXPORT_SYMBOL(cfpkt_extr_head);
 
 int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
 {
@@ -171,13 +169,11 @@ int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
        return 0;
 }
 
-
 int cfpkt_pad_trail(struct cfpkt *pkt, u16 len)
 {
        return cfpkt_add_body(pkt, NULL, len);
 }
 
-
 int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len)
 {
        struct sk_buff *skb = pkt_to_skb(pkt);
@@ -256,21 +252,19 @@ int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
        memcpy(to, data, len);
        return 0;
 }
-
+EXPORT_SYMBOL(cfpkt_add_head);
 
 inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len)
 {
        return cfpkt_add_body(pkt, data, len);
 }
 
-
 inline u16 cfpkt_getlen(struct cfpkt *pkt)
 {
        struct sk_buff *skb = pkt_to_skb(pkt);
        return skb->len;
 }
 
-
 inline u16 cfpkt_iterate(struct cfpkt *pkt,
                            u16 (*iter_func)(u16, void *, u16),
                            u16 data)
@@ -288,7 +282,6 @@ inline u16 cfpkt_iterate(struct cfpkt *pkt,
        return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt));
 }
 
-
 int cfpkt_setlen(struct cfpkt *pkt, u16 len)
 {
        struct sk_buff *skb = pkt_to_skb(pkt);
@@ -400,3 +393,4 @@ struct caif_payload_info *cfpkt_info(struct cfpkt *pkt)
 {
        return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb;
 }
+EXPORT_SYMBOL(cfpkt_info);
index 81660f8..6dc75d4 100644 (file)
@@ -190,7 +190,7 @@ out:
 
 static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
 {
-       caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size);
+       caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size + RFM_HEAD_SIZE);
 
        /* Add info for MUX-layer to route the packet out. */
        cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;
index 42599e3..3a94eae 100644 (file)
@@ -477,7 +477,6 @@ int crush_do_rule(struct crush_map *map,
        int i, j;
        int numrep;
        int firstn;
-       int rc = -1;
 
        BUG_ON(ruleno >= map->max_rules);
 
@@ -491,23 +490,18 @@ int crush_do_rule(struct crush_map *map,
         * that this may or may not correspond to the specific types
         * referenced by the crush rule.
         */
-       if (force >= 0) {
-               if (force >= map->max_devices ||
-                   map->device_parents[force] == 0) {
-                       /*dprintk("CRUSH: forcefed device dne\n");*/
-                       rc = -1;  /* force fed device dne */
-                       goto out;
-               }
-               if (!is_out(map, weight, force, x)) {
-                       while (1) {
-                               force_context[++force_pos] = force;
-                               if (force >= 0)
-                                       force = map->device_parents[force];
-                               else
-                                       force = map->bucket_parents[-1-force];
-                               if (force == 0)
-                                       break;
-                       }
+       if (force >= 0 &&
+           force < map->max_devices &&
+           map->device_parents[force] != 0 &&
+           !is_out(map, weight, force, x)) {
+               while (1) {
+                       force_context[++force_pos] = force;
+                       if (force >= 0)
+                               force = map->device_parents[force];
+                       else
+                               force = map->bucket_parents[-1-force];
+                       if (force == 0)
+                               break;
                }
        }
 
@@ -600,10 +594,7 @@ int crush_do_rule(struct crush_map *map,
                        BUG_ON(1);
                }
        }
-       rc = result_len;
-
-out:
-       return rc;
+       return result_len;
 }
 
 
index c4ecc86..674641b 100644 (file)
@@ -8,7 +8,8 @@ obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
 obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
 
 obj-y               += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
-                       neighbour.o rtnetlink.o utils.o link_watch.o filter.o
+                       neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
+                       sock_diag.o
 
 obj-$(CONFIG_XFRM) += flow.o
 obj-y += net-sysfs.o
index d5e2c4c..43d94ce 100644 (file)
@@ -366,7 +366,7 @@ static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
                dev_hold(dst->dev);
                dev_put(dev);
                rcu_read_lock();
-               neigh = dst_get_neighbour(dst);
+               neigh = dst_get_neighbour_noref(dst);
                if (neigh && neigh->dev == dev) {
                        neigh->dev = dst->dev;
                        dev_hold(dst->dev);
index 31b0b7f..597732c 100644 (file)
@@ -515,34 +515,44 @@ err_out:
 static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
                                                     void __user *useraddr)
 {
-       struct ethtool_rxfh_indir *indir;
-       u32 table_size;
-       size_t full_size;
+       u32 user_size, dev_size;
+       u32 *indir;
        int ret;
 
-       if (!dev->ethtool_ops->get_rxfh_indir)
+       if (!dev->ethtool_ops->get_rxfh_indir_size ||
+           !dev->ethtool_ops->get_rxfh_indir)
+               return -EOPNOTSUPP;
+       dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev);
+       if (dev_size == 0)
                return -EOPNOTSUPP;
 
-       if (copy_from_user(&table_size,
+       if (copy_from_user(&user_size,
                           useraddr + offsetof(struct ethtool_rxfh_indir, size),
-                          sizeof(table_size)))
+                          sizeof(user_size)))
                return -EFAULT;
 
-       if (table_size >
-           (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index))
-               return -ENOMEM;
-       full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size;
-       indir = kzalloc(full_size, GFP_USER);
+       if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh_indir, size),
+                        &dev_size, sizeof(dev_size)))
+               return -EFAULT;
+
+       /* If the user buffer size is 0, this is just a query for the
+        * device table size.  Otherwise, if it's smaller than the
+        * device table size it's an error.
+        */
+       if (user_size < dev_size)
+               return user_size == 0 ? 0 : -EINVAL;
+
+       indir = kcalloc(dev_size, sizeof(indir[0]), GFP_USER);
        if (!indir)
                return -ENOMEM;
 
-       indir->cmd = ETHTOOL_GRXFHINDIR;
-       indir->size = table_size;
        ret = dev->ethtool_ops->get_rxfh_indir(dev, indir);
        if (ret)
                goto out;
 
-       if (copy_to_user(useraddr, indir, full_size))
+       if (copy_to_user(useraddr +
+                        offsetof(struct ethtool_rxfh_indir, ring_index[0]),
+                        indir, dev_size * sizeof(indir[0])))
                ret = -EFAULT;
 
 out:
@@ -553,30 +563,56 @@ out:
 static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
                                                     void __user *useraddr)
 {
-       struct ethtool_rxfh_indir *indir;
-       u32 table_size;
-       size_t full_size;
+       struct ethtool_rxnfc rx_rings;
+       u32 user_size, dev_size, i;
+       u32 *indir;
        int ret;
 
-       if (!dev->ethtool_ops->set_rxfh_indir)
+       if (!dev->ethtool_ops->get_rxfh_indir_size ||
+           !dev->ethtool_ops->set_rxfh_indir ||
+           !dev->ethtool_ops->get_rxnfc)
+               return -EOPNOTSUPP;
+       dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev);
+       if (dev_size == 0)
                return -EOPNOTSUPP;
 
-       if (copy_from_user(&table_size,
+       if (copy_from_user(&user_size,
                           useraddr + offsetof(struct ethtool_rxfh_indir, size),
-                          sizeof(table_size)))
+                          sizeof(user_size)))
                return -EFAULT;
 
-       if (table_size >
-           (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index))
-               return -ENOMEM;
-       full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size;
-       indir = kmalloc(full_size, GFP_USER);
+       if (user_size != 0 && user_size != dev_size)
+               return -EINVAL;
+
+       indir = kcalloc(dev_size, sizeof(indir[0]), GFP_USER);
        if (!indir)
                return -ENOMEM;
 
-       if (copy_from_user(indir, useraddr, full_size)) {
-               ret = -EFAULT;
+       rx_rings.cmd = ETHTOOL_GRXRINGS;
+       ret = dev->ethtool_ops->get_rxnfc(dev, &rx_rings, NULL);
+       if (ret)
                goto out;
+
+       if (user_size == 0) {
+               for (i = 0; i < dev_size; i++)
+                       indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data);
+       } else {
+               if (copy_from_user(indir,
+                                 useraddr +
+                                 offsetof(struct ethtool_rxfh_indir,
+                                          ring_index[0]),
+                                 dev_size * sizeof(indir[0]))) {
+                       ret = -EFAULT;
+                       goto out;
+               }
+
+               /* Validate ring indices */
+               for (i = 0; i < dev_size; i++) {
+                       if (indir[i] >= rx_rings.data) {
+                               ret = -EINVAL;
+                               goto out;
+                       }
+               }
        }
 
        ret = dev->ethtool_ops->set_rxfh_indir(dev, indir);
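
The reworked ETHTOOL_GRXFHINDIR/SRXFHINDIR handlers above separate the user-supplied table size from the driver's real table size: a get with size 0 is a pure size query, and a set with size 0 asks the kernel to rebuild the default spreading over the current RX ring count. A small sketch of that default fill, assuming ethtool_rxfh_indir_default() does a simple round-robin over the rings (the helper body here is an assumption for illustration, not a quote of the real header):

/* Round-robin default: slot i of the indirection table points at
 * ring i modulo the number of RX rings. */
static inline unsigned int toy_rxfh_indir_default(unsigned int index,
						  unsigned int n_rx_rings)
{
	return index % n_rx_rings;
}

static void toy_fill_default_table(unsigned int *indir, unsigned int dev_size,
				   unsigned int n_rx_rings)
{
	unsigned int i;

	for (i = 0; i < dev_size; i++)
		indir[i] = toy_rxfh_indir_default(i, n_rx_rings);
}

/* e.g. dev_size = 8, n_rx_rings = 3  ->  0 1 2 0 1 2 0 1 */
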
index 8ae42de..e318c7e 100644 (file)
@@ -358,6 +358,18 @@ void flow_cache_flush(void)
        put_online_cpus();
 }
 
+static void flow_cache_flush_task(struct work_struct *work)
+{
+       flow_cache_flush();
+}
+
+static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);
+
+void flow_cache_flush_deferred(void)
+{
+       schedule_work(&flow_cache_flush_work);
+}
+
 static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
 {
        struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
index cdf8dc3..4af151e 100644 (file)
@@ -1190,7 +1190,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
 
                        rcu_read_lock();
                        /* On shaper/eql skb->dst->neighbour != neigh :( */
-                       if (dst && (n2 = dst_get_neighbour(dst)) != NULL)
+                       if (dst && (n2 = dst_get_neighbour_noref(dst)) != NULL)
                                n1 = n2;
                        n1->output(n1, skb);
                        rcu_read_unlock();
index 3bf72b6..abf4393 100644 (file)
@@ -622,15 +622,15 @@ static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
                                           char *buf)
 {
        struct rps_dev_flow_table *flow_table;
-       unsigned int val = 0;
+       unsigned long val = 0;
 
        rcu_read_lock();
        flow_table = rcu_dereference(queue->rps_flow_table);
        if (flow_table)
-               val = flow_table->mask + 1;
+               val = (unsigned long)flow_table->mask + 1;
        rcu_read_unlock();
 
-       return sprintf(buf, "%u\n", val);
+       return sprintf(buf, "%lu\n", val);
 }
 
 static void rps_dev_flow_table_release_work(struct work_struct *work)
@@ -654,33 +654,46 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
                                     struct rx_queue_attribute *attr,
                                     const char *buf, size_t len)
 {
-       unsigned int count;
-       char *endp;
+       unsigned long mask, count;
        struct rps_dev_flow_table *table, *old_table;
        static DEFINE_SPINLOCK(rps_dev_flow_lock);
+       int rc;
 
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
 
-       count = simple_strtoul(buf, &endp, 0);
-       if (endp == buf)
-               return -EINVAL;
+       rc = kstrtoul(buf, 0, &count);
+       if (rc < 0)
+               return rc;
 
        if (count) {
-               int i;
-
-               if (count > 1<<30) {
+               mask = count - 1;
+               /* mask = roundup_pow_of_two(count) - 1;
+                * without overflows...
+                */
+               while ((mask | (mask >> 1)) != mask)
+                       mask |= (mask >> 1);
+               /* On 64 bit arches, must check mask fits in table->mask (u32),
+                * and on 32bit arches, must check RPS_DEV_FLOW_TABLE_SIZE(mask + 1)
+                * doesnt overflow.
+                */
+#if BITS_PER_LONG > 32
+               if (mask > (unsigned long)(u32)mask)
+                       return -EINVAL;
+#else
+               if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
+                               / sizeof(struct rps_dev_flow)) {
                        /* Enforce a limit to prevent overflow */
                        return -EINVAL;
                }
-               count = roundup_pow_of_two(count);
-               table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
+#endif
+               table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
                if (!table)
                        return -ENOMEM;
 
-               table->mask = count - 1;
-               for (i = 0; i < count; i++)
-                       table->flows[i].cpu = RPS_NO_CPU;
+               table->mask = mask;
+               for (count = 0; count <= mask; count++)
+                       table->flows[count].cpu = RPS_NO_CPU;
        } else
                table = NULL;
 
@@ -1221,9 +1234,7 @@ static void netdev_queue_release(struct kobject *kobj)
 
 static struct kobj_type netdev_queue_ktype = {
        .sysfs_ops = &netdev_queue_sysfs_ops,
-#ifdef CONFIG_XPS
        .release = netdev_queue_release,
-#endif
        .default_attrs = netdev_queue_default_attrs,
 };
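
The store_rps_dev_flow_table_cnt() rewrite above computes the table mask by hand instead of calling roundup_pow_of_two(), so that very large counts cannot overflow before the size checks run. A stand-alone version of that bit-smearing round-up, with a couple of worked values (valid for count >= 1, the only case the kernel code reaches it with):

static unsigned long toy_flow_table_mask(unsigned long count)
{
	unsigned long mask = count - 1;

	/* Copy the highest set bit into every lower position:
	 * e.g. 0b0100100 -> 0b0111111. */
	while ((mask | (mask >> 1)) != mask)
		mask |= (mask >> 1);

	return mask;	/* == roundup_pow_of_two(count) - 1, without overflow */
}

/* toy_flow_table_mask(1) == 0, (5) == 7, (8) == 7, (9) == 15;
 * the flow table then holds mask + 1 == next-power-of-two entries. */
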
 
index 182236b..9b570a6 100644 (file)
  * but then some measure against one socket starving all other sockets
  * would be needed.
  *
- * It was 128 by default. Experiments with real servers show, that
+ * The minimum value of it is 128. Experiments with real servers show that
  * it is absolutely not enough even at 100conn/sec. 256 cures most
- * of problems. This value is adjusted to 128 for very small machines
- * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
+ * of problems.
+ * This value is adjusted to 128 for low memory machines,
+ * and it will increase in proportion to the memory of machine.
  * Note : Dont forget somaxconn that may limit backlog too.
  */
 int sysctl_max_syn_backlog = 256;
index 9083e82..dbf2dda 100644 (file)
@@ -273,6 +273,17 @@ EXPORT_SYMBOL_GPL(rtnl_unregister_all);
 
 static LIST_HEAD(link_ops);
 
+static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
+{
+       const struct rtnl_link_ops *ops;
+
+       list_for_each_entry(ops, &link_ops, list) {
+               if (!strcmp(ops->kind, kind))
+                       return ops;
+       }
+       return NULL;
+}
+
 /**
  * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
  * @ops: struct rtnl_link_ops * to register
@@ -285,6 +296,9 @@ static LIST_HEAD(link_ops);
  */
 int __rtnl_link_register(struct rtnl_link_ops *ops)
 {
+       if (rtnl_link_ops_get(ops->kind))
+               return -EEXIST;
+
        if (!ops->dellink)
                ops->dellink = unregister_netdevice_queue;
 
@@ -351,17 +365,6 @@ void rtnl_link_unregister(struct rtnl_link_ops *ops)
 }
 EXPORT_SYMBOL_GPL(rtnl_link_unregister);
 
-static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
-{
-       const struct rtnl_link_ops *ops;
-
-       list_for_each_entry(ops, &link_ops, list) {
-               if (!strcmp(ops->kind, kind))
-                       return ops;
-       }
-       return NULL;
-}
-
 static size_t rtnl_link_get_size(const struct net_device *dev)
 {
        const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
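
Moving rtnl_link_ops_get() ahead of __rtnl_link_register() lets the register path refuse a second rtnl_link_ops with an already-taken kind. A hedged sketch of what that means for a caller (the toy ops below are hypothetical and deliberately minimal; a real driver would also fill .setup, .newlink and friends):

#include <linux/module.h>
#include <net/rtnetlink.h>

static struct rtnl_link_ops toy_ops_a = {
	.kind = "toylink",
};

static struct rtnl_link_ops toy_ops_b = {
	.kind = "toylink",	/* same kind on purpose */
};

static int __init toy_init(void)
{
	int err = rtnl_link_register(&toy_ops_a);	/* succeeds */

	if (err)
		return err;

	err = rtnl_link_register(&toy_ops_b);		/* now fails */
	if (err == -EEXIST)
		pr_info("duplicate kind rejected, as the new check intends\n");

	return 0;
}

static void __exit toy_exit(void)
{
	rtnl_link_unregister(&toy_ops_a);
}

module_init(toy_init);
module_exit(toy_exit);
MODULE_LICENSE("GPL");
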
index 025233d..6fd4460 100644 (file)
@@ -19,6 +19,7 @@ static int __init net_secret_init(void)
 }
 late_initcall(net_secret_init);
 
+#ifdef CONFIG_INET
 static u32 seq_scale(u32 seq)
 {
        /*
@@ -33,8 +34,9 @@ static u32 seq_scale(u32 seq)
         */
        return seq + (ktime_to_ns(ktime_get_real()) >> 6);
 }
+#endif
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
                                   __be16 sport, __be16 dport)
 {
@@ -132,7 +134,7 @@ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
 EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);
 #endif
 
-#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
+#if IS_ENABLED(CONFIG_IP_DCCP)
 u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
                                __be16 sport, __be16 dport)
 {
@@ -154,7 +156,7 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
 }
 EXPORT_SYMBOL(secure_dccp_sequence_number);
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
                                  __be16 sport, __be16 dport)
 {
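
The IS_ENABLED() conversions in this file (and in many hunks that follow) are mechanical: IS_ENABLED(CONFIG_FOO) is true when CONFIG_FOO is built in or built as a module, which is exactly what the old two-symbol test spelled out. A user-space toy that mimics the old form, just to show the equivalence (the real macro lives in <linux/kconfig.h> and works via token pasting):

#include <stdio.h>

/* Kconfig defines CONFIG_FOO=1 for built-in and CONFIG_FOO_MODULE=1 for
 * modular builds; pretend IPv6 is modular here. */
#define CONFIG_IPV6_MODULE 1

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#define TOY_IPV6_ENABLED 1	/* IS_ENABLED(CONFIG_IPV6) would also be 1 */
#else
#define TOY_IPV6_ENABLED 0
#endif

int main(void)
{
	printf("ipv6 enabled: %d\n", TOY_IPV6_ENABLED);	/* prints 1 */
	return 0;
}
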
index fd36462..da0c97f 100644 (file)
@@ -452,7 +452,7 @@ static void skb_release_head_state(struct sk_buff *skb)
                WARN_ON(in_irq());
                skb->destructor(skb);
        }
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        nf_conntrack_put(skb->nfct);
 #endif
 #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
@@ -602,15 +602,14 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
        new->ip_summed          = old->ip_summed;
        skb_copy_queue_mapping(new, old);
        new->priority           = old->priority;
-#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
+#if IS_ENABLED(CONFIG_IP_VS)
        new->ipvs_property      = old->ipvs_property;
 #endif
        new->protocol           = old->protocol;
        new->mark               = old->mark;
        new->skb_iif            = old->skb_iif;
        __nf_copy(new, old);
-#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
-    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
        new->nf_trace           = old->nf_trace;
 #endif
 #ifdef CONFIG_NET_SCHED
index 9777da8..002939c 100644 (file)
 #include <linux/init.h>
 #include <linux/highmem.h>
 #include <linux/user_namespace.h>
+#include <linux/jump_label.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
 #include <net/tcp.h>
 #endif
 
+static DEFINE_MUTEX(proto_list_mutex);
+static LIST_HEAD(proto_list);
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+       struct proto *proto;
+       int ret = 0;
+
+       mutex_lock(&proto_list_mutex);
+       list_for_each_entry(proto, &proto_list, node) {
+               if (proto->init_cgroup) {
+                       ret = proto->init_cgroup(cgrp, ss);
+                       if (ret)
+                               goto out;
+               }
+       }
+
+       mutex_unlock(&proto_list_mutex);
+       return ret;
+out:
+       list_for_each_entry_continue_reverse(proto, &proto_list, node)
+               if (proto->destroy_cgroup)
+                       proto->destroy_cgroup(cgrp, ss);
+       mutex_unlock(&proto_list_mutex);
+       return ret;
+}
+
+void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+       struct proto *proto;
+
+       mutex_lock(&proto_list_mutex);
+       list_for_each_entry_reverse(proto, &proto_list, node)
+               if (proto->destroy_cgroup)
+                       proto->destroy_cgroup(cgrp, ss);
+       mutex_unlock(&proto_list_mutex);
+}
+#endif
+
 /*
  * Each address family might have different locking rules, so we have
  * one slock key per address family:
 static struct lock_class_key af_family_keys[AF_MAX];
 static struct lock_class_key af_family_slock_keys[AF_MAX];
 
+struct jump_label_key memcg_socket_limit_enabled;
+EXPORT_SYMBOL(memcg_socket_limit_enabled);
+
 /*
  * Make lock validator output more readable. (we pre-construct these
  * strings build-time, so that runtime initialization of socket
@@ -295,11 +339,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        unsigned long flags;
        struct sk_buff_head *list = &sk->sk_receive_queue;
 
-       /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
-          number of warnings when compiling with -W --ANK
-        */
-       if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-           (unsigned)sk->sk_rcvbuf) {
+       if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
                atomic_inc(&sk->sk_drops);
                trace_sock_rcvqueue_full(sk, skb);
                return -ENOMEM;
@@ -1323,7 +1363,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                newsk->sk_wq = NULL;
 
                if (newsk->sk_prot->sockets_allocated)
-                       percpu_counter_inc(newsk->sk_prot->sockets_allocated);
+                       sk_sockets_allocated_inc(newsk);
 
                if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
                        net_enable_timestamp();
@@ -1711,30 +1751,34 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)
        struct proto *prot = sk->sk_prot;
        int amt = sk_mem_pages(size);
        long allocated;
+       int parent_status = UNDER_LIMIT;
 
        sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
-       allocated = atomic_long_add_return(amt, prot->memory_allocated);
+
+       allocated = sk_memory_allocated_add(sk, amt, &parent_status);
 
        /* Under limit. */
-       if (allocated <= prot->sysctl_mem[0]) {
-               if (prot->memory_pressure && *prot->memory_pressure)
-                       *prot->memory_pressure = 0;
+       if (parent_status == UNDER_LIMIT &&
+                       allocated <= sk_prot_mem_limits(sk, 0)) {
+               sk_leave_memory_pressure(sk);
                return 1;
        }
 
-       /* Under pressure. */
-       if (allocated > prot->sysctl_mem[1])
-               if (prot->enter_memory_pressure)
-                       prot->enter_memory_pressure(sk);
+       /* Under pressure. (we or our parents) */
+       if ((parent_status > SOFT_LIMIT) ||
+                       allocated > sk_prot_mem_limits(sk, 1))
+               sk_enter_memory_pressure(sk);
 
-       /* Over hard limit. */
-       if (allocated > prot->sysctl_mem[2])
+       /* Over hard limit (we or our parents) */
+       if ((parent_status == OVER_LIMIT) ||
+                       (allocated > sk_prot_mem_limits(sk, 2)))
                goto suppress_allocation;
 
        /* guarantee minimum buffer size under pressure */
        if (kind == SK_MEM_RECV) {
                if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
                        return 1;
+
        } else { /* SK_MEM_SEND */
                if (sk->sk_type == SOCK_STREAM) {
                        if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
@@ -1744,13 +1788,13 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)
                                return 1;
        }
 
-       if (prot->memory_pressure) {
+       if (sk_has_memory_pressure(sk)) {
                int alloc;
 
-               if (!*prot->memory_pressure)
+               if (!sk_under_memory_pressure(sk))
                        return 1;
-               alloc = percpu_counter_read_positive(prot->sockets_allocated);
-               if (prot->sysctl_mem[2] > alloc *
+               alloc = sk_sockets_allocated_read_positive(sk);
+               if (sk_prot_mem_limits(sk, 2) > alloc *
                    sk_mem_pages(sk->sk_wmem_queued +
                                 atomic_read(&sk->sk_rmem_alloc) +
                                 sk->sk_forward_alloc))
@@ -1773,7 +1817,9 @@ suppress_allocation:
 
        /* Alas. Undo changes. */
        sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
-       atomic_long_sub(amt, prot->memory_allocated);
+
+       sk_memory_allocated_sub(sk, amt, parent_status);
+
        return 0;
 }
 EXPORT_SYMBOL(__sk_mem_schedule);
@@ -1784,15 +1830,13 @@ EXPORT_SYMBOL(__sk_mem_schedule);
  */
 void __sk_mem_reclaim(struct sock *sk)
 {
-       struct proto *prot = sk->sk_prot;
-
-       atomic_long_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
-                  prot->memory_allocated);
+       sk_memory_allocated_sub(sk,
+                               sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, 0);
        sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
 
-       if (prot->memory_pressure && *prot->memory_pressure &&
-           (atomic_long_read(prot->memory_allocated) < prot->sysctl_mem[0]))
-               *prot->memory_pressure = 0;
+       if (sk_under_memory_pressure(sk) &&
+           (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
+               sk_leave_memory_pressure(sk);
 }
 EXPORT_SYMBOL(__sk_mem_reclaim);
 
@@ -2283,9 +2327,6 @@ void sk_common_release(struct sock *sk)
 }
 EXPORT_SYMBOL(sk_common_release);
 
-static DEFINE_RWLOCK(proto_list_lock);
-static LIST_HEAD(proto_list);
-
 #ifdef CONFIG_PROC_FS
 #define PROTO_INUSE_NR 64      /* should be enough for the first time */
 struct prot_inuse {
@@ -2434,10 +2475,10 @@ int proto_register(struct proto *prot, int alloc_slab)
                }
        }
 
-       write_lock(&proto_list_lock);
+       mutex_lock(&proto_list_mutex);
        list_add(&prot->node, &proto_list);
        assign_proto_idx(prot);
-       write_unlock(&proto_list_lock);
+       mutex_unlock(&proto_list_mutex);
        return 0;
 
 out_free_timewait_sock_slab_name:
@@ -2460,10 +2501,10 @@ EXPORT_SYMBOL(proto_register);
 
 void proto_unregister(struct proto *prot)
 {
-       write_lock(&proto_list_lock);
+       mutex_lock(&proto_list_mutex);
        release_proto_idx(prot);
        list_del(&prot->node);
-       write_unlock(&proto_list_lock);
+       mutex_unlock(&proto_list_mutex);
 
        if (prot->slab != NULL) {
                kmem_cache_destroy(prot->slab);
@@ -2486,9 +2527,9 @@ EXPORT_SYMBOL(proto_unregister);
 
 #ifdef CONFIG_PROC_FS
 static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
-       __acquires(proto_list_lock)
+       __acquires(proto_list_mutex)
 {
-       read_lock(&proto_list_lock);
+       mutex_lock(&proto_list_mutex);
        return seq_list_start_head(&proto_list, *pos);
 }
 
@@ -2498,25 +2539,36 @@ static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 }
 
 static void proto_seq_stop(struct seq_file *seq, void *v)
-       __releases(proto_list_lock)
+       __releases(proto_list_mutex)
 {
-       read_unlock(&proto_list_lock);
+       mutex_unlock(&proto_list_mutex);
 }
 
 static char proto_method_implemented(const void *method)
 {
        return method == NULL ? 'n' : 'y';
 }
+static long sock_prot_memory_allocated(struct proto *proto)
+{
+       return proto->memory_allocated != NULL ? proto_memory_allocated(proto): -1L;
+}
+
+static char *sock_prot_memory_pressure(struct proto *proto)
+{
+       return proto->memory_pressure != NULL ?
+       proto_memory_pressure(proto) ? "yes" : "no" : "NI";
+}
 
 static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
 {
+
        seq_printf(seq, "%-9s %4u %6d  %6ld   %-3s %6u   %-3s  %-10s "
                        "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
                   proto->name,
                   proto->obj_size,
                   sock_prot_inuse_get(seq_file_net(seq), proto),
-                  proto->memory_allocated != NULL ? atomic_long_read(proto->memory_allocated) : -1L,
-                  proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
+                  sock_prot_memory_allocated(proto),
+                  sock_prot_memory_pressure(proto),
                   proto->max_header,
                   proto->slab == NULL ? "no" : "yes",
                   module_name(proto->owner),
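
The sock.c changes above funnel per-protocol memory accounting through sk_* wrappers so that, when CONFIG_CGROUP_MEM_RES_CTLR_KMEM is enabled, a socket's memcg limits are consulted alongside the protocol's global sysctl limits. The three-zone decision in __sk_mem_schedule() is unchanged in spirit; a compressed sketch of it follows (the enum mirrors the UNDER/SOFT/OVER status the wrappers report; the pressure-path refinements such as minimum buffer guarantees and the stream-socket fallback in suppress_allocation are omitted for brevity):

#include <stdbool.h>

enum toy_parent_status { UNDER_LIMIT, SOFT_LIMIT, OVER_LIMIT };

/* limits[0..2] play the role of sk_prot_mem_limits(sk, 0..2). */
static int toy_mem_schedule(long allocated, const long limits[3],
			    enum toy_parent_status parent, bool *pressure)
{
	if (parent == UNDER_LIMIT && allocated <= limits[0]) {
		*pressure = false;		/* leave memory pressure */
		return 1;			/* charge accepted       */
	}

	if (parent > SOFT_LIMIT || allocated > limits[1])
		*pressure = true;		/* enter memory pressure */

	if (parent == OVER_LIMIT || allocated > limits[2])
		return 0;			/* hard limit: refuse    */

	return 1;				/* pressured but allowed */
}
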
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
new file mode 100644 (file)
index 0000000..711bdef
--- /dev/null
@@ -0,0 +1,169 @@
+#include <linux/mutex.h>
+#include <linux/socket.h>
+#include <linux/skbuff.h>
+#include <net/netlink.h>
+#include <net/net_namespace.h>
+#include <linux/module.h>
+
+#include <linux/inet_diag.h>
+#include <linux/sock_diag.h>
+
+static struct sock_diag_handler *sock_diag_handlers[AF_MAX];
+static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
+static DEFINE_MUTEX(sock_diag_table_mutex);
+
+int sock_diag_check_cookie(void *sk, __u32 *cookie)
+{
+       if ((cookie[0] != INET_DIAG_NOCOOKIE ||
+            cookie[1] != INET_DIAG_NOCOOKIE) &&
+           ((u32)(unsigned long)sk != cookie[0] ||
+            (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
+               return -ESTALE;
+       else
+               return 0;
+}
+EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
+
+void sock_diag_save_cookie(void *sk, __u32 *cookie)
+{
+       cookie[0] = (u32)(unsigned long)sk;
+       cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
+}
+EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
+
+void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
+{
+       mutex_lock(&sock_diag_table_mutex);
+       inet_rcv_compat = fn;
+       mutex_unlock(&sock_diag_table_mutex);
+}
+EXPORT_SYMBOL_GPL(sock_diag_register_inet_compat);
+
+void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
+{
+       mutex_lock(&sock_diag_table_mutex);
+       inet_rcv_compat = NULL;
+       mutex_unlock(&sock_diag_table_mutex);
+}
+EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat);
+
+int sock_diag_register(struct sock_diag_handler *hndl)
+{
+       int err = 0;
+
+       if (hndl->family >= AF_MAX)
+               return -EINVAL;
+
+       mutex_lock(&sock_diag_table_mutex);
+       if (sock_diag_handlers[hndl->family])
+               err = -EBUSY;
+       else
+               sock_diag_handlers[hndl->family] = hndl;
+       mutex_unlock(&sock_diag_table_mutex);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(sock_diag_register);
+
+void sock_diag_unregister(struct sock_diag_handler *hnld)
+{
+       int family = hnld->family;
+
+       if (family >= AF_MAX)
+               return;
+
+       mutex_lock(&sock_diag_table_mutex);
+       BUG_ON(sock_diag_handlers[family] != hnld);
+       sock_diag_handlers[family] = NULL;
+       mutex_unlock(&sock_diag_table_mutex);
+}
+EXPORT_SYMBOL_GPL(sock_diag_unregister);
+
+static inline struct sock_diag_handler *sock_diag_lock_handler(int family)
+{
+       if (sock_diag_handlers[family] == NULL)
+               request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
+                               NETLINK_SOCK_DIAG, family);
+
+       mutex_lock(&sock_diag_table_mutex);
+       return sock_diag_handlers[family];
+}
+
+static inline void sock_diag_unlock_handler(struct sock_diag_handler *h)
+{
+       mutex_unlock(&sock_diag_table_mutex);
+}
+
+static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+       int err;
+       struct sock_diag_req *req = NLMSG_DATA(nlh);
+       struct sock_diag_handler *hndl;
+
+       if (nlmsg_len(nlh) < sizeof(*req))
+               return -EINVAL;
+
+       hndl = sock_diag_lock_handler(req->sdiag_family);
+       if (hndl == NULL)
+               err = -ENOENT;
+       else
+               err = hndl->dump(skb, nlh);
+       sock_diag_unlock_handler(hndl);
+
+       return err;
+}
+
+static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+       int ret;
+
+       switch (nlh->nlmsg_type) {
+       case TCPDIAG_GETSOCK:
+       case DCCPDIAG_GETSOCK:
+               if (inet_rcv_compat == NULL)
+                       request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
+                                       NETLINK_SOCK_DIAG, AF_INET);
+
+               mutex_lock(&sock_diag_table_mutex);
+               if (inet_rcv_compat != NULL)
+                       ret = inet_rcv_compat(skb, nlh);
+               else
+                       ret = -EOPNOTSUPP;
+               mutex_unlock(&sock_diag_table_mutex);
+
+               return ret;
+       case SOCK_DIAG_BY_FAMILY:
+               return __sock_diag_rcv_msg(skb, nlh);
+       default:
+               return -EINVAL;
+       }
+}
+
+static DEFINE_MUTEX(sock_diag_mutex);
+
+static void sock_diag_rcv(struct sk_buff *skb)
+{
+       mutex_lock(&sock_diag_mutex);
+       netlink_rcv_skb(skb, &sock_diag_rcv_msg);
+       mutex_unlock(&sock_diag_mutex);
+}
+
+struct sock *sock_diag_nlsk;
+EXPORT_SYMBOL_GPL(sock_diag_nlsk);
+
+static int __init sock_diag_init(void)
+{
+       sock_diag_nlsk = netlink_kernel_create(&init_net, NETLINK_SOCK_DIAG, 0,
+                                       sock_diag_rcv, NULL, THIS_MODULE);
+       return sock_diag_nlsk == NULL ? -ENOMEM : 0;
+}
+
+static void __exit sock_diag_exit(void)
+{
+       netlink_kernel_release(sock_diag_nlsk);
+}
+
+module_init(sock_diag_init);
+module_exit(sock_diag_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_SOCK_DIAG);
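
The new net/core/sock_diag.c above is the address-family-agnostic front end for the NETLINK_SOCK_DIAG socket: it owns the netlink socket and dispatches SOCK_DIAG_BY_FAMILY requests to whichever handler registered for that family. A sketch of how a per-family module hooks in (toy_* names are hypothetical; inet_diag in this same series registers in essentially this shape):

#include <linux/module.h>
#include <linux/sock_diag.h>
#include <net/netlink.h>

static int toy_diag_dump(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	/* Parse the sock_diag_req that follows nlh and answer, e.g. via
	 * netlink_dump_start() or a unicast reply on sock_diag_nlsk. */
	return -EOPNOTSUPP;
}

static struct sock_diag_handler toy_diag_handler = {
	.family	= AF_INET,		/* one handler slot per family */
	.dump	= toy_diag_dump,
};

static int __init toy_diag_init(void)
{
	return sock_diag_register(&toy_diag_handler);
}

static void __exit toy_diag_exit(void)
{
	sock_diag_unregister(&toy_diag_handler);
}

module_init(toy_diag_init);
module_exit(toy_diag_exit);
MODULE_LICENSE("GPL");
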
index 67164bb..f053198 100644 (file)
@@ -29,7 +29,7 @@
 
 
 #ifdef CONFIG_IP_DCCP_CCID2_DEBUG
-static int ccid2_debug;
+static bool ccid2_debug;
 #define ccid2_pr_debug(format, a...)   DCCP_PR_DEBUG(ccid2_debug, format, ##a)
 #else
 #define ccid2_pr_debug(format, a...)
@@ -174,7 +174,7 @@ out:
 /*
  *     Congestion window validation (RFC 2861).
  */
-static int ccid2_do_cwv = 1;
+static bool ccid2_do_cwv = true;
 module_param(ccid2_do_cwv, bool, 0644);
 MODULE_PARM_DESC(ccid2_do_cwv, "Perform RFC2861 Congestion Window Validation");
 
index 3d604e1..5606273 100644 (file)
@@ -38,7 +38,7 @@
 #include <asm/unaligned.h>
 
 #ifdef CONFIG_IP_DCCP_CCID3_DEBUG
-static int ccid3_debug;
+static bool ccid3_debug;
 #define ccid3_pr_debug(format, a...)   DCCP_PR_DEBUG(ccid3_debug, format, ##a)
 #else
 #define ccid3_pr_debug(format, a...)
index 1f94b7e..62b5828 100644 (file)
@@ -8,7 +8,7 @@
 #include "tfrc.h"
 
 #ifdef CONFIG_IP_DCCP_TFRC_DEBUG
-int tfrc_debug;
+bool tfrc_debug;
 module_param(tfrc_debug, bool, 0644);
 MODULE_PARM_DESC(tfrc_debug, "Enable TFRC debug messages");
 #endif
index f8ee3f5..ed698c4 100644 (file)
@@ -21,7 +21,7 @@
 #include "packet_history.h"
 
 #ifdef CONFIG_IP_DCCP_TFRC_DEBUG
-extern int tfrc_debug;
+extern bool tfrc_debug;
 #define tfrc_pr_debug(format, a...)    DCCP_PR_DEBUG(tfrc_debug, format, ##a)
 #else
 #define tfrc_pr_debug(format, a...)
index 583490a..29d6bb6 100644 (file)
@@ -39,7 +39,7 @@
                                                  "%s: " fmt, __func__, ##a)
 
 #ifdef CONFIG_IP_DCCP_DEBUG
-extern int dccp_debug;
+extern bool dccp_debug;
 #define dccp_pr_debug(format, a...)      DCCP_PR_DEBUG(dccp_debug, format, ##a)
 #define dccp_pr_debug_cat(format, a...)   DCCP_PRINTK(dccp_debug, format, ##a)
 #define dccp_debug(fmt, a...)            dccp_pr_debug_cat(KERN_DEBUG fmt, ##a)
@@ -357,7 +357,7 @@ static inline int dccp_bad_service_code(const struct sock *sk,
 struct dccp_skb_cb {
        union {
                struct inet_skb_parm    h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                struct inet6_skb_parm   h6;
 #endif
        } header;
index b21f261..8f16257 100644 (file)
@@ -48,11 +48,23 @@ static void dccp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
                dccp_get_info(sk, _info);
 }
 
+static void dccp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+               struct inet_diag_req *r, struct nlattr *bc)
+{
+       inet_diag_dump_icsk(&dccp_hashinfo, skb, cb, r, bc);
+}
+
+static int dccp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+               struct inet_diag_req *req)
+{
+       return inet_diag_dump_one_icsk(&dccp_hashinfo, in_skb, nlh, req);
+}
+
 static const struct inet_diag_handler dccp_diag_handler = {
-       .idiag_hashinfo  = &dccp_hashinfo,
+       .dump            = dccp_diag_dump,
+       .dump_one        = dccp_diag_dump_one,
        .idiag_get_info  = dccp_diag_get_info,
-       .idiag_type      = DCCPDIAG_GETSOCK,
-       .idiag_info_size = sizeof(struct tcp_info),
+       .idiag_type      = IPPROTO_DCCP,
 };
 
 static int __init dccp_diag_init(void)
@@ -71,4 +83,4 @@ module_exit(dccp_diag_fini);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
 MODULE_DESCRIPTION("DCCP inet_diag handler");
-MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_INET_DIAG, DCCPDIAG_GETSOCK);
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-33 /* AF_INET - IPPROTO_DCCP */);
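
The alias change on the last line keeps auto-loading working with the new sock_diag front end; the expansion below spells out why the two strings line up, assuming the usual constant values (PF_NETLINK == 16, NETLINK_SOCK_DIAG == 4, AF_INET == 2, IPPROTO_DCCP == 33):

/* inet_diag_lock_handler() now requests:
 *
 *     request_module("net-pf-%d-proto-%d-type-%d-%d",
 *                    PF_NETLINK, NETLINK_SOCK_DIAG, AF_INET, IPPROTO_DCCP);
 *     // -> "net-pf-16-proto-4-type-2-33"
 *
 * and MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-33)
 * stringifies to exactly that, "net-pf-16-proto-4-type-2-33", so modprobe
 * can find dccp_diag when a DCCP dump is requested.
 */
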
index 23cea0e..78a2ad7 100644 (file)
@@ -490,8 +490,8 @@ static int dccp_feat_push_change(struct list_head *fn_list, u8 feat, u8 local,
        new->feat_num        = feat;
        new->is_local        = local;
        new->state           = FEAT_INITIALISING;
-       new->needs_confirm   = 0;
-       new->empty_confirm   = 0;
+       new->needs_confirm   = false;
+       new->empty_confirm   = false;
        new->val             = *fval;
        new->needs_mandatory = mandatory;
 
@@ -517,12 +517,12 @@ static int dccp_feat_push_confirm(struct list_head *fn_list, u8 feat, u8 local,
        new->feat_num        = feat;
        new->is_local        = local;
        new->state           = FEAT_STABLE;     /* transition in 6.6.2 */
-       new->needs_confirm   = 1;
+       new->needs_confirm   = true;
        new->empty_confirm   = (fval == NULL);
        new->val.nn          = 0;               /* zeroes the whole structure */
        if (!new->empty_confirm)
                new->val     = *fval;
-       new->needs_mandatory = 0;
+       new->needs_mandatory = false;
 
        return 0;
 }
@@ -1155,7 +1155,7 @@ static u8 dccp_feat_change_recv(struct list_head *fn, u8 is_mandatory, u8 opt,
        }
 
        if (dccp_feat_reconcile(&entry->val, val, len, server, true)) {
-               entry->empty_confirm = 0;
+               entry->empty_confirm = false;
        } else if (is_mandatory) {
                return DCCP_RESET_CODE_MANDATORY_ERROR;
        } else if (entry->state == FEAT_INITIALISING) {
@@ -1171,10 +1171,10 @@ static u8 dccp_feat_change_recv(struct list_head *fn, u8 is_mandatory, u8 opt,
                defval = dccp_feat_default_value(feat);
                if (!dccp_feat_reconcile(&entry->val, &defval, 1, server, true))
                        return DCCP_RESET_CODE_OPTION_ERROR;
-               entry->empty_confirm = 1;
+               entry->empty_confirm = true;
        }
-       entry->needs_confirm   = 1;
-       entry->needs_mandatory = 0;
+       entry->needs_confirm   = true;
+       entry->needs_mandatory = false;
        entry->state           = FEAT_STABLE;
        return 0;
 
index b50d5fd..5a7f90b 100644 (file)
@@ -53,7 +53,7 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
        if (tw != NULL) {
                const struct inet_connection_sock *icsk = inet_csk(sk);
                const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == PF_INET6) {
                        const struct ipv6_pinfo *np = inet6_sk(sk);
                        struct inet6_timewait_sock *tw6;
index 4b2ab65..68fa6b7 100644 (file)
@@ -544,7 +544,7 @@ int dccp_insert_fn_opt(struct sk_buff *skb, u8 type, u8 feat,
        }
 
        if (unlikely(val == NULL || len == 0))
-               len = repeat_first = 0;
+               len = repeat_first = false;
        tot_len = 3 + repeat_first + len;
 
        if (DCCP_SKB_CB(skb)->dccpd_opt_len + tot_len > DCCP_MAX_OPT_LEN) {
index e742f90..7065c0a 100644 (file)
@@ -1099,7 +1099,7 @@ module_param(thash_entries, int, 0444);
 MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");
 
 #ifdef CONFIG_IP_DCCP_DEBUG
-int dccp_debug;
+bool dccp_debug;
 module_param(dccp_debug, bool, 0644);
 MODULE_PARM_DESC(dccp_debug, "Enable debug messages");
 
index 3532ac6..7d2fff2 100644 (file)
@@ -202,7 +202,7 @@ static int dn_neigh_output_packet(struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
        struct dn_route *rt = (struct dn_route *)dst;
-       struct neighbour *neigh = dst_get_neighbour(dst);
+       struct neighbour *neigh = dst_get_neighbour_noref(dst);
        struct net_device *dev = neigh->dev;
        char mac_addr[ETH_ALEN];
 
index 94f4ec0..f31ce72 100644 (file)
@@ -244,7 +244,7 @@ static int dn_dst_gc(struct dst_ops *ops)
  */
 static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
-       struct neighbour *n = dst_get_neighbour(dst);
+       struct neighbour *n = dst_get_neighbour_noref(dst);
        u32 min_mtu = 230;
        struct dn_dev *dn;
 
@@ -713,7 +713,7 @@ out:
 static int dn_to_neigh_output(struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
-       struct neighbour *n = dst_get_neighbour(dst);
+       struct neighbour *n = dst_get_neighbour_noref(dst);
 
        return n->output(n, skb);
 }
@@ -728,7 +728,7 @@ static int dn_output(struct sk_buff *skb)
 
        int err = -EINVAL;
 
-       if ((neigh = dst_get_neighbour(dst)) == NULL)
+       if ((neigh = dst_get_neighbour_noref(dst)) == NULL)
                goto error;
 
        skb->dev = dev;
@@ -852,7 +852,7 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
        }
        rt->rt_type = res->type;
 
-       if (dev != NULL && dst_get_neighbour(&rt->dst) == NULL) {
+       if (dev != NULL && dst_get_neighbour_noref(&rt->dst) == NULL) {
                n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
                if (IS_ERR(n))
                        return PTR_ERR(n);
index cbb505b..1a8f93b 100644 (file)
@@ -409,6 +409,10 @@ config INET_TCP_DIAG
        depends on INET_DIAG
        def_tristate INET_DIAG
 
+config INET_UDP_DIAG
+       depends on INET_DIAG
+       def_tristate INET_DIAG && IPV6
+
 menuconfig TCP_CONG_ADVANCED
        bool "TCP: advanced congestion control"
        ---help---
index f2dc69c..ff75d3b 100644 (file)
@@ -34,6 +34,7 @@ obj-$(CONFIG_IP_PNP) += ipconfig.o
 obj-$(CONFIG_NETFILTER)        += netfilter.o netfilter/
 obj-$(CONFIG_INET_DIAG) += inet_diag.o 
 obj-$(CONFIG_INET_TCP_DIAG) += tcp_diag.o
+obj-$(CONFIG_INET_UDP_DIAG) += udp_diag.o
 obj-$(CONFIG_NET_TCPPROBE) += tcp_probe.o
 obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o
 obj-$(CONFIG_TCP_CONG_CUBIC) += tcp_cubic.o
@@ -47,6 +48,7 @@ obj-$(CONFIG_TCP_CONG_SCALABLE) += tcp_scalable.o
 obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o
 obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o
 obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
+obj-$(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) += tcp_memcontrol.o
 obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
 
 obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
index 15dc4c4..f7b5670 100644 (file)
@@ -1672,6 +1672,8 @@ static int __init inet_init(void)
        ip_static_sysctl_init();
 #endif
 
+       tcp_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem;
+
        /*
         *      Add all the base protocols.
         */
index ff324eb..381a087 100644 (file)
@@ -277,9 +277,9 @@ static int arp_constructor(struct neighbour *neigh)
                default:
                        break;
                case ARPHRD_ROSE:
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if IS_ENABLED(CONFIG_AX25)
                case ARPHRD_AX25:
-#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
+#if IS_ENABLED(CONFIG_NETROM)
                case ARPHRD_NETROM:
 #endif
                        neigh->ops = &arp_broken_ops;
@@ -629,13 +629,13 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
                arp->ar_pro = htons(ETH_P_IP);
                break;
 
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if IS_ENABLED(CONFIG_AX25)
        case ARPHRD_AX25:
                arp->ar_hrd = htons(ARPHRD_AX25);
                arp->ar_pro = htons(AX25_P_IP);
                break;
 
-#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
+#if IS_ENABLED(CONFIG_NETROM)
        case ARPHRD_NETROM:
                arp->ar_hrd = htons(ARPHRD_NETROM);
                arp->ar_pro = htons(AX25_P_IP);
@@ -643,13 +643,13 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
 #endif
 #endif
 
-#if defined(CONFIG_FDDI) || defined(CONFIG_FDDI_MODULE)
+#if IS_ENABLED(CONFIG_FDDI)
        case ARPHRD_FDDI:
                arp->ar_hrd = htons(ARPHRD_ETHER);
                arp->ar_pro = htons(ETH_P_IP);
                break;
 #endif
-#if defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
+#if IS_ENABLED(CONFIG_TR)
        case ARPHRD_IEEE802_TR:
                arp->ar_hrd = htons(ARPHRD_IEEE802);
                arp->ar_pro = htons(ETH_P_IP);
@@ -1036,7 +1036,7 @@ static int arp_req_set(struct net *net, struct arpreq *r,
                        return -EINVAL;
        }
        switch (dev->type) {
-#if defined(CONFIG_FDDI) || defined(CONFIG_FDDI_MODULE)
+#if IS_ENABLED(CONFIG_FDDI)
        case ARPHRD_FDDI:
                /*
                 * According to RFC 1390, FDDI devices should accept ARP
@@ -1282,7 +1282,7 @@ void __init arp_init(void)
 }
 
 #ifdef CONFIG_PROC_FS
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if IS_ENABLED(CONFIG_AX25)
 
 /* ------------------------------------------------------------------------ */
 /*
@@ -1330,7 +1330,7 @@ static void arp_format_neigh_entry(struct seq_file *seq,
 
        read_lock(&n->lock);
        /* Convert hardware address to XX:XX:XX:XX ... form. */
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if IS_ENABLED(CONFIG_AX25)
        if (hatype == ARPHRD_AX25 || hatype == ARPHRD_NETROM)
                ax2asc2((ax25_address *)n->ha, hbuffer);
        else {
@@ -1343,7 +1343,7 @@ static void arp_format_neigh_entry(struct seq_file *seq,
        if (k != 0)
                --k;
        hbuffer[k] = 0;
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if IS_ENABLED(CONFIG_AX25)
        }
 #endif
        sprintf(tbuf, "%pI4", n->primary_key);
index a598768..2e4e244 100644 (file)
@@ -418,7 +418,7 @@ static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
        return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #define AF_INET_FAMILY(fam) ((fam) == AF_INET)
 #else
 #define AF_INET_FAMILY(fam) 1
index 0a46c54..fb2e47f 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/stddef.h>
 
 #include <linux/inet_diag.h>
+#include <linux/sock_diag.h>
 
 static const struct inet_diag_handler **inet_diag_table;
 
@@ -45,24 +46,22 @@ struct inet_diag_entry {
        u16 userlocks;
 };
 
-static struct sock *idiagnl;
-
 #define INET_DIAG_PUT(skb, attrtype, attrlen) \
        RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
 
 static DEFINE_MUTEX(inet_diag_table_mutex);
 
-static const struct inet_diag_handler *inet_diag_lock_handler(int type)
+static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
 {
-       if (!inet_diag_table[type])
-               request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
-                              NETLINK_INET_DIAG, type);
+       if (!inet_diag_table[proto])
+               request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
+                              NETLINK_SOCK_DIAG, AF_INET, proto);
 
        mutex_lock(&inet_diag_table_mutex);
-       if (!inet_diag_table[type])
+       if (!inet_diag_table[proto])
                return ERR_PTR(-ENOENT);
 
-       return inet_diag_table[type];
+       return inet_diag_table[proto];
 }
 
 static inline void inet_diag_unlock_handler(
@@ -71,21 +70,21 @@ static inline void inet_diag_unlock_handler(
        mutex_unlock(&inet_diag_table_mutex);
 }
 
-static int inet_csk_diag_fill(struct sock *sk,
-                             struct sk_buff *skb,
-                             int ext, u32 pid, u32 seq, u16 nlmsg_flags,
+int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
+                             struct sk_buff *skb, struct inet_diag_req *req,
+                             u32 pid, u32 seq, u16 nlmsg_flags,
                              const struct nlmsghdr *unlh)
 {
        const struct inet_sock *inet = inet_sk(sk);
-       const struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_diag_msg *r;
        struct nlmsghdr  *nlh;
        void *info = NULL;
        struct inet_diag_meminfo  *minfo = NULL;
        unsigned char    *b = skb_tail_pointer(skb);
        const struct inet_diag_handler *handler;
+       int ext = req->idiag_ext;
 
-       handler = inet_diag_table[unlh->nlmsg_type];
+       handler = inet_diag_table[req->sdiag_protocol];
        BUG_ON(handler == NULL);
 
        nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
@@ -97,25 +96,13 @@ static int inet_csk_diag_fill(struct sock *sk,
        if (ext & (1 << (INET_DIAG_MEMINFO - 1)))
                minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO, sizeof(*minfo));
 
-       if (ext & (1 << (INET_DIAG_INFO - 1)))
-               info = INET_DIAG_PUT(skb, INET_DIAG_INFO,
-                                    handler->idiag_info_size);
-
-       if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) {
-               const size_t len = strlen(icsk->icsk_ca_ops->name);
-
-               strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1),
-                      icsk->icsk_ca_ops->name);
-       }
-
        r->idiag_family = sk->sk_family;
        r->idiag_state = sk->sk_state;
        r->idiag_timer = 0;
        r->idiag_retrans = 0;
 
        r->id.idiag_if = sk->sk_bound_dev_if;
-       r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
-       r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
+       sock_diag_save_cookie(sk, r->id.idiag_cookie);
 
        r->id.idiag_sport = inet->inet_sport;
        r->id.idiag_dport = inet->inet_dport;
@@ -128,7 +115,7 @@ static int inet_csk_diag_fill(struct sock *sk,
        if (ext & (1 << (INET_DIAG_TOS - 1)))
                RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
 
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        if (r->idiag_family == AF_INET6) {
                const struct ipv6_pinfo *np = inet6_sk(sk);
 
@@ -139,6 +126,21 @@ static int inet_csk_diag_fill(struct sock *sk,
        }
 #endif
 
+       r->idiag_uid = sock_i_uid(sk);
+       r->idiag_inode = sock_i_ino(sk);
+
+       if (minfo) {
+               minfo->idiag_rmem = sk_rmem_alloc_get(sk);
+               minfo->idiag_wmem = sk->sk_wmem_queued;
+               minfo->idiag_fmem = sk->sk_forward_alloc;
+               minfo->idiag_tmem = sk_wmem_alloc_get(sk);
+       }
+
+       if (icsk == NULL) {
+               r->idiag_rqueue = r->idiag_wqueue = 0;
+               goto out;
+       }
+
 #define EXPIRES_IN_MS(tmo)  DIV_ROUND_UP((tmo - jiffies) * 1000, HZ)
 
        if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
@@ -159,14 +161,14 @@ static int inet_csk_diag_fill(struct sock *sk,
        }
 #undef EXPIRES_IN_MS
 
-       r->idiag_uid = sock_i_uid(sk);
-       r->idiag_inode = sock_i_ino(sk);
+       if (ext & (1 << (INET_DIAG_INFO - 1)))
+               info = INET_DIAG_PUT(skb, INET_DIAG_INFO, sizeof(struct tcp_info));
 
-       if (minfo) {
-               minfo->idiag_rmem = sk_rmem_alloc_get(sk);
-               minfo->idiag_wmem = sk->sk_wmem_queued;
-               minfo->idiag_fmem = sk->sk_forward_alloc;
-               minfo->idiag_tmem = sk_wmem_alloc_get(sk);
+       if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) {
+               const size_t len = strlen(icsk->icsk_ca_ops->name);
+
+               strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1),
+                      icsk->icsk_ca_ops->name);
        }
 
        handler->idiag_get_info(sk, r, info);
@@ -175,6 +177,7 @@ static int inet_csk_diag_fill(struct sock *sk,
            icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
                icsk->icsk_ca_ops->get_info(sk, ext, skb);
 
+out:
        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;
 
@@ -183,10 +186,20 @@ nlmsg_failure:
        nlmsg_trim(skb, b);
        return -EMSGSIZE;
 }
+EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
+
+static int inet_csk_diag_fill(struct sock *sk,
+                             struct sk_buff *skb, struct inet_diag_req *req,
+                             u32 pid, u32 seq, u16 nlmsg_flags,
+                             const struct nlmsghdr *unlh)
+{
+       return inet_sk_diag_fill(sk, inet_csk(sk),
+                       skb, req, pid, seq, nlmsg_flags, unlh);
+}
 
 static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
-                              struct sk_buff *skb, int ext, u32 pid,
-                              u32 seq, u16 nlmsg_flags,
+                              struct sk_buff *skb, struct inet_diag_req *req,
+                              u32 pid, u32 seq, u16 nlmsg_flags,
                               const struct nlmsghdr *unlh)
 {
        long tmo;
@@ -207,8 +220,7 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
        r->idiag_family       = tw->tw_family;
        r->idiag_retrans      = 0;
        r->id.idiag_if        = tw->tw_bound_dev_if;
-       r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
-       r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
+       sock_diag_save_cookie(tw, r->id.idiag_cookie);
        r->id.idiag_sport     = tw->tw_sport;
        r->id.idiag_dport     = tw->tw_dport;
        r->id.idiag_src[0]    = tw->tw_rcv_saddr;
@@ -220,7 +232,7 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
        r->idiag_wqueue       = 0;
        r->idiag_uid          = 0;
        r->idiag_inode        = 0;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        if (tw->tw_family == AF_INET6) {
                const struct inet6_timewait_sock *tw6 =
                                                inet6_twsk((struct sock *)tw);
@@ -237,42 +249,31 @@ nlmsg_failure:
 }
 
 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
-                       int ext, u32 pid, u32 seq, u16 nlmsg_flags,
+                       struct inet_diag_req *r, u32 pid, u32 seq, u16 nlmsg_flags,
                        const struct nlmsghdr *unlh)
 {
        if (sk->sk_state == TCP_TIME_WAIT)
                return inet_twsk_diag_fill((struct inet_timewait_sock *)sk,
-                                          skb, ext, pid, seq, nlmsg_flags,
+                                          skb, r, pid, seq, nlmsg_flags,
                                           unlh);
-       return inet_csk_diag_fill(sk, skb, ext, pid, seq, nlmsg_flags, unlh);
+       return inet_csk_diag_fill(sk, skb, r, pid, seq, nlmsg_flags, unlh);
 }
 
-static int inet_diag_get_exact(struct sk_buff *in_skb,
-                              const struct nlmsghdr *nlh)
+int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb,
+               const struct nlmsghdr *nlh, struct inet_diag_req *req)
 {
        int err;
        struct sock *sk;
-       struct inet_diag_req *req = NLMSG_DATA(nlh);
        struct sk_buff *rep;
-       struct inet_hashinfo *hashinfo;
-       const struct inet_diag_handler *handler;
 
-       handler = inet_diag_lock_handler(nlh->nlmsg_type);
-       if (IS_ERR(handler)) {
-               err = PTR_ERR(handler);
-               goto unlock;
-       }
-
-       hashinfo = handler->idiag_hashinfo;
        err = -EINVAL;
-
-       if (req->idiag_family == AF_INET) {
+       if (req->sdiag_family == AF_INET) {
                sk = inet_lookup(&init_net, hashinfo, req->id.idiag_dst[0],
                                 req->id.idiag_dport, req->id.idiag_src[0],
                                 req->id.idiag_sport, req->id.idiag_if);
        }
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
-       else if (req->idiag_family == AF_INET6) {
+#if IS_ENABLED(CONFIG_IPV6)
+       else if (req->sdiag_family == AF_INET6) {
                sk = inet6_lookup(&init_net, hashinfo,
                                  (struct in6_addr *)req->id.idiag_dst,
                                  req->id.idiag_dport,
@@ -282,29 +283,26 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
        }
 #endif
        else {
-               goto unlock;
+               goto out_nosk;
        }
 
        err = -ENOENT;
        if (sk == NULL)
-               goto unlock;
+               goto out_nosk;
 
-       err = -ESTALE;
-       if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
-            req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
-           ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
-            (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
+       err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
+       if (err)
                goto out;
 
        err = -ENOMEM;
        rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
                                     sizeof(struct inet_diag_meminfo) +
-                                    handler->idiag_info_size + 64)),
+                                    sizeof(struct tcp_info) + 64)),
                        GFP_KERNEL);
        if (!rep)
                goto out;
 
-       err = sk_diag_fill(sk, rep, req->idiag_ext,
+       err = sk_diag_fill(sk, rep, req,
                           NETLINK_CB(in_skb).pid,
                           nlh->nlmsg_seq, 0, nlh);
        if (err < 0) {
@@ -312,7 +310,7 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
                kfree_skb(rep);
                goto out;
        }
-       err = netlink_unicast(idiagnl, rep, NETLINK_CB(in_skb).pid,
+       err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
                              MSG_DONTWAIT);
        if (err > 0)
                err = 0;
@@ -324,8 +322,25 @@ out:
                else
                        sock_put(sk);
        }
-unlock:
+out_nosk:
+       return err;
+}
+EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);
+
+static int inet_diag_get_exact(struct sk_buff *in_skb,
+                              const struct nlmsghdr *nlh,
+                              struct inet_diag_req *req)
+{
+       const struct inet_diag_handler *handler;
+       int err;
+
+       handler = inet_diag_lock_handler(req->sdiag_protocol);
+       if (IS_ERR(handler))
+               err = PTR_ERR(handler);
+       else
+               err = handler->dump_one(in_skb, nlh, req);
        inet_diag_unlock_handler(handler);
+
        return err;
 }
 
@@ -356,9 +371,12 @@ static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
 }
 
 
-static int inet_diag_bc_run(const void *bc, int len,
-                           const struct inet_diag_entry *entry)
+static int inet_diag_bc_run(const struct nlattr *_bc,
+               const struct inet_diag_entry *entry)
 {
+       const void *bc = nla_data(_bc);
+       int len = nla_len(_bc);
+
        while (len > 0) {
                int yes = 1;
                const struct inet_diag_bc_op *op = bc;
@@ -432,6 +450,35 @@ static int inet_diag_bc_run(const void *bc, int len,
        return len == 0;
 }
 
+int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
+{
+       struct inet_diag_entry entry;
+       struct inet_sock *inet = inet_sk(sk);
+
+       if (bc == NULL)
+               return 1;
+
+       entry.family = sk->sk_family;
+#if IS_ENABLED(CONFIG_IPV6)
+       if (entry.family == AF_INET6) {
+               struct ipv6_pinfo *np = inet6_sk(sk);
+
+               entry.saddr = np->rcv_saddr.s6_addr32;
+               entry.daddr = np->daddr.s6_addr32;
+       } else
+#endif
+       {
+               entry.saddr = &inet->inet_rcv_saddr;
+               entry.daddr = &inet->inet_daddr;
+       }
+       entry.sport = inet->inet_num;
+       entry.dport = ntohs(inet->inet_dport);
+       entry.userlocks = sk->sk_userlocks;
+
+       return inet_diag_bc_run(bc, &entry);
+}
+EXPORT_SYMBOL_GPL(inet_diag_bc_sk);
+
 static int valid_cc(const void *bc, int len, int cc)
 {
        while (len >= 0) {
@@ -488,57 +535,29 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
 
 static int inet_csk_diag_dump(struct sock *sk,
                              struct sk_buff *skb,
-                             struct netlink_callback *cb)
+                             struct netlink_callback *cb,
+                             struct inet_diag_req *r,
+                             const struct nlattr *bc)
 {
-       struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
+       if (!inet_diag_bc_sk(bc, sk))
+               return 0;
 
-       if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
-               struct inet_diag_entry entry;
-               const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
-                                                         sizeof(*r),
-                                                         INET_DIAG_REQ_BYTECODE);
-               struct inet_sock *inet = inet_sk(sk);
-
-               entry.family = sk->sk_family;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
-               if (entry.family == AF_INET6) {
-                       struct ipv6_pinfo *np = inet6_sk(sk);
-
-                       entry.saddr = np->rcv_saddr.s6_addr32;
-                       entry.daddr = np->daddr.s6_addr32;
-               } else
-#endif
-               {
-                       entry.saddr = &inet->inet_rcv_saddr;
-                       entry.daddr = &inet->inet_daddr;
-               }
-               entry.sport = inet->inet_num;
-               entry.dport = ntohs(inet->inet_dport);
-               entry.userlocks = sk->sk_userlocks;
-
-               if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
-                       return 0;
-       }
-
-       return inet_csk_diag_fill(sk, skb, r->idiag_ext,
+       return inet_csk_diag_fill(sk, skb, r,
                                  NETLINK_CB(cb->skb).pid,
                                  cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
 }
 
 static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
                               struct sk_buff *skb,
-                              struct netlink_callback *cb)
+                              struct netlink_callback *cb,
+                              struct inet_diag_req *r,
+                              const struct nlattr *bc)
 {
-       struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
-
-       if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
+       if (bc != NULL) {
                struct inet_diag_entry entry;
-               const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
-                                                         sizeof(*r),
-                                                         INET_DIAG_REQ_BYTECODE);
 
                entry.family = tw->tw_family;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == AF_INET6) {
                        struct inet6_timewait_sock *tw6 =
                                                inet6_twsk((struct sock *)tw);
@@ -554,11 +573,11 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
                entry.dport = ntohs(tw->tw_dport);
                entry.userlocks = 0;
 
-               if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
+               if (!inet_diag_bc_run(bc, &entry))
                        return 0;
        }
 
-       return inet_twsk_diag_fill(tw, skb, r->idiag_ext,
+       return inet_twsk_diag_fill(tw, skb, r,
                                   NETLINK_CB(cb->skb).pid,
                                   cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
 }
@@ -584,8 +603,7 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
        r->idiag_retrans = req->retrans;
 
        r->id.idiag_if = sk->sk_bound_dev_if;
-       r->id.idiag_cookie[0] = (u32)(unsigned long)req;
-       r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
+       sock_diag_save_cookie(req, r->id.idiag_cookie);
 
        tmo = req->expires - jiffies;
        if (tmo < 0)
@@ -600,7 +618,7 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
        r->idiag_wqueue = 0;
        r->idiag_uid = sock_i_uid(sk);
        r->idiag_inode = 0;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        if (r->idiag_family == AF_INET6) {
                *(struct in6_addr *)r->id.idiag_src = inet6_rsk(req)->loc_addr;
                *(struct in6_addr *)r->id.idiag_dst = inet6_rsk(req)->rmt_addr;
@@ -616,13 +634,13 @@ nlmsg_failure:
 }
 
 static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
-                              struct netlink_callback *cb)
+                              struct netlink_callback *cb,
+                              struct inet_diag_req *r,
+                              const struct nlattr *bc)
 {
        struct inet_diag_entry entry;
-       struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct listen_sock *lopt;
-       const struct nlattr *bc = NULL;
        struct inet_sock *inet = inet_sk(sk);
        int j, s_j;
        int reqnum, s_reqnum;
@@ -642,9 +660,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
        if (!lopt || !lopt->qlen)
                goto out;
 
-       if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
-               bc = nlmsg_find_attr(cb->nlh, sizeof(*r),
-                                    INET_DIAG_REQ_BYTECODE);
+       if (bc != NULL) {
                entry.sport = inet->inet_num;
                entry.userlocks = sk->sk_userlocks;
        }
@@ -664,21 +680,20 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
 
                        if (bc) {
                                entry.saddr =
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                                        (entry.family == AF_INET6) ?
                                        inet6_rsk(req)->loc_addr.s6_addr32 :
 #endif
                                        &ireq->loc_addr;
                                entry.daddr =
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                                        (entry.family == AF_INET6) ?
                                        inet6_rsk(req)->rmt_addr.s6_addr32 :
 #endif
                                        &ireq->rmt_addr;
                                entry.dport = ntohs(ireq->rmt_port);
 
-                               if (!inet_diag_bc_run(nla_data(bc),
-                                                     nla_len(bc), &entry))
+                               if (!inet_diag_bc_run(bc, &entry))
                                        continue;
                        }
 
@@ -701,19 +716,11 @@ out:
        return err;
 }
 
-static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
+void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
+               struct netlink_callback *cb, struct inet_diag_req *r, struct nlattr *bc)
 {
        int i, num;
        int s_i, s_num;
-       struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
-       const struct inet_diag_handler *handler;
-       struct inet_hashinfo *hashinfo;
-
-       handler = inet_diag_lock_handler(cb->nlh->nlmsg_type);
-       if (IS_ERR(handler))
-               goto unlock;
-
-       hashinfo = handler->idiag_hashinfo;
 
        s_i = cb->args[1];
        s_num = num = cb->args[2];
@@ -738,6 +745,10 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
                                        continue;
                                }
 
+                               if (r->sdiag_family != AF_UNSPEC &&
+                                               sk->sk_family != r->sdiag_family)
+                                       goto next_listen;
+
                                if (r->id.idiag_sport != inet->inet_sport &&
                                    r->id.idiag_sport)
                                        goto next_listen;
@@ -747,7 +758,7 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
                                    cb->args[3] > 0)
                                        goto syn_recv;
 
-                               if (inet_csk_diag_dump(sk, skb, cb) < 0) {
+                               if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
                                        spin_unlock_bh(&ilb->lock);
                                        goto done;
                                }
@@ -756,7 +767,7 @@ syn_recv:
                                if (!(r->idiag_states & TCPF_SYN_RECV))
                                        goto next_listen;
 
-                               if (inet_diag_dump_reqs(skb, sk, cb) < 0) {
+                               if (inet_diag_dump_reqs(skb, sk, cb, r, bc) < 0) {
                                        spin_unlock_bh(&ilb->lock);
                                        goto done;
                                }
@@ -778,7 +789,7 @@ skip_listen_ht:
        }
 
        if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV)))
-               goto unlock;
+               goto out;
 
        for (i = s_i; i <= hashinfo->ehash_mask; i++) {
                struct inet_ehash_bucket *head = &hashinfo->ehash[i];
@@ -803,13 +814,16 @@ skip_listen_ht:
                                goto next_normal;
                        if (!(r->idiag_states & (1 << sk->sk_state)))
                                goto next_normal;
+                       if (r->sdiag_family != AF_UNSPEC &&
+                                       sk->sk_family != r->sdiag_family)
+                               goto next_normal;
                        if (r->id.idiag_sport != inet->inet_sport &&
                            r->id.idiag_sport)
                                goto next_normal;
                        if (r->id.idiag_dport != inet->inet_dport &&
                            r->id.idiag_dport)
                                goto next_normal;
-                       if (inet_csk_diag_dump(sk, skb, cb) < 0) {
+                       if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
                                spin_unlock_bh(lock);
                                goto done;
                        }
@@ -825,13 +839,16 @@ next_normal:
 
                                if (num < s_num)
                                        goto next_dying;
+                               if (r->sdiag_family != AF_UNSPEC &&
+                                               tw->tw_family != r->sdiag_family)
+                                       goto next_dying;
                                if (r->id.idiag_sport != tw->tw_sport &&
                                    r->id.idiag_sport)
                                        goto next_dying;
                                if (r->id.idiag_dport != tw->tw_dport &&
                                    r->id.idiag_dport)
                                        goto next_dying;
-                               if (inet_twsk_diag_dump(tw, skb, cb) < 0) {
+                               if (inet_twsk_diag_dump(tw, skb, cb, r, bc) < 0) {
                                        spin_unlock_bh(lock);
                                        goto done;
                                }
@@ -845,15 +862,85 @@ next_dying:
 done:
        cb->args[1] = i;
        cb->args[2] = num;
-unlock:
+out:
+       ;
+}
+EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);
+
+static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+               struct inet_diag_req *r, struct nlattr *bc)
+{
+       const struct inet_diag_handler *handler;
+
+       handler = inet_diag_lock_handler(r->sdiag_protocol);
+       if (!IS_ERR(handler))
+               handler->dump(skb, cb, r, bc);
        inet_diag_unlock_handler(handler);
+
        return skb->len;
 }
 
-static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
+       struct nlattr *bc = NULL;
        int hdrlen = sizeof(struct inet_diag_req);
 
+       if (nlmsg_attrlen(cb->nlh, hdrlen))
+               bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);
+
+       return __inet_diag_dump(skb, cb, (struct inet_diag_req *)NLMSG_DATA(cb->nlh), bc);
+}
+
+static inline int inet_diag_type2proto(int type)
+{
+       switch (type) {
+       case TCPDIAG_GETSOCK:
+               return IPPROTO_TCP;
+       case DCCPDIAG_GETSOCK:
+               return IPPROTO_DCCP;
+       default:
+               return 0;
+       }
+}
+
+static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       struct inet_diag_req_compat *rc = NLMSG_DATA(cb->nlh);
+       struct inet_diag_req req;
+       struct nlattr *bc = NULL;
+       int hdrlen = sizeof(struct inet_diag_req_compat);
+
+       req.sdiag_family = AF_UNSPEC; /* compatibility */
+       req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
+       req.idiag_ext = rc->idiag_ext;
+       req.idiag_states = rc->idiag_states;
+       req.id = rc->id;
+
+       if (nlmsg_attrlen(cb->nlh, hdrlen))
+               bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);
+
+       return __inet_diag_dump(skb, cb, &req, bc);
+}
+
+static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
+                              const struct nlmsghdr *nlh)
+{
+       struct inet_diag_req_compat *rc = NLMSG_DATA(nlh);
+       struct inet_diag_req req;
+
+       req.sdiag_family = rc->idiag_family;
+       req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
+       req.idiag_ext = rc->idiag_ext;
+       req.idiag_states = rc->idiag_states;
+       req.id = rc->id;
+
+       return inet_diag_get_exact(in_skb, nlh, &req);
+}
+
+static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+       int hdrlen = sizeof(struct inet_diag_req_compat);
+
        if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
            nlmsg_len(nlh) < hdrlen)
                return -EINVAL;
@@ -870,28 +957,54 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                                return -EINVAL;
                }
 
-               return netlink_dump_start(idiagnl, skb, nlh,
-                                         inet_diag_dump, NULL, 0);
+               return netlink_dump_start(sock_diag_nlsk, skb, nlh,
+                                         inet_diag_dump_compat, NULL, 0);
        }
 
-       return inet_diag_get_exact(skb, nlh);
+       return inet_diag_get_exact_compat(skb, nlh);
 }
 
-static DEFINE_MUTEX(inet_diag_mutex);
-
-static void inet_diag_rcv(struct sk_buff *skb)
+static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
 {
-       mutex_lock(&inet_diag_mutex);
-       netlink_rcv_skb(skb, &inet_diag_rcv_msg);
-       mutex_unlock(&inet_diag_mutex);
+       int hdrlen = sizeof(struct inet_diag_req);
+
+       if (nlmsg_len(h) < hdrlen)
+               return -EINVAL;
+
+       if (h->nlmsg_flags & NLM_F_DUMP) {
+               if (nlmsg_attrlen(h, hdrlen)) {
+                       struct nlattr *attr;
+                       attr = nlmsg_find_attr(h, hdrlen,
+                                              INET_DIAG_REQ_BYTECODE);
+                       if (attr == NULL ||
+                           nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
+                           inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
+                               return -EINVAL;
+               }
+
+               return netlink_dump_start(sock_diag_nlsk, skb, h,
+                                         inet_diag_dump, NULL, 0);
+       }
+
+       return inet_diag_get_exact(skb, h, (struct inet_diag_req *)NLMSG_DATA(h));
 }
 
+static struct sock_diag_handler inet_diag_handler = {
+       .family = AF_INET,
+       .dump = inet_diag_handler_dump,
+};
+
+static struct sock_diag_handler inet6_diag_handler = {
+       .family = AF_INET6,
+       .dump = inet_diag_handler_dump,
+};
+
 int inet_diag_register(const struct inet_diag_handler *h)
 {
        const __u16 type = h->idiag_type;
        int err = -EINVAL;
 
-       if (type >= INET_DIAG_GETSOCK_MAX)
+       if (type >= IPPROTO_MAX)
                goto out;
 
        mutex_lock(&inet_diag_table_mutex);
@@ -910,7 +1023,7 @@ void inet_diag_unregister(const struct inet_diag_handler *h)
 {
        const __u16 type = h->idiag_type;
 
-       if (type >= INET_DIAG_GETSOCK_MAX)
+       if (type >= IPPROTO_MAX)
                return;
 
        mutex_lock(&inet_diag_table_mutex);
@@ -921,7 +1034,7 @@ EXPORT_SYMBOL_GPL(inet_diag_unregister);
 
 static int __init inet_diag_init(void)
 {
-       const int inet_diag_table_size = (INET_DIAG_GETSOCK_MAX *
+       const int inet_diag_table_size = (IPPROTO_MAX *
                                          sizeof(struct inet_diag_handler *));
        int err = -ENOMEM;
 
@@ -929,25 +1042,35 @@ static int __init inet_diag_init(void)
        if (!inet_diag_table)
                goto out;
 
-       idiagnl = netlink_kernel_create(&init_net, NETLINK_INET_DIAG, 0,
-                                       inet_diag_rcv, NULL, THIS_MODULE);
-       if (idiagnl == NULL)
-               goto out_free_table;
-       err = 0;
+       err = sock_diag_register(&inet_diag_handler);
+       if (err)
+               goto out_free_nl;
+
+       err = sock_diag_register(&inet6_diag_handler);
+       if (err)
+               goto out_free_inet;
+
+       sock_diag_register_inet_compat(inet_diag_rcv_msg_compat);
 out:
        return err;
-out_free_table:
+
+out_free_inet:
+       sock_diag_unregister(&inet_diag_handler);
+out_free_nl:
        kfree(inet_diag_table);
        goto out;
 }
 
 static void __exit inet_diag_exit(void)
 {
-       netlink_kernel_release(idiagnl);
+       sock_diag_unregister(&inet6_diag_handler);
+       sock_diag_unregister(&inet_diag_handler);
+       sock_diag_unregister_inet_compat(inet_diag_rcv_msg_compat);
        kfree(inet_diag_table);
 }
 
 module_init(inet_diag_init);
 module_exit(inet_diag_exit);
 MODULE_LICENSE("GPL");
-MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_INET_DIAG);
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2 /* AF_INET */);
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 10 /* AF_INET6 */);
index 2b32296..2b53a1f 100644
@@ -46,7 +46,7 @@
 #include <net/rtnetlink.h>
 #include <net/gre.h>
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6.h>
 #include <net/ip6_fib.h>
 #include <net/ip6_route.h>
@@ -729,9 +729,9 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
                        if ((dst = rt->rt_gateway) == 0)
                                goto tx_error_icmp;
                }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                else if (skb->protocol == htons(ETH_P_IPV6)) {
-                       struct neighbour *neigh = dst_get_neighbour(skb_dst(skb));
+                       struct neighbour *neigh = dst_get_neighbour_noref(skb_dst(skb));
                        const struct in6_addr *addr6;
                        int addr_type;
 
@@ -799,7 +799,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
                        goto tx_error;
                }
        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
 
@@ -875,7 +875,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
        if ((iph->ttl = tiph->ttl) == 0) {
                if (skb->protocol == htons(ETH_P_IP))
                        iph->ttl = old_iph->ttl;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                else if (skb->protocol == htons(ETH_P_IPV6))
                        iph->ttl = ((const struct ipv6hdr *)old_iph)->hop_limit;
 #endif
index 0d5e567..ff302bd 100644
@@ -206,7 +206,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
        }
 
        rcu_read_lock();
-       neigh = dst_get_neighbour(dst);
+       neigh = dst_get_neighbour_noref(dst);
        if (neigh) {
                int res = neigh_output(neigh, skb);
 
index 80d5fa4..8aa87c1 100644
@@ -37,7 +37,7 @@
 #include <net/route.h>
 #include <net/xfrm.h>
 #include <net/compat.h>
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <net/transp_v6.h>
 #endif
 
@@ -508,7 +508,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
                                                sock_owned_by_user(sk));
                if (inet->is_icsk) {
                        struct inet_connection_sock *icsk = inet_csk(sk);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                        if (sk->sk_family == PF_INET ||
                            (!((1 << sk->sk_state) &
                               (TCPF_LISTEN | TCPF_CLOSE)) &&
@@ -519,7 +519,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
                                if (opt)
                                        icsk->icsk_ext_hdr_len += opt->opt.optlen;
                                icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                        }
 #endif
                }
index 915eb52..7e4ec9f 100644
@@ -253,6 +253,10 @@ static int __init ic_open_devs(void)
                }
        }
 
+       /* no point in waiting if we could not bring up at least one device */
+       if (!ic_first_dev)
+               goto have_carrier;
+
        /* wait for a carrier on at least one device */
        start = jiffies;
        while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) {
index 9490690..413ed1b 100644
@@ -285,6 +285,8 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
        if (register_netdevice(dev) < 0)
                goto failed_free;
 
+       strcpy(nt->parms.name, dev->name);
+
        dev_hold(dev);
        ipip_tunnel_link(ipn, nt);
        return nt;
@@ -759,7 +761,6 @@ static int ipip_tunnel_init(struct net_device *dev)
        struct ip_tunnel *tunnel = netdev_priv(dev);
 
        tunnel->dev = dev;
-       strcpy(tunnel->parms.name, dev->name);
 
        memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
        memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
@@ -825,6 +826,7 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
 static int __net_init ipip_init_net(struct net *net)
 {
        struct ipip_net *ipn = net_generic(net, ipip_net_id);
+       struct ip_tunnel *t;
        int err;
 
        ipn->tunnels[0] = ipn->tunnels_wc;
@@ -848,6 +850,9 @@ static int __net_init ipip_init_net(struct net *net)
        if ((err = register_netdev(ipn->fb_tunnel_dev)))
                goto err_reg_dev;
 
+       t = netdev_priv(ipn->fb_tunnel_dev);
+
+       strcpy(t->parms.name, ipn->fb_tunnel_dev->name);
        return 0;
 
 err_reg_dev:
index b550815..ba5756d 100644
@@ -65,7 +65,7 @@ static unsigned int flushtimeout = 10;
 module_param(flushtimeout, uint, 0600);
 MODULE_PARM_DESC(flushtimeout, "buffer flush timeout (hundredths of a second)");
 
-static int nflog = 1;
+static bool nflog = true;
 module_param(nflog, bool, 0400);
 MODULE_PARM_DESC(nflog, "register as internal netfilter logging module");
 
index c37641e..0e58f09 100644
@@ -52,7 +52,7 @@ iptable_filter_hook(unsigned int hook, struct sk_buff *skb,
 static struct nf_hook_ops *filter_ops __read_mostly;
 
 /* Default to forward because I got too much mail already. */
-static int forward = NF_ACCEPT;
+static bool forward = NF_ACCEPT;
 module_param(forward, bool, 0000);
 
 static int __net_init iptable_filter_net_init(struct net *net)
index 961eed4..3569d8e 100644
@@ -56,17 +56,17 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
 
        local_bh_disable();
        orphans = percpu_counter_sum_positive(&tcp_orphan_count);
-       sockets = percpu_counter_sum_positive(&tcp_sockets_allocated);
+       sockets = proto_sockets_allocated_sum_positive(&tcp_prot);
        local_bh_enable();
 
        socket_seq_show(seq);
        seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %ld\n",
                   sock_prot_inuse_get(net, &tcp_prot), orphans,
                   tcp_death_row.tw_count, sockets,
-                  atomic_long_read(&tcp_memory_allocated));
+                  proto_memory_allocated(&tcp_prot));
        seq_printf(seq, "UDP: inuse %d mem %ld\n",
                   sock_prot_inuse_get(net, &udp_prot),
-                  atomic_long_read(&udp_memory_allocated));
+                  proto_memory_allocated(&udp_prot));
        seq_printf(seq, "UDPLITE: inuse %d\n",
                   sock_prot_inuse_get(net, &udplite_prot));
        seq_printf(seq, "RAW: inuse %d\n",
index 7047069..bcacf54 100644
@@ -91,6 +91,7 @@
 #include <linux/rcupdate.h>
 #include <linux/times.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 #include <net/dst.h>
 #include <net/net_namespace.h>
 #include <net/protocol.h>
 #include <net/secure_seq.h>
 
 #define RT_FL_TOS(oldflp4) \
-    ((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
+       ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
 
 #define IP_MAX_MTU     0xFFF0
 
 
 static int ip_rt_max_size;
 static int ip_rt_gc_timeout __read_mostly      = RT_GC_TIMEOUT;
+static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
 static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
 static int ip_rt_redirect_number __read_mostly = 9;
 static int ip_rt_redirect_load __read_mostly   = HZ / 50;
@@ -132,6 +134,9 @@ static int ip_rt_min_advmss __read_mostly   = 256;
 static int rt_chain_length_max __read_mostly   = 20;
 static int redirect_genid;
 
+static struct delayed_work expires_work;
+static unsigned long expires_ljiffies;
+
 /*
  *     Interface to generic destination cache.
  */
@@ -419,7 +424,7 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
                int len, HHUptod;
 
                rcu_read_lock();
-               n = dst_get_neighbour(&r->dst);
+               n = dst_get_neighbour_noref(&r->dst);
                HHUptod = (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0;
                rcu_read_unlock();
 
@@ -829,6 +834,97 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
        return ONE;
 }
 
+static void rt_check_expire(void)
+{
+       static unsigned int rover;
+       unsigned int i = rover, goal;
+       struct rtable *rth;
+       struct rtable __rcu **rthp;
+       unsigned long samples = 0;
+       unsigned long sum = 0, sum2 = 0;
+       unsigned long delta;
+       u64 mult;
+
+       delta = jiffies - expires_ljiffies;
+       expires_ljiffies = jiffies;
+       mult = ((u64)delta) << rt_hash_log;
+       if (ip_rt_gc_timeout > 1)
+               do_div(mult, ip_rt_gc_timeout);
+       goal = (unsigned int)mult;
+       if (goal > rt_hash_mask)
+               goal = rt_hash_mask + 1;
+       for (; goal > 0; goal--) {
+               unsigned long tmo = ip_rt_gc_timeout;
+               unsigned long length;
+
+               i = (i + 1) & rt_hash_mask;
+               rthp = &rt_hash_table[i].chain;
+
+               if (need_resched())
+                       cond_resched();
+
+               samples++;
+
+               if (rcu_dereference_raw(*rthp) == NULL)
+                       continue;
+               length = 0;
+               spin_lock_bh(rt_hash_lock_addr(i));
+               while ((rth = rcu_dereference_protected(*rthp,
+                                       lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
+                       prefetch(rth->dst.rt_next);
+                       if (rt_is_expired(rth)) {
+                               *rthp = rth->dst.rt_next;
+                               rt_free(rth);
+                               continue;
+                       }
+                       if (rth->dst.expires) {
+                               /* Entry is expired even if it is in use */
+                               if (time_before_eq(jiffies, rth->dst.expires)) {
+nofree:
+                                       tmo >>= 1;
+                                       rthp = &rth->dst.rt_next;
+                                       /*
+                                        * We only count entries on
+                                        * a chain with equal hash inputs once
+                                        * so that entries for different QOS
+                                        * levels, and other non-hash input
+                                        * attributes don't unfairly skew
+                                        * the length computation
+                                        */
+                                       length += has_noalias(rt_hash_table[i].chain, rth);
+                                       continue;
+                               }
+                       } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
+                               goto nofree;
+
+                       /* Cleanup aged off entries. */
+                       *rthp = rth->dst.rt_next;
+                       rt_free(rth);
+               }
+               spin_unlock_bh(rt_hash_lock_addr(i));
+               sum += length;
+               sum2 += length*length;
+       }
+       if (samples) {
+               unsigned long avg = sum / samples;
+               unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
+               rt_chain_length_max = max_t(unsigned long,
+                                       ip_rt_gc_elasticity,
+                                       (avg + 4*sd) >> FRACT_BITS);
+       }
+       rover = i;
+}
+
+/*
+ * rt_worker_func() is run in process context.
+ * we call rt_check_expire() to scan part of the hash table
+ */
+static void rt_worker_func(struct work_struct *work)
+{
+       rt_check_expire();
+       schedule_delayed_work(&expires_work, ip_rt_gc_interval);
+}
+
 /*
  * Perturbation of rt_genid by a small quantity [1..256]
  * Using 8 bits of shuffling ensure we can call rt_cache_invalidate()
@@ -1265,7 +1361,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
 {
        struct rtable *rt = (struct rtable *) dst;
 
-       if (rt) {
+       if (rt && !(rt->dst.flags & DST_NOPEER)) {
                if (rt->peer == NULL)
                        rt_bind_peer(rt, rt->rt_dst, 1);
 
@@ -1276,7 +1372,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
                        iph->id = htons(inet_getid(rt->peer, more));
                        return;
                }
-       } else
+       } else if (!rt)
                printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
                       __builtin_return_address(0));
 
@@ -1304,7 +1400,7 @@ static void rt_del(unsigned hash, struct rtable *rt)
        spin_unlock_bh(rt_hash_lock_addr(hash));
 }
 
-static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
+static void check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
 {
        struct rtable *rt = (struct rtable *) dst;
        __be32 orig_gw = rt->rt_gateway;
@@ -1315,21 +1411,19 @@ static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
        rt->rt_gateway = peer->redirect_learned.a4;
 
        n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
-       if (IS_ERR(n))
-               return PTR_ERR(n);
+       if (IS_ERR(n)) {
+               rt->rt_gateway = orig_gw;
+               return;
+       }
        old_n = xchg(&rt->dst._neighbour, n);
        if (old_n)
                neigh_release(old_n);
-       if (!n || !(n->nud_state & NUD_VALID)) {
-               if (n)
-                       neigh_event_send(n, NULL);
-               rt->rt_gateway = orig_gw;
-               return -EAGAIN;
+       if (!(n->nud_state & NUD_VALID)) {
+               neigh_event_send(n, NULL);
        } else {
                rt->rt_flags |= RTCF_REDIRECTED;
                call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
        }
-       return 0;
 }
 
 /* called in rcu_read_lock() section */
@@ -1687,7 +1781,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
 }
 
 
-static struct rtable *ipv4_validate_peer(struct rtable *rt)
+static void ipv4_validate_peer(struct rtable *rt)
 {
        if (rt->rt_peer_genid != rt_peer_genid()) {
                struct inet_peer *peer;
@@ -1702,15 +1796,12 @@ static struct rtable *ipv4_validate_peer(struct rtable *rt)
                        if (peer->redirect_genid != redirect_genid)
                                peer->redirect_learned.a4 = 0;
                        if (peer->redirect_learned.a4 &&
-                           peer->redirect_learned.a4 != rt->rt_gateway) {
-                               if (check_peer_redir(&rt->dst, peer))
-                                       return NULL;
-                       }
+                           peer->redirect_learned.a4 != rt->rt_gateway)
+                               check_peer_redir(&rt->dst, peer);
                }
 
                rt->rt_peer_genid = rt_peer_genid();
        }
-       return rt;
 }
 
 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
@@ -1719,7 +1810,7 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
 
        if (rt_is_expired(rt))
                return NULL;
-       dst = (struct dst_entry *) ipv4_validate_peer(rt);
+       ipv4_validate_peer(rt);
        return dst;
 }
 
@@ -2374,9 +2465,7 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                    rth->rt_mark == skb->mark &&
                    net_eq(dev_net(rth->dst.dev), net) &&
                    !rt_is_expired(rth)) {
-                       rth = ipv4_validate_peer(rth);
-                       if (!rth)
-                               continue;
+                       ipv4_validate_peer(rth);
                        if (noref) {
                                dst_use_noref(&rth->dst, jiffies);
                                skb_dst_set_noref(skb, &rth->dst);
@@ -2435,11 +2524,11 @@ EXPORT_SYMBOL(ip_route_input_common);
 static struct rtable *__mkroute_output(const struct fib_result *res,
                                       const struct flowi4 *fl4,
                                       __be32 orig_daddr, __be32 orig_saddr,
-                                      int orig_oif, struct net_device *dev_out,
+                                      int orig_oif, __u8 orig_rtos,
+                                      struct net_device *dev_out,
                                       unsigned int flags)
 {
        struct fib_info *fi = res->fi;
-       u32 tos = RT_FL_TOS(fl4);
        struct in_device *in_dev;
        u16 type = res->type;
        struct rtable *rth;
@@ -2490,7 +2579,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
        rth->rt_genid = rt_genid(dev_net(dev_out));
        rth->rt_flags   = flags;
        rth->rt_type    = type;
-       rth->rt_key_tos = tos;
+       rth->rt_key_tos = orig_rtos;
        rth->rt_dst     = fl4->daddr;
        rth->rt_src     = fl4->saddr;
        rth->rt_route_iif = 0;
@@ -2540,7 +2629,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
 static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
 {
        struct net_device *dev_out = NULL;
-       u32 tos = RT_FL_TOS(fl4);
+       __u8 tos = RT_FL_TOS(fl4);
        unsigned int flags = 0;
        struct fib_result res;
        struct rtable *rth;
@@ -2716,7 +2805,7 @@ static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
 
 make_route:
        rth = __mkroute_output(&res, fl4, orig_daddr, orig_saddr, orig_oif,
-                              dev_out, flags);
+                              tos, dev_out, flags);
        if (!IS_ERR(rth)) {
                unsigned int hash;
 
@@ -2752,9 +2841,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
                            (IPTOS_RT_MASK | RTO_ONLINK)) &&
                    net_eq(dev_net(rth->dst.dev), net) &&
                    !rt_is_expired(rth)) {
-                       rth = ipv4_validate_peer(rth);
-                       if (!rth)
-                               continue;
+                       ipv4_validate_peer(rth);
                        dst_use(&rth->dst, jiffies);
                        RT_CACHE_STAT_INC(out_hit);
                        rcu_read_unlock_bh();
@@ -3181,6 +3268,13 @@ static ctl_table ipv4_route_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
+       {
+               .procname       = "gc_interval",
+               .data           = &ip_rt_gc_interval,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_jiffies,
+       },
        {
                .procname       = "redirect_load",
                .data           = &ip_rt_redirect_load,
@@ -3391,6 +3485,11 @@ int __init ip_rt_init(void)
        devinet_init();
        ip_fib_init();
 
+       INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
+       expires_ljiffies = jiffies;
+       schedule_delayed_work(&expires_work,
+               net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
+
        if (ip_rt_proc_init())
                printk(KERN_ERR "Unable to create route proc files\n");
 #ifdef CONFIG_XFRM
index 90f6544..51fdbb4 100644
@@ -245,7 +245,7 @@ bool cookie_check_timestamp(struct tcp_options_received *tcp_opt, bool *ecn_ok)
        if (!sysctl_tcp_timestamps)
                return false;
 
-       tcp_opt->sack_ok = (options >> 4) & 0x1;
+       tcp_opt->sack_ok = (options & (1 << 4)) ? TCP_SACK_SEEN : 0;
        *ecn_ok = (options >> 5) & 1;
        if (*ecn_ok && !sysctl_tcp_ecn)
                return false;
index 69fd720..4aa7e9d 100644
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/nsproxy.h>
+#include <linux/swap.h>
 #include <net/snmp.h>
 #include <net/icmp.h>
 #include <net/ip.h>
@@ -23,6 +24,7 @@
 #include <net/cipso_ipv4.h>
 #include <net/inet_frag.h>
 #include <net/ping.h>
+#include <net/tcp_memcontrol.h>
 
 static int zero;
 static int tcp_retr1_max = 255;
@@ -73,7 +75,7 @@ static int ipv4_local_port_range(ctl_table *table, int write,
 }
 
 
-void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high)
+static void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high)
 {
        gid_t *data = table->data;
        unsigned seq;
@@ -86,7 +88,7 @@ void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t
 }
 
 /* Update system visible IP port range */
-static void set_ping_group_range(struct ctl_table *table, int range[2])
+static void set_ping_group_range(struct ctl_table *table, gid_t range[2])
 {
        gid_t *data = table->data;
        write_seqlock(&sysctl_local_ports.lock);
@@ -174,6 +176,49 @@ static int proc_allowed_congestion_control(ctl_table *ctl,
        return ret;
 }
 
+static int ipv4_tcp_mem(ctl_table *ctl, int write,
+                          void __user *buffer, size_t *lenp,
+                          loff_t *ppos)
+{
+       int ret;
+       unsigned long vec[3];
+       struct net *net = current->nsproxy->net_ns;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+       struct mem_cgroup *memcg;
+#endif
+
+       ctl_table tmp = {
+               .data = &vec,
+               .maxlen = sizeof(vec),
+               .mode = ctl->mode,
+       };
+
+       if (!write) {
+               ctl->data = &net->ipv4.sysctl_tcp_mem;
+               return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
+       }
+
+       ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
+       if (ret)
+               return ret;
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+       rcu_read_lock();
+       memcg = mem_cgroup_from_task(current);
+
+       tcp_prot_mem(memcg, vec[0], 0);
+       tcp_prot_mem(memcg, vec[1], 1);
+       tcp_prot_mem(memcg, vec[2], 2);
+       rcu_read_unlock();
+#endif
+
+       net->ipv4.sysctl_tcp_mem[0] = vec[0];
+       net->ipv4.sysctl_tcp_mem[1] = vec[1];
+       net->ipv4.sysctl_tcp_mem[2] = vec[2];
+
+       return 0;
+}
+
 static struct ctl_table ipv4_table[] = {
        {
                .procname       = "tcp_timestamps",
@@ -432,13 +477,6 @@ static struct ctl_table ipv4_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
-       {
-               .procname       = "tcp_mem",
-               .data           = &sysctl_tcp_mem,
-               .maxlen         = sizeof(sysctl_tcp_mem),
-               .mode           = 0644,
-               .proc_handler   = proc_doulongvec_minmax
-       },
        {
                .procname       = "tcp_wmem",
                .data           = &sysctl_tcp_wmem,
@@ -721,6 +759,12 @@ static struct ctl_table ipv4_net_table[] = {
                .mode           = 0644,
                .proc_handler   = ipv4_ping_group_range,
        },
+       {
+               .procname       = "tcp_mem",
+               .maxlen         = sizeof(init_net.ipv4.sysctl_tcp_mem),
+               .mode           = 0644,
+               .proc_handler   = ipv4_tcp_mem,
+       },
        { }
 };
 
@@ -734,6 +778,7 @@ EXPORT_SYMBOL_GPL(net_ipv4_ctl_path);
 static __net_init int ipv4_sysctl_init_net(struct net *net)
 {
        struct ctl_table *table;
+       unsigned long limit;
 
        table = ipv4_net_table;
        if (!net_eq(net, &init_net)) {
@@ -769,6 +814,12 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
 
        net->ipv4.sysctl_rt_cache_rebuild_count = 4;
 
+       limit = nr_free_buffer_pages() / 8;
+       limit = max(limit, 128UL);
+       net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
+       net->ipv4.sysctl_tcp_mem[1] = limit;
+       net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2;
+
        net->ipv4.ipv4_hdr = register_net_sysctl_table(net,
                        net_ipv4_ctl_path, table);
        if (net->ipv4.ipv4_hdr == NULL)
index a09fe25..9bcdec3 100644
@@ -282,11 +282,9 @@ int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
 struct percpu_counter tcp_orphan_count;
 EXPORT_SYMBOL_GPL(tcp_orphan_count);
 
-long sysctl_tcp_mem[3] __read_mostly;
 int sysctl_tcp_wmem[3] __read_mostly;
 int sysctl_tcp_rmem[3] __read_mostly;
 
-EXPORT_SYMBOL(sysctl_tcp_mem);
 EXPORT_SYMBOL(sysctl_tcp_rmem);
 EXPORT_SYMBOL(sysctl_tcp_wmem);
 
@@ -888,9 +886,6 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset,
 }
 EXPORT_SYMBOL(tcp_sendpage);
 
-#define TCP_PAGE(sk)   (sk->sk_sndmsg_page)
-#define TCP_OFF(sk)    (sk->sk_sndmsg_off)
-
 static inline int select_size(const struct sock *sk, bool sg)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
@@ -1008,13 +1003,13 @@ new_segment:
                        } else {
                                int merge = 0;
                                int i = skb_shinfo(skb)->nr_frags;
-                               struct page *page = TCP_PAGE(sk);
+                               struct page *page = sk->sk_sndmsg_page;
                                int off;
 
                                if (page && page_count(page) == 1)
-                                       TCP_OFF(sk) = 0;
+                                       sk->sk_sndmsg_off = 0;
 
-                               off = TCP_OFF(sk);
+                               off = sk->sk_sndmsg_off;
 
                                if (skb_can_coalesce(skb, i, page, off) &&
                                    off != PAGE_SIZE) {
@@ -1031,7 +1026,7 @@ new_segment:
                                } else if (page) {
                                        if (off == PAGE_SIZE) {
                                                put_page(page);
-                                               TCP_PAGE(sk) = page = NULL;
+                                               sk->sk_sndmsg_page = page = NULL;
                                                off = 0;
                                        }
                                } else
@@ -1057,9 +1052,9 @@ new_segment:
                                        /* If this page was new, give it to the
                                         * socket so it does not get leaked.
                                         */
-                                       if (!TCP_PAGE(sk)) {
-                                               TCP_PAGE(sk) = page;
-                                               TCP_OFF(sk) = 0;
+                                       if (!sk->sk_sndmsg_page) {
+                                               sk->sk_sndmsg_page = page;
+                                               sk->sk_sndmsg_off = 0;
                                        }
                                        goto do_error;
                                }
@@ -1069,15 +1064,15 @@ new_segment:
                                        skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
                                } else {
                                        skb_fill_page_desc(skb, i, page, off, copy);
-                                       if (TCP_PAGE(sk)) {
+                                       if (sk->sk_sndmsg_page) {
                                                get_page(page);
                                        } else if (off + copy < PAGE_SIZE) {
                                                get_page(page);
-                                               TCP_PAGE(sk) = page;
+                                               sk->sk_sndmsg_page = page;
                                        }
                                }
 
-                               TCP_OFF(sk) = off + copy;
+                               sk->sk_sndmsg_off = off + copy;
                        }
 
                        if (!copied)
@@ -3281,14 +3276,9 @@ void __init tcp_init(void)
        sysctl_tcp_max_orphans = cnt / 2;
        sysctl_max_syn_backlog = max(128, cnt / 256);
 
-       limit = nr_free_buffer_pages() / 8;
-       limit = max(limit, 128UL);
-       sysctl_tcp_mem[0] = limit / 4 * 3;
-       sysctl_tcp_mem[1] = limit;
-       sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
-
        /* Set per-socket limits to no more than 1/128 the pressure threshold */
-       limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
+       limit = ((unsigned long)init_net.ipv4.sysctl_tcp_mem[1])
+               << (PAGE_SHIFT - 7);
        max_share = min(4UL*1024*1024, limit);
 
        sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
index 939edb3..8cd357a 100644
@@ -34,11 +34,23 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
                tcp_get_info(sk, info);
 }
 
+static void tcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+               struct inet_diag_req *r, struct nlattr *bc)
+{
+       inet_diag_dump_icsk(&tcp_hashinfo, skb, cb, r, bc);
+}
+
+static int tcp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+               struct inet_diag_req *req)
+{
+       return inet_diag_dump_one_icsk(&tcp_hashinfo, in_skb, nlh, req);
+}
+
 static const struct inet_diag_handler tcp_diag_handler = {
-       .idiag_hashinfo  = &tcp_hashinfo,
+       .dump            = tcp_diag_dump,
+       .dump_one        = tcp_diag_dump_one,
        .idiag_get_info  = tcp_diag_get_info,
-       .idiag_type      = TCPDIAG_GETSOCK,
-       .idiag_info_size = sizeof(struct tcp_info),
+       .idiag_type      = IPPROTO_TCP,
 };
 
 static int __init tcp_diag_init(void)
@@ -54,4 +66,4 @@ static void __exit tcp_diag_exit(void)
 module_init(tcp_diag_init);
 module_exit(tcp_diag_exit);
 MODULE_LICENSE("GPL");
-MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_INET_DIAG, TCPDIAG_GETSOCK);
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-6 /* AF_INET - IPPROTO_TCP */);
index 0cbb440..2877c3e 100644
@@ -322,7 +322,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
        /* Check #1 */
        if (tp->rcv_ssthresh < tp->window_clamp &&
            (int)tp->rcv_ssthresh < tcp_space(sk) &&
-           !tcp_memory_pressure) {
+           !sk_under_memory_pressure(sk)) {
                int incr;
 
                /* Check #2. Increase window, if skb with such overhead
@@ -411,8 +411,8 @@ static void tcp_clamp_window(struct sock *sk)
 
        if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
            !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
-           !tcp_memory_pressure &&
-           atomic_long_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
+           !sk_under_memory_pressure(sk) &&
+           sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
                sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
                                    sysctl_tcp_rmem[2]);
        }
@@ -865,13 +865,13 @@ static void tcp_disable_fack(struct tcp_sock *tp)
        /* RFC3517 uses different metric in lost marker => reset on change */
        if (tcp_is_fack(tp))
                tp->lost_skb_hint = NULL;
-       tp->rx_opt.sack_ok &= ~2;
+       tp->rx_opt.sack_ok &= ~TCP_FACK_ENABLED;
 }
 
 /* Take a notice that peer is sending D-SACKs */
 static void tcp_dsack_seen(struct tcp_sock *tp)
 {
-       tp->rx_opt.sack_ok |= 4;
+       tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
 }
 
 /* Initialize metrics on socket. */
@@ -2663,7 +2663,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
                       tp->snd_ssthresh, tp->prior_ssthresh,
                       tp->packets_out);
        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        else if (sk->sk_family == AF_INET6) {
                struct ipv6_pinfo *np = inet6_sk(sk);
                printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
@@ -3878,7 +3878,7 @@ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *o
                        case TCPOPT_SACK_PERM:
                                if (opsize == TCPOLEN_SACK_PERM && th->syn &&
                                    !estab && sysctl_tcp_sack) {
-                                       opt_rx->sack_ok = 1;
+                                       opt_rx->sack_ok = TCP_SACK_SEEN;
                                        tcp_sack_reset(opt_rx);
                                }
                                break;
@@ -4866,7 +4866,7 @@ static int tcp_prune_queue(struct sock *sk)
 
        if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
                tcp_clamp_window(sk);
-       else if (tcp_memory_pressure)
+       else if (sk_under_memory_pressure(sk))
                tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
        tcp_collapse_ofo_queue(sk);
@@ -4932,11 +4932,11 @@ static int tcp_should_expand_sndbuf(const struct sock *sk)
                return 0;
 
        /* If we are under global TCP memory pressure, do not expand.  */
-       if (tcp_memory_pressure)
+       if (sk_under_memory_pressure(sk))
                return 0;
 
        /* If we are under soft global TCP memory pressure, do not expand.  */
-       if (atomic_long_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0])
+       if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0))
                return 0;
 
        /* If we filled the congestion window, do not expand.  */
index c4b8b09..1eb4ad5 100644
@@ -73,6 +73,7 @@
 #include <net/xfrm.h>
 #include <net/netdma.h>
 #include <net/secure_seq.h>
+#include <net/tcp_memcontrol.h>
 
 #include <linux/inet.h>
 #include <linux/ipv6.h>
@@ -1917,7 +1918,8 @@ static int tcp_v4_init_sock(struct sock *sk)
        sk->sk_rcvbuf = sysctl_tcp_rmem[1];
 
        local_bh_disable();
-       percpu_counter_inc(&tcp_sockets_allocated);
+       sock_update_memcg(sk);
+       sk_sockets_allocated_inc(sk);
        local_bh_enable();
 
        return 0;
@@ -1973,7 +1975,8 @@ void tcp_v4_destroy_sock(struct sock *sk)
                tp->cookie_values = NULL;
        }
 
-       percpu_counter_dec(&tcp_sockets_allocated);
+       sk_sockets_allocated_dec(sk);
+       sock_release_memcg(sk);
 }
 EXPORT_SYMBOL(tcp_v4_destroy_sock);
 
@@ -2620,7 +2623,6 @@ struct proto tcp_prot = {
        .orphan_count           = &tcp_orphan_count,
        .memory_allocated       = &tcp_memory_allocated,
        .memory_pressure        = &tcp_memory_pressure,
-       .sysctl_mem             = sysctl_tcp_mem,
        .sysctl_wmem            = sysctl_tcp_wmem,
        .sysctl_rmem            = sysctl_tcp_rmem,
        .max_header             = MAX_TCP_HEADER,
@@ -2634,10 +2636,14 @@ struct proto tcp_prot = {
        .compat_setsockopt      = compat_tcp_setsockopt,
        .compat_getsockopt      = compat_tcp_getsockopt,
 #endif
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+       .init_cgroup            = tcp_init_cgroup,
+       .destroy_cgroup         = tcp_destroy_cgroup,
+       .proto_cgroup           = tcp_proto_cgroup,
+#endif
 };
 EXPORT_SYMBOL(tcp_prot);
 
-
 static int __net_init tcp_sk_init(struct net *net)
 {
        return inet_ctl_sock_create(&net->ipv4.tcp_sock,
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
new file mode 100644
index 0000000..7fed04f
--- /dev/null
+++ b/net/ipv4/tcp_memcontrol.c
@@ -0,0 +1,272 @@
+#include <net/tcp.h>
+#include <net/tcp_memcontrol.h>
+#include <net/sock.h>
+#include <net/ip.h>
+#include <linux/nsproxy.h>
+#include <linux/memcontrol.h>
+#include <linux/module.h>
+
+static u64 tcp_cgroup_read(struct cgroup *cont, struct cftype *cft);
+static int tcp_cgroup_write(struct cgroup *cont, struct cftype *cft,
+                           const char *buffer);
+static int tcp_cgroup_reset(struct cgroup *cont, unsigned int event);
+
+static struct cftype tcp_files[] = {
+       {
+               .name = "kmem.tcp.limit_in_bytes",
+               .write_string = tcp_cgroup_write,
+               .read_u64 = tcp_cgroup_read,
+               .private = RES_LIMIT,
+       },
+       {
+               .name = "kmem.tcp.usage_in_bytes",
+               .read_u64 = tcp_cgroup_read,
+               .private = RES_USAGE,
+       },
+       {
+               .name = "kmem.tcp.failcnt",
+               .private = RES_FAILCNT,
+               .trigger = tcp_cgroup_reset,
+               .read_u64 = tcp_cgroup_read,
+       },
+       {
+               .name = "kmem.tcp.max_usage_in_bytes",
+               .private = RES_MAX_USAGE,
+               .trigger = tcp_cgroup_reset,
+               .read_u64 = tcp_cgroup_read,
+       },
+};
+
+static inline struct tcp_memcontrol *tcp_from_cgproto(struct cg_proto *cg_proto)
+{
+       return container_of(cg_proto, struct tcp_memcontrol, cg_proto);
+}
+
+static void memcg_tcp_enter_memory_pressure(struct sock *sk)
+{
+       if (sk->sk_cgrp->memory_pressure)
+               *sk->sk_cgrp->memory_pressure = 1;
+}
+EXPORT_SYMBOL(memcg_tcp_enter_memory_pressure);
+
+int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+       /*
+        * The root cgroup does not use res_counters, but rather
+        * relies on the data already collected by the network
+        * subsystem.
+        */
+       struct res_counter *res_parent = NULL;
+       struct cg_proto *cg_proto, *parent_cg;
+       struct tcp_memcontrol *tcp;
+       struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+       struct mem_cgroup *parent = parent_mem_cgroup(memcg);
+       struct net *net = current->nsproxy->net_ns;
+
+       cg_proto = tcp_prot.proto_cgroup(memcg);
+       if (!cg_proto)
+               goto create_files;
+
+       tcp = tcp_from_cgproto(cg_proto);
+
+       tcp->tcp_prot_mem[0] = net->ipv4.sysctl_tcp_mem[0];
+       tcp->tcp_prot_mem[1] = net->ipv4.sysctl_tcp_mem[1];
+       tcp->tcp_prot_mem[2] = net->ipv4.sysctl_tcp_mem[2];
+       tcp->tcp_memory_pressure = 0;
+
+       parent_cg = tcp_prot.proto_cgroup(parent);
+       if (parent_cg)
+               res_parent = parent_cg->memory_allocated;
+
+       res_counter_init(&tcp->tcp_memory_allocated, res_parent);
+       percpu_counter_init(&tcp->tcp_sockets_allocated, 0);
+
+       cg_proto->enter_memory_pressure = memcg_tcp_enter_memory_pressure;
+       cg_proto->memory_pressure = &tcp->tcp_memory_pressure;
+       cg_proto->sysctl_mem = tcp->tcp_prot_mem;
+       cg_proto->memory_allocated = &tcp->tcp_memory_allocated;
+       cg_proto->sockets_allocated = &tcp->tcp_sockets_allocated;
+       cg_proto->memcg = memcg;
+
+create_files:
+       return cgroup_add_files(cgrp, ss, tcp_files,
+                               ARRAY_SIZE(tcp_files));
+}
+EXPORT_SYMBOL(tcp_init_cgroup);
+
+void tcp_destroy_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+       struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+       struct cg_proto *cg_proto;
+       struct tcp_memcontrol *tcp;
+       u64 val;
+
+       cg_proto = tcp_prot.proto_cgroup(memcg);
+       if (!cg_proto)
+               return;
+
+       tcp = tcp_from_cgproto(cg_proto);
+       percpu_counter_destroy(&tcp->tcp_sockets_allocated);
+
+       val = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_USAGE);
+
+       if (val != RESOURCE_MAX)
+               jump_label_dec(&memcg_socket_limit_enabled);
+}
+EXPORT_SYMBOL(tcp_destroy_cgroup);
+
+static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
+{
+       struct net *net = current->nsproxy->net_ns;
+       struct tcp_memcontrol *tcp;
+       struct cg_proto *cg_proto;
+       u64 old_lim;
+       int i;
+       int ret;
+
+       cg_proto = tcp_prot.proto_cgroup(memcg);
+       if (!cg_proto)
+               return -EINVAL;
+
+       if (val > RESOURCE_MAX)
+               val = RESOURCE_MAX;
+
+       tcp = tcp_from_cgproto(cg_proto);
+
+       old_lim = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
+       ret = res_counter_set_limit(&tcp->tcp_memory_allocated, val);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < 3; i++)
+               tcp->tcp_prot_mem[i] = min_t(long, val >> PAGE_SHIFT,
+                                            net->ipv4.sysctl_tcp_mem[i]);
+
+       if (val == RESOURCE_MAX && old_lim != RESOURCE_MAX)
+               jump_label_dec(&memcg_socket_limit_enabled);
+       else if (old_lim == RESOURCE_MAX && val != RESOURCE_MAX)
+               jump_label_inc(&memcg_socket_limit_enabled);
+
+       return 0;
+}
+
+static int tcp_cgroup_write(struct cgroup *cont, struct cftype *cft,
+                           const char *buffer)
+{
+       struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+       unsigned long long val;
+       int ret = 0;
+
+       switch (cft->private) {
+       case RES_LIMIT:
+               /* see memcontrol.c */
+               ret = res_counter_memparse_write_strategy(buffer, &val);
+               if (ret)
+                       break;
+               ret = tcp_update_limit(memcg, val);
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+       return ret;
+}
+
+static u64 tcp_read_stat(struct mem_cgroup *memcg, int type, u64 default_val)
+{
+       struct tcp_memcontrol *tcp;
+       struct cg_proto *cg_proto;
+
+       cg_proto = tcp_prot.proto_cgroup(memcg);
+       if (!cg_proto)
+               return default_val;
+
+       tcp = tcp_from_cgproto(cg_proto);
+       return res_counter_read_u64(&tcp->tcp_memory_allocated, type);
+}
+
+static u64 tcp_read_usage(struct mem_cgroup *memcg)
+{
+       struct tcp_memcontrol *tcp;
+       struct cg_proto *cg_proto;
+
+       cg_proto = tcp_prot.proto_cgroup(memcg);
+       if (!cg_proto)
+               return atomic_long_read(&tcp_memory_allocated) << PAGE_SHIFT;
+
+       tcp = tcp_from_cgproto(cg_proto);
+       return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_USAGE);
+}
+
+static u64 tcp_cgroup_read(struct cgroup *cont, struct cftype *cft)
+{
+       struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+       u64 val;
+
+       switch (cft->private) {
+       case RES_LIMIT:
+               val = tcp_read_stat(memcg, RES_LIMIT, RESOURCE_MAX);
+               break;
+       case RES_USAGE:
+               val = tcp_read_usage(memcg);
+               break;
+       case RES_FAILCNT:
+       case RES_MAX_USAGE:
+               val = tcp_read_stat(memcg, cft->private, 0);
+               break;
+       default:
+               BUG();
+       }
+       return val;
+}
+
+static int tcp_cgroup_reset(struct cgroup *cont, unsigned int event)
+{
+       struct mem_cgroup *memcg;
+       struct tcp_memcontrol *tcp;
+       struct cg_proto *cg_proto;
+
+       memcg = mem_cgroup_from_cont(cont);
+       cg_proto = tcp_prot.proto_cgroup(memcg);
+       if (!cg_proto)
+               return 0;
+       tcp = tcp_from_cgproto(cg_proto);
+
+       switch (event) {
+       case RES_MAX_USAGE:
+               res_counter_reset_max(&tcp->tcp_memory_allocated);
+               break;
+       case RES_FAILCNT:
+               res_counter_reset_failcnt(&tcp->tcp_memory_allocated);
+               break;
+       }
+
+       return 0;
+}
+
+unsigned long long tcp_max_memory(const struct mem_cgroup *memcg)
+{
+       struct tcp_memcontrol *tcp;
+       struct cg_proto *cg_proto;
+
+       cg_proto = tcp_prot.proto_cgroup((struct mem_cgroup *)memcg);
+       if (!cg_proto)
+               return 0;
+
+       tcp = tcp_from_cgproto(cg_proto);
+       return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
+}
+
+void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx)
+{
+       struct tcp_memcontrol *tcp;
+       struct cg_proto *cg_proto;
+
+       cg_proto = tcp_prot.proto_cgroup(memcg);
+       if (!cg_proto)
+               return;
+
+       tcp = tcp_from_cgproto(cg_proto);
+
+       tcp->tcp_prot_mem[idx] = val;
+}
index 9dc146e..550e755 100644 (file)
@@ -336,7 +336,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
                tcptw->tw_ts_recent     = tp->rx_opt.ts_recent;
                tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == PF_INET6) {
                        struct ipv6_pinfo *np = inet6_sk(sk);
                        struct inet6_timewait_sock *tw6;
index 50788d6..8c8de27 100644 (file)
@@ -1093,6 +1093,13 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
 {
        int i, k, eat;
 
+       eat = min_t(int, len, skb_headlen(skb));
+       if (eat) {
+               __skb_pull(skb, eat);
+               len -= eat;
+               if (!len)
+                       return;
+       }
        eat = len;
        k = 0;
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
@@ -1124,11 +1131,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
        if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                return -ENOMEM;
 
-       /* If len == headlen, we avoid __skb_pull to preserve alignment. */
-       if (unlikely(len < skb_headlen(skb)))
-               __skb_pull(skb, len);
-       else
-               __pskb_trim_head(skb, len - skb_headlen(skb));
+       __pskb_trim_head(skb, len);
 
        TCP_SKB_CB(skb)->seq += len;
        skb->ip_summed = CHECKSUM_PARTIAL;
@@ -1919,7 +1922,7 @@ u32 __tcp_select_window(struct sock *sk)
        if (free_space < (full_space >> 1)) {
                icsk->icsk_ack.quick = 0;
 
-               if (tcp_memory_pressure)
+               if (sk_under_memory_pressure(sk))
                        tp->rcv_ssthresh = min(tp->rcv_ssthresh,
                                               4U * tp->advmss);
 
index 2e0f0af..a516d1e 100644 (file)
@@ -171,13 +171,13 @@ static int tcp_write_timeout(struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        int retry_until;
-       bool do_reset, syn_set = 0;
+       bool do_reset, syn_set = false;
 
        if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
                if (icsk->icsk_retransmits)
                        dst_negative_advice(sk);
                retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
-               syn_set = 1;
+               syn_set = true;
        } else {
                if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
                        /* Black hole detection */
@@ -261,7 +261,7 @@ static void tcp_delack_timer(unsigned long data)
        }
 
 out:
-       if (tcp_memory_pressure)
+       if (sk_under_memory_pressure(sk))
                sk_mem_reclaim(sk);
 out_unlock:
        bh_unlock_sock(sk);
@@ -340,7 +340,7 @@ void tcp_retransmit_timer(struct sock *sk)
                               &inet->inet_daddr, ntohs(inet->inet_dport),
                               inet->inet_num, tp->snd_una, tp->snd_nxt);
                }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                else if (sk->sk_family == AF_INET6) {
                        struct ipv6_pinfo *np = inet6_sk(sk);
                        LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
index ac3b3ee..0177598 100644 (file)
@@ -105,7 +105,7 @@ drop:
        return 0;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static int tunnel64_rcv(struct sk_buff *skb)
 {
        struct xfrm_tunnel *handler;
@@ -134,7 +134,7 @@ static void tunnel4_err(struct sk_buff *skb, u32 info)
                        break;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static void tunnel64_err(struct sk_buff *skb, u32 info)
 {
        struct xfrm_tunnel *handler;
@@ -152,7 +152,7 @@ static const struct net_protocol tunnel4_protocol = {
        .netns_ok       =       1,
 };
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static const struct net_protocol tunnel64_protocol = {
        .handler        =       tunnel64_rcv,
        .err_handler    =       tunnel64_err,
@@ -167,7 +167,7 @@ static int __init tunnel4_init(void)
                printk(KERN_ERR "tunnel4 init: can't add protocol\n");
                return -EAGAIN;
        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        if (inet_add_protocol(&tunnel64_protocol, IPPROTO_IPV6)) {
                printk(KERN_ERR "tunnel64 init: can't add protocol\n");
                inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP);
@@ -179,7 +179,7 @@ static int __init tunnel4_init(void)
 
 static void __exit tunnel4_fini(void)
 {
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        if (inet_del_protocol(&tunnel64_protocol, IPPROTO_IPV6))
                printk(KERN_ERR "tunnel64 close: can't remove protocol\n");
 #endif
index ad481b3..5d075b5 100644 (file)
@@ -445,7 +445,7 @@ exact_match:
 /* UDP is nearly always wildcards out the wazoo, it makes no sense to try
  * harder than this. -DaveM
  */
-static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
+struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
                __be16 sport, __be32 daddr, __be16 dport,
                int dif, struct udp_table *udptable)
 {
@@ -512,6 +512,7 @@ begin:
        rcu_read_unlock();
        return result;
 }
+EXPORT_SYMBOL_GPL(__udp4_lib_lookup);
 
 static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
                                                 __be16 sport, __be16 dport,
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
new file mode 100644 (file)
index 0000000..69f8a7c
--- /dev/null
@@ -0,0 +1,201 @@
+/*
+ * udp_diag.c  Module for monitoring UDP transport protocols sockets.
+ *
+ * Authors:    Pavel Emelyanov, <xemul@parallels.com>
+ *
+ *     This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+
+#include <linux/module.h>
+#include <linux/inet_diag.h>
+#include <linux/udp.h>
+#include <net/udp.h>
+#include <net/udplite.h>
+#include <linux/inet_diag.h>
+#include <linux/sock_diag.h>
+
+static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
+               struct netlink_callback *cb, struct inet_diag_req *req,
+               struct nlattr *bc)
+{
+       if (!inet_diag_bc_sk(bc, sk))
+               return 0;
+
+       return inet_sk_diag_fill(sk, NULL, skb, req, NETLINK_CB(cb->skb).pid,
+                       cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
+}
+
+static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
+               const struct nlmsghdr *nlh, struct inet_diag_req *req)
+{
+       int err = -EINVAL;
+       struct sock *sk;
+       struct sk_buff *rep;
+
+       if (req->sdiag_family == AF_INET)
+               sk = __udp4_lib_lookup(&init_net,
+                               req->id.idiag_src[0], req->id.idiag_sport,
+                               req->id.idiag_dst[0], req->id.idiag_dport,
+                               req->id.idiag_if, tbl);
+#if IS_ENABLED(CONFIG_IPV6)
+       else if (req->sdiag_family == AF_INET6)
+               sk = __udp6_lib_lookup(&init_net,
+                               (struct in6_addr *)req->id.idiag_src,
+                               req->id.idiag_sport,
+                               (struct in6_addr *)req->id.idiag_dst,
+                               req->id.idiag_dport,
+                               req->id.idiag_if, tbl);
+#endif
+       else
+               goto out_nosk;
+
+       err = -ENOENT;
+       if (sk == NULL)
+               goto out_nosk;
+
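+       /* If the request carries a socket cookie, it has to match the socket
+        * we looked up, guarding against reporting a recycled socket. */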
+       err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
+       if (err)
+               goto out;
+
+       err = -ENOMEM;
+       rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
+                                    sizeof(struct inet_diag_meminfo) +
+                                    64)), GFP_KERNEL);
+       if (!rep)
+               goto out;
+
+       err = inet_sk_diag_fill(sk, NULL, rep, req,
+                          NETLINK_CB(in_skb).pid,
+                          nlh->nlmsg_seq, 0, nlh);
+       if (err < 0) {
+               WARN_ON(err == -EMSGSIZE);
+               kfree_skb(rep);
+               goto out;
+       }
+       err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
+                             MSG_DONTWAIT);
+       if (err > 0)
+               err = 0;
+out:
+       if (sk)
+               sock_put(sk);
+out_nosk:
+       return err;
+}
+
+static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlink_callback *cb,
+               struct inet_diag_req *r, struct nlattr *bc)
+{
+       int num, s_num, slot, s_slot;
+
+       s_slot = cb->args[0];
+       num = s_num = cb->args[1];
+
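+       /* cb->args[] persists across netlink dump invocations; the slot and
+        * in-slot position stored at "done" let the dump resume where the
+        * previous reply skb filled up. */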
+       for (slot = s_slot; slot <= table->mask; num = s_num = 0, slot++) {
+               struct sock *sk;
+               struct hlist_nulls_node *node;
+               struct udp_hslot *hslot = &table->hash[slot];
+
+               if (hlist_nulls_empty(&hslot->head))
+                       continue;
+
+               spin_lock_bh(&hslot->lock);
+               sk_nulls_for_each(sk, node, &hslot->head) {
+                       struct inet_sock *inet = inet_sk(sk);
+
+                       if (num < s_num)
+                               goto next;
+                       if (!(r->idiag_states & (1 << sk->sk_state)))
+                               goto next;
+                       if (r->sdiag_family != AF_UNSPEC &&
+                                       sk->sk_family != r->sdiag_family)
+                               goto next;
+                       if (r->id.idiag_sport != inet->inet_sport &&
+                           r->id.idiag_sport)
+                               goto next;
+                       if (r->id.idiag_dport != inet->inet_dport &&
+                           r->id.idiag_dport)
+                               goto next;
+
+                       if (sk_diag_dump(sk, skb, cb, r, bc) < 0) {
+                               spin_unlock_bh(&hslot->lock);
+                               goto done;
+                       }
+next:
+                       num++;
+               }
+               spin_unlock_bh(&hslot->lock);
+       }
+done:
+       cb->args[0] = slot;
+       cb->args[1] = num;
+}
+
+static void udp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+               struct inet_diag_req *r, struct nlattr *bc)
+{
+       udp_dump(&udp_table, skb, cb, r, bc);
+}
+
+static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+               struct inet_diag_req *req)
+{
+       return udp_dump_one(&udp_table, in_skb, nlh, req);
+}
+
+static const struct inet_diag_handler udp_diag_handler = {
+       .dump            = udp_diag_dump,
+       .dump_one        = udp_diag_dump_one,
+       .idiag_type      = IPPROTO_UDP,
+};
+
+static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+               struct inet_diag_req *r, struct nlattr *bc)
+{
+       udp_dump(&udplite_table, skb, cb, r, bc);
+}
+
+static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+               struct inet_diag_req *req)
+{
+       return udp_dump_one(&udplite_table, in_skb, nlh, req);
+}
+
+static const struct inet_diag_handler udplite_diag_handler = {
+       .dump            = udplite_diag_dump,
+       .dump_one        = udplite_diag_dump_one,
+       .idiag_type      = IPPROTO_UDPLITE,
+};
+
+static int __init udp_diag_init(void)
+{
+       int err;
+
+       err = inet_diag_register(&udp_diag_handler);
+       if (err)
+               goto out;
+       err = inet_diag_register(&udplite_diag_handler);
+       if (err)
+               goto out_lite;
+out:
+       return err;
+out_lite:
+       inet_diag_unregister(&udp_diag_handler);
+       goto out;
+}
+
+static void __exit udp_diag_exit(void)
+{
+       inet_diag_unregister(&udplite_diag_handler);
+       inet_diag_unregister(&udp_diag_handler);
+}
+
+module_init(udp_diag_init);
+module_exit(udp_diag_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-17 /* AF_INET - IPPROTO_UDP */);
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-136 /* AF_INET - IPPROTO_UDPLITE */);
index 8280645..9247d9d 100644 (file)
@@ -64,7 +64,7 @@ static struct xfrm_tunnel xfrm_tunnel_handler __read_mostly = {
        .priority       =       2,
 };
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static struct xfrm_tunnel xfrm64_tunnel_handler __read_mostly = {
        .handler        =       xfrm_tunnel_rcv,
        .err_handler    =       xfrm_tunnel_err,
@@ -84,7 +84,7 @@ static int __init ipip_init(void)
                xfrm_unregister_type(&ipip_type, AF_INET);
                return -EAGAIN;
        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        if (xfrm4_tunnel_register(&xfrm64_tunnel_handler, AF_INET6)) {
                printk(KERN_INFO "ipip init: can't add xfrm handler for AF_INET6\n");
                xfrm4_tunnel_deregister(&xfrm_tunnel_handler, AF_INET);
@@ -97,7 +97,7 @@ static int __init ipip_init(void)
 
 static void __exit ipip_fini(void)
 {
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        if (xfrm4_tunnel_deregister(&xfrm64_tunnel_handler, AF_INET6))
                printk(KERN_INFO "ipip close: can't remove xfrm handler for AF_INET6\n");
 #endif
index 5860517..59a9d0e 100644 (file)
@@ -630,7 +630,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
                goto out;
        }
 
-       rt = addrconf_dst_alloc(idev, addr, 0);
+       rt = addrconf_dst_alloc(idev, addr, false);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
                goto out;
@@ -657,7 +657,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
         * layer address of our nexthop router
         */
 
-       if (dst_get_neighbour_raw(&rt->dst) == NULL)
+       if (dst_get_neighbour_noref_raw(&rt->dst) == NULL)
                ifa->flags &= ~IFA_F_OPTIMISTIC;
 
        ifa->idev = idev;
@@ -1805,7 +1805,8 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
                return ERR_PTR(-EACCES);
 
        /* Add default multicast route */
-       addrconf_add_mroute(dev);
+       if (!(dev->flags & IFF_LOOPBACK))
+               addrconf_add_mroute(dev);
 
        /* Add link local route */
        addrconf_add_lroute(dev);
index 7694c82..273f48d 100644 (file)
@@ -1116,6 +1116,8 @@ static int __init inet6_init(void)
        if (err)
                goto static_sysctl_fail;
 #endif
+       tcpv6_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem;
+
        /*
         *      ipngwg API draft makes clear that the correct semantics
         *      for TCP and UDP is to consider one TCP and UDP instance
index fc1cdcd..cc540f9 100644 (file)
@@ -289,7 +289,7 @@ int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr)
                goto out;
        }
 
-       rt = addrconf_dst_alloc(idev, addr, 1);
+       rt = addrconf_dst_alloc(idev, addr, true);
        if (IS_ERR(rt)) {
                kfree(aca);
                err = PTR_ERR(rt);
index 7b47303..2783631 100644 (file)
@@ -1533,7 +1533,7 @@ static int fib6_age(struct rt6_info *rt, void *arg)
                        RT6_TRACE("aging clone %p\n", rt);
                        return -1;
                } else if ((rt->rt6i_flags & RTF_GATEWAY) &&
-                          (!(dst_get_neighbour_raw(&rt->dst)->flags & NTF_ROUTER))) {
+                          (!(dst_get_neighbour_noref_raw(&rt->dst)->flags & NTF_ROUTER))) {
                        RT6_TRACE("purging route %p via non-router but gateway\n",
                                  rt);
                        return -1;
index 3221bc6..d97e071 100644 (file)
@@ -136,7 +136,7 @@ static int ip6_finish_output2(struct sk_buff *skb)
        }
 
        rcu_read_lock();
-       neigh = dst_get_neighbour(dst);
+       neigh = dst_get_neighbour_noref(dst);
        if (neigh) {
                int res = neigh_output(neigh, skb);
 
@@ -463,7 +463,7 @@ int ip6_forward(struct sk_buff *skb)
           send redirects to source routed frames.
           We don't send redirects to frames decapsulated from IPsec.
         */
-       n = dst_get_neighbour(dst);
+       n = dst_get_neighbour_noref(dst);
        if (skb->dev == dst->dev && n && opt->srcrt == 0 && !skb_sec_path(skb)) {
                struct in6_addr *target = NULL;
                struct rt6_info *rt;
@@ -604,7 +604,7 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
        static atomic_t ipv6_fragmentation_id;
        int old, new;
 
-       if (rt) {
+       if (rt && !(rt->dst.flags & DST_NOPEER)) {
                struct inet_peer *peer;
 
                if (!rt->rt6i_peer)
@@ -983,7 +983,7 @@ static int ip6_dst_lookup_tail(struct sock *sk,
         * dst entry of the nexthop router
         */
        rcu_read_lock();
-       n = dst_get_neighbour(*dst);
+       n = dst_get_neighbour_noref(*dst);
        if (n && !(n->nud_state & NUD_VALID)) {
                struct inet6_ifaddr *ifp;
                struct flowi6 fl_gw6;
index 518cbb9..ea34d58 100644 (file)
@@ -1410,18 +1410,11 @@ static void mld_sendpack(struct sk_buff *skb)
                                           csum_partial(skb_transport_header(skb),
                                                        mldlen, 0));
 
-       dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr);
-
-       if (!dst) {
-               err = -ENOMEM;
-               goto err_out;
-       }
-
        icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
                         &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
                         skb->dev->ifindex);
+       dst = icmp6_dst_alloc(skb->dev, NULL, &fl6);
 
-       dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
        err = 0;
        if (IS_ERR(dst)) {
                err = PTR_ERR(dst);
@@ -1785,17 +1778,10 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
        rcu_read_lock();
        idev = __in6_dev_get(skb->dev);
 
-       dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr);
-       if (!dst) {
-               err = -ENOMEM;
-               goto err_out;
-       }
-
        icmpv6_flow_init(sk, &fl6, type,
                         &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
                         skb->dev->ifindex);
-
-       dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
+       dst = icmp6_dst_alloc(skb->dev, NULL, &fl6);
        if (IS_ERR(dst)) {
                err = PTR_ERR(dst);
                goto err_out;
index cfb9709..f3e50c2 100644 (file)
@@ -516,14 +516,7 @@ void ndisc_send_skb(struct sk_buff *skb,
        type = icmp6h->icmp6_type;
 
        icmpv6_flow_init(sk, &fl6, type, saddr, daddr, dev->ifindex);
-
-       dst = icmp6_dst_alloc(dev, neigh, daddr);
-       if (!dst) {
-               kfree_skb(skb);
-               return;
-       }
-
-       dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
+       dst = icmp6_dst_alloc(dev, neigh, &fl6);
        if (IS_ERR(dst)) {
                kfree_skb(skb);
                return;
@@ -1238,7 +1231,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
        rt = rt6_get_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev);
 
        if (rt)
-               neigh = dst_get_neighbour(&rt->dst);
+               neigh = dst_get_neighbour_noref(&rt->dst);
 
        if (rt && lifetime == 0) {
                neigh_clone(neigh);
@@ -1258,7 +1251,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
                        return;
                }
 
-               neigh = dst_get_neighbour(&rt->dst);
+               neigh = dst_get_neighbour_noref(&rt->dst);
                if (neigh == NULL) {
                        ND_PRINTK0(KERN_ERR
                                   "ICMPv6 RA: %s() got default router without neighbour.\n",
index c9e37c8..a8f6da9 100644 (file)
@@ -44,7 +44,7 @@ ip6table_filter_hook(unsigned int hook, struct sk_buff *skb,
 static struct nf_hook_ops *filter_ops __read_mostly;
 
 /* Default to forward because I got too much mail already. */
-static int forward = NF_ACCEPT;
+static bool forward = NF_ACCEPT;
 module_param(forward, bool, 0000);
 
 static int __net_init ip6table_filter_net_init(struct net *net)
index ab48b02..5855e9e 100644 (file)
@@ -385,7 +385,7 @@ static void rt6_probe(struct rt6_info *rt)
         * to no more than one per minute.
         */
        rcu_read_lock();
-       neigh = rt ? dst_get_neighbour(&rt->dst) : NULL;
+       neigh = rt ? dst_get_neighbour_noref(&rt->dst) : NULL;
        if (!neigh || (neigh->nud_state & NUD_VALID))
                goto out;
        read_lock_bh(&neigh->lock);
@@ -432,7 +432,7 @@ static inline int rt6_check_neigh(struct rt6_info *rt)
        int m;
 
        rcu_read_lock();
-       neigh = dst_get_neighbour(&rt->dst);
+       neigh = dst_get_neighbour_noref(&rt->dst);
        if (rt->rt6i_flags & RTF_NONEXTHOP ||
            !(rt->rt6i_flags & RTF_GATEWAY))
                m = 1;
@@ -735,7 +735,7 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
                int attempts = !in_softirq();
 
                if (!(rt->rt6i_flags & RTF_GATEWAY)) {
-                       if (rt->rt6i_dst.plen != 128 &&
+                       if (ort->rt6i_dst.plen != 128 &&
                            ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
                                rt->rt6i_flags |= RTF_ANYCAST;
                        rt->rt6i_gateway = *daddr;
@@ -793,7 +793,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
 
        if (rt) {
                rt->rt6i_flags |= RTF_CACHE;
-               dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst)));
+               dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_noref_raw(&ort->dst)));
        }
        return rt;
 }
@@ -827,7 +827,7 @@ restart:
        dst_hold(&rt->dst);
        read_unlock_bh(&table->tb6_lock);
 
-       if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
+       if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
                nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
        else if (!(rt->dst.flags & DST_HOST))
                nrt = rt6_alloc_clone(rt, &fl6->daddr);
@@ -1075,8 +1075,9 @@ static DEFINE_SPINLOCK(icmp6_dst_lock);
 
 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
                                  struct neighbour *neigh,
-                                 const struct in6_addr *addr)
+                                 struct flowi6 *fl6)
 {
+       struct dst_entry *dst;
        struct rt6_info *rt;
        struct inet6_dev *idev = in6_dev_get(dev);
        struct net *net = dev_net(dev);
@@ -1087,22 +1088,25 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
        rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev, 0);
        if (unlikely(!rt)) {
                in6_dev_put(idev);
+               dst = ERR_PTR(-ENOMEM);
                goto out;
        }
 
        if (neigh)
                neigh_hold(neigh);
        else {
-               neigh = __neigh_lookup_errno(&nd_tbl, addr, dev);
-               if (IS_ERR(neigh))
-                       neigh = NULL;
+               neigh = __neigh_lookup_errno(&nd_tbl, &fl6->daddr, dev);
+               if (IS_ERR(neigh)) {
+                       dst_free(&rt->dst);
+                       return ERR_CAST(neigh);
+               }
        }
 
        rt->dst.flags |= DST_HOST;
        rt->dst.output  = ip6_output;
        dst_set_neighbour(&rt->dst, neigh);
        atomic_set(&rt->dst.__refcnt, 1);
-       rt->rt6i_dst.addr = *addr;
+       rt->rt6i_dst.addr = fl6->daddr;
        rt->rt6i_dst.plen = 128;
        rt->rt6i_idev     = idev;
        dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
@@ -1114,8 +1118,10 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
 
        fib6_force_start_gc(net);
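+       /* Resolve xfrm here so callers get a ready-to-use dst (or an ERR_PTR)
+        * instead of doing their own xfrm_lookup() on the result. */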
 
+       dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
+
 out:
-       return &rt->dst;
+       return dst;
 }
 
 int icmp6_dst_gc(void)
@@ -1636,7 +1642,7 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
        dst_confirm(&rt->dst);
 
        /* Duplicate redirect: silently ignore. */
-       if (neigh == dst_get_neighbour_raw(&rt->dst))
+       if (neigh == dst_get_neighbour_noref_raw(&rt->dst))
                goto out;
 
        nrt = ip6_rt_copy(rt, dest);
@@ -1728,7 +1734,7 @@ again:
           1. It is connected route. Action: COW
           2. It is gatewayed route or NONEXTHOP route. Action: clone it.
         */
-       if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
+       if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
                nrt = rt6_alloc_cow(rt, daddr, saddr);
        else
                nrt = rt6_alloc_clone(rt, daddr);
@@ -2063,7 +2069,7 @@ static int ip6_pkt_prohibit_out(struct sk_buff *skb)
 
 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
                                    const struct in6_addr *addr,
-                                   int anycast)
+                                   bool anycast)
 {
        struct net *net = dev_net(idev->dev);
        struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
@@ -2463,7 +2469,7 @@ static int rt6_fill_node(struct net *net,
                goto nla_put_failure;
 
        rcu_read_lock();
-       n = dst_get_neighbour(&rt->dst);
+       n = dst_get_neighbour_noref(&rt->dst);
        if (n)
                NLA_PUT(skb, RTA_GATEWAY, 16, &n->primary_key);
        rcu_read_unlock();
@@ -2660,7 +2666,7 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg)
        seq_puts(m, "00000000000000000000000000000000 00 ");
 #endif
        rcu_read_lock();
-       n = dst_get_neighbour(&rt->dst);
+       n = dst_get_neighbour_noref(&rt->dst);
        if (n) {
                seq_printf(m, "%pi6", n->primary_key);
        } else {
index 50968f2..3b6dac9 100644 (file)
@@ -263,6 +263,8 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
        if (register_netdevice(dev) < 0)
                goto failed_free;
 
+       strcpy(nt->parms.name, dev->name);
+
        dev_hold(dev);
 
        ipip6_tunnel_link(sitn, nt);
@@ -680,7 +682,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                struct neighbour *neigh = NULL;
 
                if (skb_dst(skb))
-                       neigh = dst_get_neighbour(skb_dst(skb));
+                       neigh = dst_get_neighbour_noref(skb_dst(skb));
 
                if (neigh == NULL) {
                        if (net_ratelimit())
@@ -705,7 +707,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                struct neighbour *neigh = NULL;
 
                if (skb_dst(skb))
-                       neigh = dst_get_neighbour(skb_dst(skb));
+                       neigh = dst_get_neighbour_noref(skb_dst(skb));
 
                if (neigh == NULL) {
                        if (net_ratelimit())
@@ -1144,7 +1146,6 @@ static int ipip6_tunnel_init(struct net_device *dev)
        struct ip_tunnel *tunnel = netdev_priv(dev);
 
        tunnel->dev = dev;
-       strcpy(tunnel->parms.name, dev->name);
 
        memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
        memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
@@ -1207,6 +1208,7 @@ static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_hea
 static int __net_init sit_init_net(struct net *net)
 {
        struct sit_net *sitn = net_generic(net, sit_net_id);
+       struct ip_tunnel *t;
        int err;
 
        sitn->tunnels[0] = sitn->tunnels_wc;
@@ -1231,6 +1233,9 @@ static int __net_init sit_init_net(struct net *net)
        if ((err = register_netdev(sitn->fb_tunnel_dev)))
                goto err_reg_dev;
 
+       t = netdev_priv(sitn->fb_tunnel_dev);
+
+       strcpy(t->parms.name, sitn->fb_tunnel_dev->name);
        return 0;
 
 err_reg_dev:
index 9d74eee..906c7ca 100644 (file)
@@ -62,6 +62,7 @@
 #include <net/netdma.h>
 #include <net/inet_common.h>
 #include <net/secure_seq.h>
+#include <net/tcp_memcontrol.h>
 
 #include <asm/uaccess.h>
 
@@ -1994,7 +1995,8 @@ static int tcp_v6_init_sock(struct sock *sk)
        sk->sk_rcvbuf = sysctl_tcp_rmem[1];
 
        local_bh_disable();
-       percpu_counter_inc(&tcp_sockets_allocated);
+       sock_update_memcg(sk);
+       sk_sockets_allocated_inc(sk);
        local_bh_enable();
 
        return 0;
@@ -2213,7 +2215,6 @@ struct proto tcpv6_prot = {
        .memory_allocated       = &tcp_memory_allocated,
        .memory_pressure        = &tcp_memory_pressure,
        .orphan_count           = &tcp_orphan_count,
-       .sysctl_mem             = sysctl_tcp_mem,
        .sysctl_wmem            = sysctl_tcp_wmem,
        .sysctl_rmem            = sysctl_tcp_rmem,
        .max_header             = MAX_TCP_HEADER,
@@ -2227,6 +2228,9 @@ struct proto tcpv6_prot = {
        .compat_setsockopt      = compat_tcp_setsockopt,
        .compat_getsockopt      = compat_tcp_getsockopt,
 #endif
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+       .proto_cgroup           = tcp_proto_cgroup,
+#endif
 };
 
 static const struct inet6_protocol tcpv6_protocol = {
index adfe26a..4f96b5c 100644 (file)
@@ -238,7 +238,7 @@ exact_match:
        return result;
 }
 
-static struct sock *__udp6_lib_lookup(struct net *net,
+struct sock *__udp6_lib_lookup(struct net *net,
                                      const struct in6_addr *saddr, __be16 sport,
                                      const struct in6_addr *daddr, __be16 dport,
                                      int dif, struct udp_table *udptable)
@@ -305,6 +305,7 @@ begin:
        rcu_read_unlock();
        return result;
 }
+EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
 
 static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
                                          __be16 sport, __be16 dport,
index c24f25a..bb14c34 100644 (file)
@@ -2558,8 +2558,8 @@ bed:
                        self->errno = 0;
                        setup_timer(&self->watchdog, irda_discovery_timeout,
                                        (unsigned long)self);
-                       self->watchdog.expires = jiffies + (val * HZ/1000);
-                       add_timer(&(self->watchdog));
+                       mod_timer(&self->watchdog,
+                                 jiffies + msecs_to_jiffies(val));
 
                        /* Wait for IR-LMP to call us back */
                        __wait_event_interruptible(self->query_wait,
index 7791176..579617c 100644 (file)
@@ -67,7 +67,7 @@ static void *ckey;
 static void *skey;
 
 /* Module parameters */
-static int eth;   /* Use "eth" or "irlan" name for devices */
+static bool eth;   /* Use "eth" or "irlan" name for devices */
 static int access = ACCESS_PEER; /* PEER, DIRECT or HOSTED */
 
 #ifdef CONFIG_PROC_FS
index 274d150..d5c5b8f 100644 (file)
@@ -130,6 +130,17 @@ static inline void low_nmcpy(unsigned char *dst, char *src)
        memcpy(&dst[8], src, 8);
 }
 
+static void iucv_skb_queue_purge(struct sk_buff_head *list)
+{
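+       /* Like skb_queue_purge(), but also release the device reference a
+        * queued skb may hold (taken in afiucv_hs_send()) before freeing it. */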
+       struct sk_buff *skb;
+
+       while ((skb = skb_dequeue(list)) != NULL) {
+               if (skb->dev)
+                       dev_put(skb->dev);
+               kfree_skb(skb);
+       }
+}
+
 static int afiucv_pm_prepare(struct device *dev)
 {
 #ifdef CONFIG_PM_DEBUG
@@ -164,10 +175,9 @@ static int afiucv_pm_freeze(struct device *dev)
        read_lock(&iucv_sk_list.lock);
        sk_for_each(sk, node, &iucv_sk_list.head) {
                iucv = iucv_sk(sk);
-               skb_queue_purge(&iucv->send_skb_q);
+               iucv_skb_queue_purge(&iucv->send_skb_q);
                skb_queue_purge(&iucv->backlog_skb_q);
                switch (sk->sk_state) {
-               case IUCV_SEVERED:
                case IUCV_DISCONN:
                case IUCV_CLOSING:
                case IUCV_CONNECTED:
@@ -212,7 +222,6 @@ static int afiucv_pm_restore_thaw(struct device *dev)
                        sk->sk_state_change(sk);
                        break;
                case IUCV_DISCONN:
-               case IUCV_SEVERED:
                case IUCV_CLOSING:
                case IUCV_LISTEN:
                case IUCV_BOUND:
@@ -366,9 +375,7 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
        if (imsg)
                memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
 
-       rcu_read_lock();
-       skb->dev = dev_get_by_index_rcu(net, sock->sk_bound_dev_if);
-       rcu_read_unlock();
+       skb->dev = dev_get_by_index(net, sock->sk_bound_dev_if);
        if (!skb->dev)
                return -ENODEV;
        if (!(skb->dev->flags & IFF_UP))
@@ -388,6 +395,7 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
        err = dev_queue_xmit(skb);
        if (err) {
                skb_unlink(nskb, &iucv->send_skb_q);
+               dev_put(nskb->dev);
                kfree_skb(nskb);
        } else {
                atomic_sub(confirm_recv, &iucv->msg_recv);
@@ -396,25 +404,6 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
        return err;
 }
 
-/* Timers */
-static void iucv_sock_timeout(unsigned long arg)
-{
-       struct sock *sk = (struct sock *)arg;
-
-       bh_lock_sock(sk);
-       sk->sk_err = ETIMEDOUT;
-       sk->sk_state_change(sk);
-       bh_unlock_sock(sk);
-
-       iucv_sock_kill(sk);
-       sock_put(sk);
-}
-
-static void iucv_sock_clear_timer(struct sock *sk)
-{
-       sk_stop_timer(sk, &sk->sk_timer);
-}
-
 static struct sock *__iucv_get_sock_by_name(char *nm)
 {
        struct sock *sk;
@@ -467,7 +456,6 @@ static void iucv_sock_close(struct sock *sk)
        int err, blen;
        struct sk_buff *skb;
 
-       iucv_sock_clear_timer(sk);
        lock_sock(sk);
 
        switch (sk->sk_state) {
@@ -481,16 +469,14 @@ static void iucv_sock_close(struct sock *sk)
                        blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
                        skb = sock_alloc_send_skb(sk, blen, 1, &err);
                        if (skb) {
-                               skb_reserve(skb,
-                                       sizeof(struct af_iucv_trans_hdr) +
-                                       ETH_HLEN);
+                               skb_reserve(skb, blen);
                                err = afiucv_hs_send(NULL, sk, skb,
                                                     AF_IUCV_FLAG_FIN);
                        }
                        sk->sk_state = IUCV_DISCONN;
                        sk->sk_state_change(sk);
                }
-       case IUCV_DISCONN:
+       case IUCV_DISCONN:   /* fall through */
                sk->sk_state = IUCV_CLOSING;
                sk->sk_state_change(sk);
 
@@ -520,7 +506,7 @@ static void iucv_sock_close(struct sock *sk)
                sk->sk_err = ECONNRESET;
                sk->sk_state_change(sk);
 
-               skb_queue_purge(&iucv->send_skb_q);
+               iucv_skb_queue_purge(&iucv->send_skb_q);
                skb_queue_purge(&iucv->backlog_skb_q);
                break;
 
@@ -581,8 +567,6 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
        sk->sk_protocol = proto;
        sk->sk_state    = IUCV_OPEN;
 
-       setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk);
-
        iucv_sock_link(&iucv_sk_list, sk);
        return sk;
 }
@@ -675,16 +659,12 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
                }
 
                if (sk->sk_state == IUCV_CONNECTED ||
-                   sk->sk_state == IUCV_SEVERED ||
-                   sk->sk_state == IUCV_DISCONN ||     /* due to PM restore */
+                   sk->sk_state == IUCV_DISCONN ||
                    !newsock) {
                        iucv_accept_unlink(sk);
                        if (newsock)
                                sock_graft(sk, newsock);
 
-                       if (sk->sk_state == IUCV_SEVERED)
-                               sk->sk_state = IUCV_DISCONN;
-
                        release_sock(sk);
                        return sk;
                }
@@ -739,7 +719,7 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
                if (!memcmp(dev->perm_addr, uid, 8)) {
                        memcpy(iucv->src_name, sa->siucv_name, 8);
                        memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
-                       sock->sk->sk_bound_dev_if = dev->ifindex;
+                       sk->sk_bound_dev_if = dev->ifindex;
                        sk->sk_state = IUCV_BOUND;
                        iucv->transport = AF_IUCV_TRANS_HIPER;
                        if (!iucv->msglimit)
@@ -774,16 +754,13 @@ done:
 static int iucv_sock_autobind(struct sock *sk)
 {
        struct iucv_sock *iucv = iucv_sk(sk);
-       char query_buffer[80];
        char name[12];
        int err = 0;
 
-       /* Set the userid and name */
-       cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
-       if (unlikely(err))
+       if (unlikely(!pr_iucv))
                return -EPROTO;
 
-       memcpy(iucv->src_user_id, query_buffer, 8);
+       memcpy(iucv->src_user_id, iucv_userid, 8);
 
        write_lock_bh(&iucv_sk_list.lock);
 
@@ -1225,6 +1202,8 @@ release:
        return len;
 
 fail:
+       if (skb->dev)
+               dev_put(skb->dev);
        kfree_skb(skb);
 out:
        release_sock(sk);
@@ -1357,7 +1336,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
        int blen;
        int err = 0;
 
-       if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
+       if ((sk->sk_state == IUCV_DISCONN) &&
            skb_queue_empty(&iucv->backlog_skb_q) &&
            skb_queue_empty(&sk->sk_receive_queue) &&
            list_empty(&iucv->message_q.list))
@@ -1441,9 +1420,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                                        ETH_HLEN;
                                sskb = sock_alloc_send_skb(sk, blen, 1, &err);
                                if (sskb) {
-                                       skb_reserve(sskb,
-                                               sizeof(struct af_iucv_trans_hdr)
-                                               + ETH_HLEN);
+                                       skb_reserve(sskb, blen);
                                        err = afiucv_hs_send(NULL, sk, sskb,
                                                             AF_IUCV_FLAG_WIN);
                                }
@@ -1506,7 +1483,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
        if (sk->sk_state == IUCV_CLOSED)
                mask |= POLLHUP;
 
-       if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
+       if (sk->sk_state == IUCV_DISCONN)
                mask |= POLLIN;
 
        if (sock_writeable(sk))
@@ -1533,7 +1510,6 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
        switch (sk->sk_state) {
        case IUCV_DISCONN:
        case IUCV_CLOSING:
-       case IUCV_SEVERED:
        case IUCV_CLOSED:
                err = -ENOTCONN;
                goto fail;
@@ -1888,10 +1864,7 @@ static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
 {
        struct sock *sk = path->private;
 
-       if (!list_empty(&iucv_sk(sk)->accept_q))
-               sk->sk_state = IUCV_SEVERED;
-       else
-               sk->sk_state = IUCV_DISCONN;
+       sk->sk_state = IUCV_DISCONN;
 
        sk->sk_state_change(sk);
 }
@@ -2051,10 +2024,7 @@ static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
        /* other end of connection closed */
        if (iucv) {
                bh_lock_sock(sk);
-               if (!list_empty(&iucv->accept_q))
-                       sk->sk_state = IUCV_SEVERED;
-               else
-                       sk->sk_state = IUCV_DISCONN;
+               sk->sk_state = IUCV_DISCONN;
                sk->sk_state_change(sk);
                bh_unlock_sock(sk);
        }
@@ -2209,6 +2179,8 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
                break;
        case 0:
                /* plain data frame */
+               memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,
+                      CB_TRGCLS_LEN);
                err = afiucv_hs_callback_rx(sk, skb);
                break;
        default:
@@ -2259,6 +2231,7 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
                        case TX_NOTIFY_OK:
                                __skb_unlink(this, list);
                                iucv_sock_wake_msglim(sk);
+                               dev_put(this->dev);
                                kfree_skb(this);
                                break;
                        case TX_NOTIFY_PENDING:
@@ -2269,6 +2242,7 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
                                atomic_dec(&iucv->pendings);
                                if (atomic_read(&iucv->pendings) <= 0)
                                        iucv_sock_wake_msglim(sk);
+                               dev_put(this->dev);
                                kfree_skb(this);
                                break;
                        case TX_NOTIFY_UNREACHABLE:
@@ -2277,11 +2251,9 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
                        case TX_NOTIFY_GENERALERROR:
                        case TX_NOTIFY_DELAYED_GENERALERROR:
                                __skb_unlink(this, list);
+                               dev_put(this->dev);
                                kfree_skb(this);
-                               if (!list_empty(&iucv->accept_q))
-                                       sk->sk_state = IUCV_SEVERED;
-                               else
-                                       sk->sk_state = IUCV_DISCONN;
+                               sk->sk_state = IUCV_DISCONN;
                                sk->sk_state_change(sk);
                                break;
                        }
@@ -2291,6 +2263,13 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
        }
        spin_unlock_irqrestore(&list->lock, flags);
 
+       if (sk->sk_state == IUCV_CLOSING) {
+               if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
+                       sk->sk_state = IUCV_CLOSED;
+                       sk->sk_state_change(sk);
+               }
+       }
+
 out_unlock:
        bh_unlock_sock(sk);
 }
index bfc0bef..11dbb22 100644 (file)
@@ -375,7 +375,7 @@ static int verify_address_len(const void *p)
        const struct sadb_address *sp = p;
        const struct sockaddr *addr = (const struct sockaddr *)(sp + 1);
        const struct sockaddr_in *sin;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        const struct sockaddr_in6 *sin6;
 #endif
        int len;
@@ -387,7 +387,7 @@ static int verify_address_len(const void *p)
                    sp->sadb_address_prefixlen > 32)
                        return -EINVAL;
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin6), sizeof(uint64_t));
                if (sp->sadb_address_len != len ||
@@ -469,7 +469,7 @@ static int present_and_same_family(const struct sadb_address *src,
        if (s_addr->sa_family != d_addr->sa_family)
                return 0;
        if (s_addr->sa_family != AF_INET
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
            && s_addr->sa_family != AF_INET6
 #endif
                )
@@ -579,7 +579,7 @@ static inline int pfkey_sockaddr_len(sa_family_t family)
        switch (family) {
        case AF_INET:
                return sizeof(struct sockaddr_in);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                return sizeof(struct sockaddr_in6);
 #endif
@@ -595,7 +595,7 @@ int pfkey_sockaddr_extract(const struct sockaddr *sa, xfrm_address_t *xaddr)
                xaddr->a4 =
                        ((struct sockaddr_in *)sa)->sin_addr.s_addr;
                return AF_INET;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                memcpy(xaddr->a6,
                       &((struct sockaddr_in6 *)sa)->sin6_addr,
@@ -639,7 +639,7 @@ static struct  xfrm_state *pfkey_xfrm_state_lookup(struct net *net, const struct
        case AF_INET:
                xaddr = (xfrm_address_t *)&((const struct sockaddr_in *)(addr + 1))->sin_addr;
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                xaddr = (xfrm_address_t *)&((const struct sockaddr_in6 *)(addr + 1))->sin6_addr;
                break;
@@ -705,7 +705,7 @@ static unsigned int pfkey_sockaddr_fill(const xfrm_address_t *xaddr, __be16 port
                memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
                return 32;
            }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
            {
                struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;
@@ -1311,7 +1311,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_
                xdaddr = (xfrm_address_t *)&((struct sockaddr_in *)(daddr + 1))->sin_addr.s_addr;
                xsaddr = (xfrm_address_t *)&((struct sockaddr_in *)(saddr + 1))->sin_addr.s_addr;
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                xdaddr = (xfrm_address_t *)&((struct sockaddr_in6 *)(daddr + 1))->sin6_addr;
                xsaddr = (xfrm_address_t *)&((struct sockaddr_in6 *)(saddr + 1))->sin6_addr;
@@ -3146,7 +3146,7 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
                        return NULL;
                }
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                if (opt != IPV6_IPSEC_POLICY) {
                        *dir = -EOPNOTSUPP;
index dfd3a64..a18e6c3 100644 (file)
@@ -833,15 +833,15 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
                copied += used;
                len -= used;
 
+               /* For non-stream protocols we get one packet per recvmsg call */
+               if (sk->sk_type != SOCK_STREAM)
+                       goto copy_uaddr;
+
                if (!(flags & MSG_PEEK)) {
                        sk_eat_skb(sk, skb, 0);
                        *seq = 0;
                }
 
-               /* For non stream protcols we get one packet per recvmsg call */
-               if (sk->sk_type != SOCK_STREAM)
-                       goto copy_uaddr;
-
                /* Partial read */
                if (used + offset < skb->len)
                        continue;
@@ -857,6 +857,12 @@ copy_uaddr:
        }
        if (llc_sk(sk)->cmsg_flags)
                llc_cmsg_rcv(msg, skb);
+
+       if (!(flags & MSG_PEEK)) {
+               sk_eat_skb(sk, skb, 0);
+               *seq = 0;
+       }
+
        goto out;
 }
 
index 7d3b438..96ddb72 100644 (file)
@@ -247,15 +247,3 @@ config MAC80211_DEBUG_COUNTERS
          and show them in debugfs.
 
          If unsure, say N.
-
-config MAC80211_DRIVER_API_TRACER
-       bool "Driver API tracer"
-       depends on MAC80211_DEBUG_MENU
-       depends on EVENT_TRACING
-       help
-         Say Y here to make mac80211 register with the ftrace
-         framework for the driver API -- you can then see which
-         driver methods it is calling and which API functions
-         drivers are calling by looking at the trace.
-
-         If unsure, say Y.
index fdb54e6..d540c3b 100644 (file)
@@ -24,7 +24,8 @@ mac80211-y := \
        util.o \
        wme.o \
        event.o \
-       chan.o
+       chan.o \
+       driver-trace.o
 
 mac80211-$(CONFIG_MAC80211_LEDS) += led.o
 mac80211-$(CONFIG_MAC80211_DEBUGFS) += \
@@ -41,7 +42,6 @@ mac80211-$(CONFIG_MAC80211_MESH) += \
 
 mac80211-$(CONFIG_PM) += pm.o
 
-mac80211-$(CONFIG_MAC80211_DRIVER_API_TRACER) += driver-trace.o
 CFLAGS_driver-trace.o := -I$(src)
 
 # objects for PID algorithm
index 476b106..96debba 100644 (file)
@@ -73,8 +73,11 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
        RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], NULL);
 
 #ifdef CONFIG_MAC80211_HT_DEBUG
-       printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n",
-              sta->sta.addr, tid);
+       printk(KERN_DEBUG
+              "Rx BA session stop requested for %pM tid %u %s reason: %d\n",
+              sta->sta.addr, tid,
+              initiator == WLAN_BACK_RECIPIENT ? "recipient" : "initiator",
+              (int)reason);
 #endif /* CONFIG_MAC80211_HT_DEBUG */
 
        if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
@@ -85,7 +88,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
        /* check if this is a self generated aggregation halt */
        if (initiator == WLAN_BACK_RECIPIENT && tx)
                ieee80211_send_delba(sta->sdata, sta->sta.addr,
-                                    tid, 0, reason);
+                                    tid, WLAN_BACK_RECIPIENT, reason);
 
        del_timer_sync(&tid_rx->session_timer);
        del_timer_sync(&tid_rx->reorder_timer);
@@ -109,7 +112,7 @@ void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap,
        int i;
 
        rcu_read_lock();
-       sta = sta_info_get(sdata, addr);
+       sta = sta_info_get_bss(sdata, addr);
        if (!sta) {
                rcu_read_unlock();
                return;
@@ -182,6 +185,8 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d
                memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
        else if (sdata->vif.type == NL80211_IFTYPE_STATION)
                memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
+       else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
+               memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);
 
        mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
                                          IEEE80211_STYPE_ACTION);
index 5567657..e92f98d 100644 (file)
@@ -55,6 +55,8 @@
  * @ampdu_action function will be called with the action
  * %IEEE80211_AMPDU_TX_STOP. In this case, the call must not fail,
  * and the driver must later call ieee80211_stop_tx_ba_cb_irqsafe().
+ * Note that the sta can get destroyed before the BA tear down is
+ * complete.
  */
 
 static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
@@ -83,6 +85,8 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
                memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
        else if (sdata->vif.type == NL80211_IFTYPE_STATION)
                memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
+       else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
+               memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);
 
        mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
                                          IEEE80211_STYPE_ACTION);
@@ -103,7 +107,7 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
        mgmt->u.action.u.addba_req.start_seq_num =
                                        cpu_to_le16(start_seq_num << 4);
 
-       ieee80211_tx_skb(sdata, skb);
+       ieee80211_tx_skb_tid(sdata, skb, tid);
 }
 
 void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
@@ -132,7 +136,7 @@ void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
        bar->start_seq_num = cpu_to_le16(ssn);
 
        IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
-       ieee80211_tx_skb(sdata, skb);
+       ieee80211_tx_skb_tid(sdata, skb, tid);
 }
 EXPORT_SYMBOL(ieee80211_send_bar);
 
@@ -186,6 +190,7 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 #endif /* CONFIG_MAC80211_HT_DEBUG */
 
        del_timer_sync(&tid_tx->addba_resp_timer);
+       del_timer_sync(&tid_tx->session_timer);
 
        /*
         * After this packets are no longer handed right through
@@ -304,6 +309,38 @@ ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
        __release(agg_queue);
 }
 
+/*
+ * splice packets from the STA's pending to the local pending,
+ * requires a call to ieee80211_agg_splice_finish later
+ */
+static void __acquires(agg_queue)
+ieee80211_agg_splice_packets(struct ieee80211_local *local,
+                            struct tid_ampdu_tx *tid_tx, u16 tid)
+{
+       int queue = ieee80211_ac_from_tid(tid);
+       unsigned long flags;
+
+       ieee80211_stop_queue_agg(local, tid);
+
+       if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
+                         " from the pending queue\n", tid))
+               return;
+
+       if (!skb_queue_empty(&tid_tx->pending)) {
+               spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+               /* copy over remaining packets */
+               skb_queue_splice_tail_init(&tid_tx->pending,
+                                          &local->pending[queue]);
+               spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+       }
+}
+
+static void __releases(agg_queue)
+ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
+{
+       ieee80211_wake_queue_agg(local, tid);
+}
+
 void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
 {
        struct tid_ampdu_tx *tid_tx;
@@ -315,19 +352,17 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
        tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 
        /*
-        * While we're asking the driver about the aggregation,
-        * stop the AC queue so that we don't have to worry
-        * about frames that came in while we were doing that,
-        * which would require us to put them to the AC pending
-        * afterwards which just makes the code more complex.
+        * Start queuing up packets for this aggregation session.
+        * We're going to release them once the driver is OK with
+        * that.
         */
-       ieee80211_stop_queue_agg(local, tid);
-
        clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
 
        /*
-        * make sure no packets are being processed to get
-        * valid starting sequence number
+        * Make sure no packets are being processed. This ensures that
+        * we have a valid starting sequence number and that in-flight
+        * packets have been flushed out and no packets for this TID
+        * will go into the driver during the ampdu_action call.
         */
        synchronize_net();
 
@@ -341,17 +376,15 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
                                        " tid %d\n", tid);
 #endif
                spin_lock_bh(&sta->lock);
+               ieee80211_agg_splice_packets(local, tid_tx, tid);
                ieee80211_assign_tid_tx(sta, tid, NULL);
+               ieee80211_agg_splice_finish(local, tid);
                spin_unlock_bh(&sta->lock);
 
-               ieee80211_wake_queue_agg(local, tid);
                kfree_rcu(tid_tx, rcu_head);
                return;
        }
 
-       /* we can take packets again now */
-       ieee80211_wake_queue_agg(local, tid);
-
        /* activate the timer for the recipient's addBA response */
        mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
 #ifdef CONFIG_MAC80211_HT_DEBUG
@@ -369,6 +402,28 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
                                     tid_tx->timeout);
 }
 
+/*
+ * After accepting the AddBA Response we activated a timer,
+ * resetting it after each frame that we send.
+ */
+static void sta_tx_agg_session_timer_expired(unsigned long data)
+{
+       /* not an elegant detour, but there is no choice as the timer passes
+        * only one argument, and both the sta and the TID are needed here;
+        * the init flow in sta_info_alloc gives the TID as timer data, while
+        * the timer_to_tid array gives the sta back through container_of */
+       u8 *ptid = (u8 *)data;
+       u8 *timer_to_id = ptid - *ptid;
+       struct sta_info *sta = container_of(timer_to_id, struct sta_info,
+                                        timer_to_tid[0]);
+
+#ifdef CONFIG_MAC80211_HT_DEBUG
+       printk(KERN_DEBUG "tx session timer expired on tid %d\n", (u16)*ptid);
+#endif
+
+       ieee80211_stop_tx_ba_session(&sta->sta, *ptid);
+}
+
 int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
                                  u16 timeout)
 {
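The timer callback added above only receives a single unsigned long, so it recovers both the TID and the sta by exploiting the fact that sta->timer_to_tid[] is an array whose entry i simply stores the value i: subtracting the stored TID from the entry's address yields the array base, and container_of() then yields the sta. A stand-alone sketch of that trick, with simplified stand-in names rather than the mac80211 structures:

#include <stddef.h>

#define example_container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct example_sta {
        int id;
        unsigned char timer_to_tid[8];  /* initialised so entry i == i */
};

/* given a pointer into timer_to_tid[], recover the tid and the sta */
static struct example_sta *example_sta_from_ptid(unsigned char *ptid, int *tid)
{
        unsigned char *base = ptid - *ptid;     /* entry i stores i */

        *tid = *ptid;
        return example_container_of(base, struct example_sta, timer_to_tid);
}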
@@ -396,7 +451,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
        if (sdata->vif.type != NL80211_IFTYPE_STATION &&
            sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
            sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
-           sdata->vif.type != NL80211_IFTYPE_AP)
+           sdata->vif.type != NL80211_IFTYPE_AP &&
+           sdata->vif.type != NL80211_IFTYPE_ADHOC)
                return -EINVAL;
 
        if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
@@ -407,6 +463,27 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
                return -EINVAL;
        }
 
+       /*
+        * 802.11n-2009 11.5.1.1: If the initiating STA is an HT STA, is a
+        * member of an IBSS, and has no other existing Block Ack agreement
+        * with the recipient STA, then the initiating STA shall transmit a
+        * Probe Request frame to the recipient STA and shall not transmit an
+        * ADDBA Request frame unless it receives a Probe Response frame
+        * from the recipient within dot11ADDBAFailureTimeout.
+        *
+        * The probe request mechanism for ADDBA is currently not implemented,
+        * but we only build up Block Ack sessions with HT STAs. This information
+        * is set when we receive bss info from a probe response or a beacon.
+        */
+       if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC &&
+           !sta->sta.ht_cap.ht_supported) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+               printk(KERN_DEBUG "BA request denied - IBSS STA %pM "
+                      "does not advertise HT support\n", pubsta->addr);
+#endif /* CONFIG_MAC80211_HT_DEBUG */
+               return -EINVAL;
+       }
+
        spin_lock_bh(&sta->lock);
 
        /* we have tried too many times, receiver does not want A-MPDU */
@@ -438,11 +515,16 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
 
        tid_tx->timeout = timeout;
 
-       /* Tx timer */
+       /* response timer */
        tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
        tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
        init_timer(&tid_tx->addba_resp_timer);
 
+       /* tx timer */
+       tid_tx->session_timer.function = sta_tx_agg_session_timer_expired;
+       tid_tx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
+       init_timer(&tid_tx->session_timer);
+
        /* assign a dialog token */
        sta->ampdu_mlme.dialog_token_allocator++;
        tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;
@@ -462,38 +544,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
 }
 EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
 
-/*
- * splice packets from the STA's pending to the local pending,
- * requires a call to ieee80211_agg_splice_finish later
- */
-static void __acquires(agg_queue)
-ieee80211_agg_splice_packets(struct ieee80211_local *local,
-                            struct tid_ampdu_tx *tid_tx, u16 tid)
-{
-       int queue = ieee80211_ac_from_tid(tid);
-       unsigned long flags;
-
-       ieee80211_stop_queue_agg(local, tid);
-
-       if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
-                         " from the pending queue\n", tid))
-               return;
-
-       if (!skb_queue_empty(&tid_tx->pending)) {
-               spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
-               /* copy over remaining packets */
-               skb_queue_splice_tail_init(&tid_tx->pending,
-                                          &local->pending[queue]);
-               spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
-       }
-}
-
-static void __releases(agg_queue)
-ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
-{
-       ieee80211_wake_queue_agg(local, tid);
-}
-
 static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
                                         struct sta_info *sta, u16 tid)
 {
@@ -547,7 +597,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
        }
 
        mutex_lock(&local->sta_mtx);
-       sta = sta_info_get(sdata, ra);
+       sta = sta_info_get_bss(sdata, ra);
        if (!sta) {
                mutex_unlock(&local->sta_mtx);
 #ifdef CONFIG_MAC80211_HT_DEBUG
@@ -676,7 +726,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
 
        mutex_lock(&local->sta_mtx);
 
-       sta = sta_info_get(sdata, ra);
+       sta = sta_info_get_bss(sdata, ra);
        if (!sta) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
                printk(KERN_DEBUG "Could not find station: %pM\n", ra);
@@ -814,6 +864,11 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
                        ieee80211_agg_tx_operational(local, sta, tid);
 
                sta->ampdu_mlme.addba_req_num[tid] = 0;
+
+               if (tid_tx->timeout)
+                       mod_timer(&tid_tx->session_timer,
+                                 TU_TO_EXP_TIME(tid_tx->timeout));
+
        } else {
                ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR,
                                                true);
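The session timer above is armed with TU_TO_EXP_TIME(tid_tx->timeout), where the timeout comes from the ADDBA exchange and is expressed in 802.11 time units (1 TU = 1024 microseconds). A sketch of the conversion that macro is assumed to perform; the real definition lives in ieee80211_i.h and may differ in detail:

#include <linux/jiffies.h>

/* illustrative: a timeout of N TUs expires N * 1024 microseconds from now */
static inline unsigned long example_tu_to_exp_time(unsigned int tu)
{
        return jiffies + usecs_to_jiffies(tu * 1024);
}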
index 2577c45..66ad9d9 100644 (file)
@@ -102,6 +102,16 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
        return 0;
 }
 
+static int ieee80211_set_noack_map(struct wiphy *wiphy,
+                                 struct net_device *dev,
+                                 u16 noack_map)
+{
+       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+       sdata->noack_map = noack_map;
+       return 0;
+}
+
 static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
                             u8 key_idx, bool pairwise, const u8 *mac_addr,
                             struct key_params *params)
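The new set_noack_map handler stores a 16-bit bitmap with one bit per TID; frames whose TID bit is set can be sent without requiring an ACK. A hedged sketch of how such a bitmap might be consulted on the transmit path (the IEEE80211_TX_CTL_NO_ACK flag is real, but this particular helper and its placement are assumptions, not a copy of the in-tree TX code):

#include <linux/bitops.h>
#include <net/mac80211.h>

/* illustrative: honour a per-interface noack_map for a QoS frame's TID */
static void example_apply_noack_map(u16 noack_map, u8 tid,
                                    struct ieee80211_tx_info *info)
{
        if (noack_map & BIT(tid & 0xf))
                info->flags |= IEEE80211_TX_CTL_NO_ACK;
}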
@@ -499,7 +509,7 @@ static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
        if (!resp || !resp_len)
                return -EINVAL;
 
-       old = sdata->u.ap.probe_resp;
+       old = rtnl_dereference(sdata->u.ap.probe_resp);
 
        new = dev_alloc_skb(resp_len);
        if (!new)
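The probe_resp pointer is annotated __rcu, so the plain read above is replaced with rtnl_dereference(), which documents (and lockdep-checks) that the writer holds the RTNL. A generic sketch of the read-publish-free pattern such an update is assumed to follow; it is not the exact body of ieee80211_set_probe_resp():

#include <linux/rtnetlink.h>
#include <linux/skbuff.h>

/* illustrative: replace an RCU-protected skb pointer while holding RTNL */
static void example_replace_rcu_skb(struct sk_buff __rcu **slot,
                                    struct sk_buff *new)
{
        struct sk_buff *old = rtnl_dereference(*slot);

        rcu_assign_pointer(*slot, new);         /* publish the new buffer */
        synchronize_rcu();                      /* wait out existing readers */
        if (old)
                dev_kfree_skb(old);
}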
@@ -736,10 +746,11 @@ static void ieee80211_send_layer2_update(struct sta_info *sta)
        netif_rx_ni(skb);
 }
 
-static void sta_apply_parameters(struct ieee80211_local *local,
-                                struct sta_info *sta,
-                                struct station_parameters *params)
+static int sta_apply_parameters(struct ieee80211_local *local,
+                               struct sta_info *sta,
+                               struct station_parameters *params)
 {
+       int ret = 0;
        u32 rates;
        int i, j;
        struct ieee80211_supported_band *sband;
@@ -751,13 +762,59 @@ static void sta_apply_parameters(struct ieee80211_local *local,
        mask = params->sta_flags_mask;
        set = params->sta_flags_set;
 
+       /*
+        * In mesh mode, we can clear AUTHENTICATED flag but must
+        * also make ASSOCIATED follow appropriately for the driver
+        * API. See also below, after AUTHORIZED changes.
+        */
+       if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED)) {
+               /* cfg80211 should not allow this in non-mesh modes */
+               if (WARN_ON(!ieee80211_vif_is_mesh(&sdata->vif)))
+                       return -EINVAL;
+
+               if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED) &&
+                   !test_sta_flag(sta, WLAN_STA_AUTH)) {
+                       ret = sta_info_move_state_checked(sta,
+                                       IEEE80211_STA_AUTH);
+                       if (ret)
+                               return ret;
+                       ret = sta_info_move_state_checked(sta,
+                                       IEEE80211_STA_ASSOC);
+                       if (ret)
+                               return ret;
+               }
+       }
+
        if (mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
                if (set & BIT(NL80211_STA_FLAG_AUTHORIZED))
-                       set_sta_flag(sta, WLAN_STA_AUTHORIZED);
+                       ret = sta_info_move_state_checked(sta,
+                                       IEEE80211_STA_AUTHORIZED);
                else
-                       clear_sta_flag(sta, WLAN_STA_AUTHORIZED);
+                       ret = sta_info_move_state_checked(sta,
+                                       IEEE80211_STA_ASSOC);
+               if (ret)
+                       return ret;
+       }
+
+       if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED)) {
+               /* cfg80211 should not allow this in non-mesh modes */
+               if (WARN_ON(!ieee80211_vif_is_mesh(&sdata->vif)))
+                       return -EINVAL;
+
+               if (!(set & BIT(NL80211_STA_FLAG_AUTHENTICATED)) &&
+                   test_sta_flag(sta, WLAN_STA_AUTH)) {
+                       ret = sta_info_move_state_checked(sta,
+                                       IEEE80211_STA_AUTH);
+                       if (ret)
+                               return ret;
+                       ret = sta_info_move_state_checked(sta,
+                                       IEEE80211_STA_NONE);
+                       if (ret)
+                               return ret;
+               }
        }
 
+
        if (mask & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE)) {
                if (set & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE))
                        set_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE);
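sta_info_move_state_checked() advances the station state machine one step at a time, which is why the mesh AUTHENTICATED handling above explicitly walks through AUTH and ASSOC in both directions instead of jumping straight to the target state. A compact sketch of the assumed ladder NONE -> AUTH -> ASSOC -> AUTHORIZED (the enum below is a stand-in, not the sta_info.h definition):

/* illustrative state ladder; the real transitions also touch driver state */
enum example_sta_state {
        EXAMPLE_STA_NONE,
        EXAMPLE_STA_AUTH,
        EXAMPLE_STA_ASSOC,
        EXAMPLE_STA_AUTHORIZED,
};

/* step one rung at a time until the target state is reached */
static int example_move_to(enum example_sta_state *cur,
                           enum example_sta_state target,
                           int (*step)(enum example_sta_state next))
{
        while (*cur != target) {
                enum example_sta_state next =
                        *cur < target ? *cur + 1 : *cur - 1;
                int ret = step(next);

                if (ret)
                        return ret;
                *cur = next;
        }
        return 0;
}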
@@ -782,13 +839,6 @@ static void sta_apply_parameters(struct ieee80211_local *local,
                        clear_sta_flag(sta, WLAN_STA_MFP);
        }
 
-       if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED)) {
-               if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED))
-                       set_sta_flag(sta, WLAN_STA_AUTH);
-               else
-                       clear_sta_flag(sta, WLAN_STA_AUTH);
-       }
-
        if (mask & BIT(NL80211_STA_FLAG_TDLS_PEER)) {
                if (set & BIT(NL80211_STA_FLAG_TDLS_PEER))
                        set_sta_flag(sta, WLAN_STA_TDLS_PEER);
@@ -860,6 +910,8 @@ static void sta_apply_parameters(struct ieee80211_local *local,
                        }
 #endif
        }
+
+       return 0;
 }
 
 static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
@@ -886,20 +938,18 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
        if (is_multicast_ether_addr(mac))
                return -EINVAL;
 
-       /* Only TDLS-supporting stations can add TDLS peers */
-       if ((params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
-           !((wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
-             sdata->vif.type == NL80211_IFTYPE_STATION))
-               return -ENOTSUPP;
-
        sta = sta_info_alloc(sdata, mac, GFP_KERNEL);
        if (!sta)
                return -ENOMEM;
 
-       set_sta_flag(sta, WLAN_STA_AUTH);
-       set_sta_flag(sta, WLAN_STA_ASSOC);
+       sta_info_move_state(sta, IEEE80211_STA_AUTH);
+       sta_info_move_state(sta, IEEE80211_STA_ASSOC);
 
-       sta_apply_parameters(local, sta, params);
+       err = sta_apply_parameters(local, sta, params);
+       if (err) {
+               sta_info_free(local, sta);
+               return err;
+       }
 
        /*
         * for TDLS, rate control should be initialized only when supported
@@ -950,19 +1000,19 @@ static int ieee80211_change_station(struct wiphy *wiphy,
        struct sta_info *sta;
        struct ieee80211_sub_if_data *vlansdata;
 
-       rcu_read_lock();
+       mutex_lock(&local->sta_mtx);
 
        sta = sta_info_get_bss(sdata, mac);
        if (!sta) {
-               rcu_read_unlock();
+               mutex_unlock(&local->sta_mtx);
                return -ENOENT;
        }
 
-       /* The TDLS bit cannot be toggled after the STA was added */
-       if ((params->sta_flags_mask & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
-           !!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) !=
-           !!test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
-               rcu_read_unlock();
+       /* in station mode, supported rates are only valid with TDLS */
+       if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+           params->supported_rates &&
+           !test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
+               mutex_unlock(&local->sta_mtx);
                return -EINVAL;
        }
 
@@ -971,13 +1021,13 @@ static int ieee80211_change_station(struct wiphy *wiphy,
 
                if (vlansdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
                    vlansdata->vif.type != NL80211_IFTYPE_AP) {
-                       rcu_read_unlock();
+                       mutex_unlock(&local->sta_mtx);
                        return -EINVAL;
                }
 
                if (params->vlan->ieee80211_ptr->use_4addr) {
                        if (vlansdata->u.vlan.sta) {
-                               rcu_read_unlock();
+                               mutex_unlock(&local->sta_mtx);
                                return -EBUSY;
                        }
 
@@ -993,7 +1043,7 @@ static int ieee80211_change_station(struct wiphy *wiphy,
        if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && params->supported_rates)
                rate_control_rate_init(sta);
 
-       rcu_read_unlock();
+       mutex_unlock(&local->sta_mtx);
 
        if (sdata->vif.type == NL80211_IFTYPE_STATION &&
            params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))
@@ -1185,6 +1235,8 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh,
 {
        u8 *new_ie;
        const u8 *old_ie;
+       struct ieee80211_sub_if_data *sdata = container_of(ifmsh,
+                                       struct ieee80211_sub_if_data, u.mesh);
 
        /* allocate information elements */
        new_ie = NULL;
@@ -1211,6 +1263,10 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh,
        if (setup->is_secure)
                ifmsh->security |= IEEE80211_MESH_SEC_SECURED;
 
+       /* mcast rate setting in Mesh Node */
+       memcpy(sdata->vif.bss_conf.mcast_rate, setup->mcast_rate,
+                                               sizeof(setup->mcast_rate));
+
        return 0;
 }
 
@@ -1256,6 +1312,9 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
        if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, mask))
                conf->dot11MeshHWMPpreqMinInterval =
                        nconf->dot11MeshHWMPpreqMinInterval;
+       if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, mask))
+               conf->dot11MeshHWMPperrMinInterval =
+                       nconf->dot11MeshHWMPperrMinInterval;
        if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
                           mask))
                conf->dot11MeshHWMPnetDiameterTraversalTime =
@@ -2698,4 +2757,5 @@ struct cfg80211_ops mac80211_config_ops = {
        .tdls_mgmt = ieee80211_tdls_mgmt,
        .probe_client = ieee80211_probe_client,
        .get_channel = ieee80211_wiphy_get_channel,
+       .set_noack_map = ieee80211_set_noack_map,
 };
index 00cefcb..90baea5 100644 (file)
@@ -97,40 +97,6 @@ static const struct file_operations reset_ops = {
        .llseek = noop_llseek,
 };
 
-static ssize_t noack_read(struct file *file, char __user *user_buf,
-                         size_t count, loff_t *ppos)
-{
-       struct ieee80211_local *local = file->private_data;
-
-       return mac80211_format_buffer(user_buf, count, ppos, "%d\n",
-                                     local->wifi_wme_noack_test);
-}
-
-static ssize_t noack_write(struct file *file,
-                          const char __user *user_buf,
-                          size_t count, loff_t *ppos)
-{
-       struct ieee80211_local *local = file->private_data;
-       char buf[10];
-       size_t len;
-
-       len = min(count, sizeof(buf) - 1);
-       if (copy_from_user(buf, user_buf, len))
-               return -EFAULT;
-       buf[len] = '\0';
-
-       local->wifi_wme_noack_test = !!simple_strtoul(buf, NULL, 0);
-
-       return count;
-}
-
-static const struct file_operations noack_ops = {
-       .read = noack_read,
-       .write = noack_write,
-       .open = mac80211_open_file_generic,
-       .llseek = default_llseek,
-};
-
 static ssize_t uapsd_queues_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
 {
@@ -398,7 +364,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
        DEBUGFS_ADD(wep_iv);
        DEBUGFS_ADD(queues);
        DEBUGFS_ADD_MODE(reset, 0200);
-       DEBUGFS_ADD(noack);
        DEBUGFS_ADD(uapsd_queues);
        DEBUGFS_ADD(uapsd_max_sp_len);
        DEBUGFS_ADD(channel_type);
index 9352819..176c08f 100644 (file)
@@ -321,6 +321,7 @@ static ssize_t ieee80211_if_parse_tkip_mic_test(
 __IEEE80211_IF_FILE_W(tkip_mic_test);
 
 /* AP attributes */
+IEEE80211_IF_FILE(num_sta_authorized, u.ap.num_sta_authorized, ATOMIC);
 IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC);
 IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC);
 
@@ -405,6 +406,8 @@ IEEE80211_IF_FILE(dot11MeshHWMPactivePathTimeout,
                u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout, DEC);
 IEEE80211_IF_FILE(dot11MeshHWMPpreqMinInterval,
                u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval, DEC);
+IEEE80211_IF_FILE(dot11MeshHWMPperrMinInterval,
+               u.mesh.mshcfg.dot11MeshHWMPperrMinInterval, DEC);
 IEEE80211_IF_FILE(dot11MeshHWMPnetDiameterTraversalTime,
                u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC);
 IEEE80211_IF_FILE(dot11MeshHWMPmaxPREQretries,
@@ -456,6 +459,7 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata)
        DEBUGFS_ADD(rc_rateidx_mask_2ghz);
        DEBUGFS_ADD(rc_rateidx_mask_5ghz);
 
+       DEBUGFS_ADD(num_sta_authorized);
        DEBUGFS_ADD(num_sta_ps);
        DEBUGFS_ADD(dtim_count);
        DEBUGFS_ADD(num_buffered_multicast);
@@ -534,6 +538,7 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
        MESHPARAMS_ADD(dot11MeshMaxPeerLinks);
        MESHPARAMS_ADD(dot11MeshHWMPactivePathTimeout);
        MESHPARAMS_ADD(dot11MeshHWMPpreqMinInterval);
+       MESHPARAMS_ADD(dot11MeshHWMPperrMinInterval);
        MESHPARAMS_ADD(dot11MeshHWMPnetDiameterTraversalTime);
        MESHPARAMS_ADD(dot11MeshHWMPmaxPREQretries);
        MESHPARAMS_ADD(path_refresh_time);
index 49cc5e0..e8960ae 100644 (file)
@@ -10,6 +10,16 @@ static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata)
        WARN_ON(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER));
 }
 
+static inline struct ieee80211_sub_if_data *
+get_bss_sdata(struct ieee80211_sub_if_data *sdata)
+{
+       if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+               sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
+                                    u.ap);
+
+       return sdata;
+}
+
 static inline void drv_tx(struct ieee80211_local *local, struct sk_buff *skb)
 {
        local->ops->tx(&local->hw, skb);
@@ -421,6 +431,7 @@ static inline void drv_sta_notify(struct ieee80211_local *local,
                                  enum sta_notify_cmd cmd,
                                  struct ieee80211_sta *sta)
 {
+       sdata = get_bss_sdata(sdata);
        check_sdata_in_driver(sdata);
 
        trace_drv_sta_notify(local, sdata, cmd, sta);
@@ -437,6 +448,7 @@ static inline int drv_sta_add(struct ieee80211_local *local,
 
        might_sleep();
 
+       sdata = get_bss_sdata(sdata);
        check_sdata_in_driver(sdata);
 
        trace_drv_sta_add(local, sdata, sta);
@@ -454,6 +466,7 @@ static inline void drv_sta_remove(struct ieee80211_local *local,
 {
        might_sleep();
 
+       sdata = get_bss_sdata(sdata);
        check_sdata_in_driver(sdata);
 
        trace_drv_sta_remove(local, sdata, sta);
@@ -547,6 +560,7 @@ static inline int drv_ampdu_action(struct ieee80211_local *local,
 
        might_sleep();
 
+       sdata = get_bss_sdata(sdata);
        check_sdata_in_driver(sdata);
 
        trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, buf_size);
index 2af4fca..6e9df8f 100644 (file)
@@ -5,17 +5,6 @@
 #include <net/mac80211.h>
 #include "ieee80211_i.h"
 
-#if !defined(CONFIG_MAC80211_DRIVER_API_TRACER) || defined(__CHECKER__)
-#undef TRACE_EVENT
-#define TRACE_EVENT(name, proto, ...) \
-static inline void trace_ ## name(proto) {}
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(...)
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(evt_class, name, proto, ...) \
-static inline void trace_ ## name(proto) {}
-#endif
-
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM mac80211
 
index 810cfbe..f25fff7 100644 (file)
@@ -28,9 +28,9 @@ bool ieee80111_cfg_override_disables_ht40(struct ieee80211_sub_if_data *sdata)
        return false;
 }
 
-void __check_htcap_disable(struct ieee80211_sub_if_data *sdata,
-                          struct ieee80211_sta_ht_cap *ht_cap,
-                          u16 flag)
+static void __check_htcap_disable(struct ieee80211_sub_if_data *sdata,
+                                 struct ieee80211_sta_ht_cap *ht_cap,
+                                 u16 flag)
 {
        __le16 le_flag = cpu_to_le16(flag);
        if (sdata->u.mgd.ht_capa_mask.cap_info & le_flag) {
@@ -47,7 +47,9 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
        int i;
 
        if (sdata->vif.type != NL80211_IFTYPE_STATION) {
-               WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION);
+               /* AP interfaces call this code when adding new stations,
+                * so just silently ignore non-station interfaces.
+                */
                return;
        }
 
@@ -282,6 +284,8 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
                memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
        else if (sdata->vif.type == NL80211_IFTYPE_STATION)
                memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
+       else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
+               memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);
 
        mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
                                          IEEE80211_STYPE_ACTION);
@@ -296,7 +300,7 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
        mgmt->u.action.u.delba.params = cpu_to_le16(params);
        mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code);
 
-       ieee80211_tx_skb(sdata, skb);
+       ieee80211_tx_skb_tid(sdata, skb, tid);
 }
 
 void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
index 7d84af7..f8a32bf 100644 (file)
@@ -77,6 +77,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        struct cfg80211_bss *bss;
        u32 bss_change;
        u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
+       enum nl80211_channel_type channel_type;
 
        lockdep_assert_held(&ifibss->mtx);
 
@@ -105,8 +106,16 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 
        sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
 
-       local->oper_channel = chan;
-       WARN_ON(!ieee80211_set_channel_type(local, sdata, NL80211_CHAN_NO_HT));
+       channel_type = ifibss->channel_type;
+       if (channel_type > NL80211_CHAN_HT20 &&
+           !cfg80211_can_beacon_sec_chan(local->hw.wiphy, chan, channel_type))
+               channel_type = NL80211_CHAN_HT20;
+       if (!ieee80211_set_channel_type(local, sdata, channel_type)) {
+               /* can only fail due to HT40+/- mismatch */
+               channel_type = NL80211_CHAN_HT20;
+               WARN_ON(!ieee80211_set_channel_type(local, sdata,
+                                                   NL80211_CHAN_HT20));
+       }
        ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
 
        sband = local->hw.wiphy->bands[chan->band];
@@ -172,6 +181,19 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
                memcpy(skb_put(skb, ifibss->ie_len),
                       ifibss->ie, ifibss->ie_len);
 
+       /* add HT capability and information IEs */
+       if (channel_type && sband->ht_cap.ht_supported) {
+               pos = skb_put(skb, 4 +
+                                  sizeof(struct ieee80211_ht_cap) +
+                                  sizeof(struct ieee80211_ht_info));
+               pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
+                                               sband->ht_cap.cap);
+               pos = ieee80211_ie_build_ht_info(pos,
+                                                &sband->ht_cap,
+                                                chan,
+                                                channel_type);
+       }
+
        if (local->hw.queues >= 4) {
                pos = skb_put(skb, 9);
                *pos++ = WLAN_EID_VENDOR_SPECIFIC;
@@ -195,6 +217,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        bss_change |= BSS_CHANGED_BEACON;
        bss_change |= BSS_CHANGED_BEACON_ENABLED;
        bss_change |= BSS_CHANGED_BASIC_RATES;
+       bss_change |= BSS_CHANGED_HT;
        bss_change |= BSS_CHANGED_IBSS;
        sdata->vif.bss_conf.ibss_joined = true;
        ieee80211_bss_info_change_notify(sdata, bss_change);
@@ -252,6 +275,80 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
                                  cbss->tsf);
 }
 
+static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta)
+       __acquires(RCU)
+{
+       struct ieee80211_sub_if_data *sdata = sta->sdata;
+       u8 addr[ETH_ALEN];
+
+       memcpy(addr, sta->sta.addr, ETH_ALEN);
+
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+       wiphy_debug(sdata->local->hw.wiphy,
+                   "Adding new IBSS station %pM (dev=%s)\n",
+                   addr, sdata->name);
+#endif
+
+       sta_info_move_state(sta, IEEE80211_STA_AUTH);
+       sta_info_move_state(sta, IEEE80211_STA_ASSOC);
+       sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
+
+       rate_control_rate_init(sta);
+
+       /* If it fails, maybe we raced another insertion? */
+       if (sta_info_insert_rcu(sta))
+               return sta_info_get(sdata, addr);
+       return sta;
+}
+
+static struct sta_info *
+ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
+                      const u8 *bssid, const u8 *addr,
+                      u32 supp_rates)
+       __acquires(RCU)
+{
+       struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+       struct ieee80211_local *local = sdata->local;
+       struct sta_info *sta;
+       int band = local->hw.conf.channel->band;
+
+       /*
+        * XXX: Consider removing the least recently used entry and
+        *      allow new one to be added.
+        */
+       if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
+               if (net_ratelimit())
+                       printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n",
+                              sdata->name, addr);
+               rcu_read_lock();
+               return NULL;
+       }
+
+       if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH) {
+               rcu_read_lock();
+               return NULL;
+       }
+
+       if (compare_ether_addr(bssid, sdata->u.ibss.bssid)) {
+               rcu_read_lock();
+               return NULL;
+       }
+
+       sta = sta_info_alloc(sdata, addr, GFP_KERNEL);
+       if (!sta) {
+               rcu_read_lock();
+               return NULL;
+       }
+
+       sta->last_rx = jiffies;
+
+       /* make sure mandatory rates are always added */
+       sta->sta.supp_rates[band] = supp_rates |
+                       ieee80211_mandatory_rates(local, band);
+
+       return ieee80211_ibss_finish_sta(sta);
+}
+
 static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                                  struct ieee80211_mgmt *mgmt,
                                  size_t len,
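Both ieee80211_ibss_finish_sta() and ieee80211_ibss_add_sta() above are annotated __acquires(RCU): every return path leaves the RCU read lock held, either implicitly via sta_info_insert_rcu() or by taking rcu_read_lock() explicitly on the error paths, so the caller may dereference the result and must drop RCU exactly once. A sketch of the expected caller shape, assuming those mac80211 helpers; it mirrors the usage in ieee80211_rx_bss_info() further down rather than adding new behaviour:

/* illustrative caller: use the returned sta under the RCU read lock the
 * helper left held, then release it exactly once */
static void example_handle_new_ibss_peer(struct ieee80211_sub_if_data *sdata,
                                         const u8 *bssid, const u8 *addr,
                                         u32 supp_rates)
{
        struct sta_info *sta;

        sta = ieee80211_ibss_add_sta(sdata, bssid, addr, supp_rates);
        if (sta)
                set_sta_flag(sta, WLAN_STA_WME);
        rcu_read_unlock();
}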
@@ -268,6 +365,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
        u64 beacon_timestamp, rx_timestamp;
        u32 supp_rates = 0;
        enum ieee80211_band band = rx_status->band;
+       struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
+       bool rates_updated = false;
 
        if (elems->ds_params && elems->ds_params_len == 1)
                freq = ieee80211_channel_to_frequency(elems->ds_params[0],
@@ -307,17 +406,51 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                                                prev_rates,
                                                sta->sta.supp_rates[band]);
 #endif
-                                       rate_control_rate_init(sta);
+                                       rates_updated = true;
                                }
-                       } else
+                       } else {
+                               rcu_read_unlock();
                                sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid,
-                                               mgmt->sa, supp_rates,
-                                               GFP_ATOMIC);
+                                               mgmt->sa, supp_rates);
+                       }
                }
 
                if (sta && elems->wmm_info)
                        set_sta_flag(sta, WLAN_STA_WME);
 
+               if (sta && elems->ht_info_elem && elems->ht_cap_elem &&
+                   sdata->u.ibss.channel_type != NL80211_CHAN_NO_HT) {
+                       /* we both use HT */
+                       struct ieee80211_sta_ht_cap sta_ht_cap_new;
+                       enum nl80211_channel_type channel_type =
+                               ieee80211_ht_info_to_channel_type(
+                                                       elems->ht_info_elem);
+
+                       ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
+                                                         elems->ht_cap_elem,
+                                                         &sta_ht_cap_new);
+
+                       /*
+                        * fall back to HT20 if we don't use or use
+                        * the other extension channel
+                        */
+                       if ((channel_type == NL80211_CHAN_HT40MINUS ||
+                            channel_type == NL80211_CHAN_HT40PLUS) &&
+                           channel_type != sdata->u.ibss.channel_type)
+                               sta_ht_cap_new.cap &=
+                                       ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
+                       if (memcmp(&sta->sta.ht_cap, &sta_ht_cap_new,
+                                  sizeof(sta_ht_cap_new))) {
+                               memcpy(&sta->sta.ht_cap, &sta_ht_cap_new,
+                                      sizeof(sta_ht_cap_new));
+                               rates_updated = true;
+                       }
+               }
+
+               if (sta && rates_updated)
+                       rate_control_rate_init(sta);
+
                rcu_read_unlock();
        }
 
@@ -406,21 +539,17 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                ieee80211_sta_join_ibss(sdata, bss);
                supp_rates = ieee80211_sta_get_rates(local, elems, band);
                ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
-                                      supp_rates, GFP_KERNEL);
+                                      supp_rates);
+               rcu_read_unlock();
        }
 
  put_bss:
        ieee80211_rx_bss_put(local, bss);
 }
 
-/*
- * Add a new IBSS station, will also be called by the RX code when,
- * in IBSS mode, receiving a frame from a yet-unknown station, hence
- * must be callable in atomic context.
- */
-struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
-                                       u8 *bssid, u8 *addr, u32 supp_rates,
-                                       gfp_t gfp)
+void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
+                             const u8 *bssid, const u8 *addr,
+                             u32 supp_rates)
 {
        struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
        struct ieee80211_local *local = sdata->local;
@@ -435,37 +564,29 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
                if (net_ratelimit())
                        printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n",
                               sdata->name, addr);
-               return NULL;
+               return;
        }
 
        if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH)
-               return NULL;
+               return;
 
        if (compare_ether_addr(bssid, sdata->u.ibss.bssid))
-               return NULL;
-
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-       wiphy_debug(local->hw.wiphy, "Adding new IBSS station %pM (dev=%s)\n",
-                   addr, sdata->name);
-#endif
+               return;
 
-       sta = sta_info_alloc(sdata, addr, gfp);
+       sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
        if (!sta)
-               return NULL;
+               return;
 
        sta->last_rx = jiffies;
-       set_sta_flag(sta, WLAN_STA_AUTHORIZED);
 
        /* make sure mandatory rates are always added */
        sta->sta.supp_rates[band] = supp_rates |
                        ieee80211_mandatory_rates(local, band);
 
-       rate_control_rate_init(sta);
-
-       /* If it fails, maybe we raced another insertion? */
-       if (sta_info_insert(sta))
-               return sta_info_get(sdata, addr);
-       return sta;
+       spin_lock(&ifibss->incomplete_lock);
+       list_add(&sta->list, &ifibss->incomplete_stations);
+       spin_unlock(&ifibss->incomplete_lock);
+       ieee80211_queue_work(&local->hw, &sdata->work);
 }
 
 static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
@@ -804,6 +925,7 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
 void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+       struct sta_info *sta;
 
        mutex_lock(&ifibss->mtx);
 
@@ -815,6 +937,19 @@ void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata)
        if (!ifibss->ssid_len)
                goto out;
 
+       spin_lock_bh(&ifibss->incomplete_lock);
+       while (!list_empty(&ifibss->incomplete_stations)) {
+               sta = list_first_entry(&ifibss->incomplete_stations,
+                                      struct sta_info, list);
+               list_del(&sta->list);
+               spin_unlock_bh(&ifibss->incomplete_lock);
+
+               ieee80211_ibss_finish_sta(sta);
+               rcu_read_unlock();
+               spin_lock_bh(&ifibss->incomplete_lock);
+       }
+       spin_unlock_bh(&ifibss->incomplete_lock);
+
        switch (ifibss->state) {
        case IEEE80211_IBSS_MLME_SEARCH:
                ieee80211_sta_find_ibss(sdata);
@@ -873,6 +1008,8 @@ void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata)
        setup_timer(&ifibss->timer, ieee80211_ibss_timer,
                    (unsigned long) sdata);
        mutex_init(&ifibss->mtx);
+       INIT_LIST_HEAD(&ifibss->incomplete_stations);
+       spin_lock_init(&ifibss->incomplete_lock);
 }
 
 /* scan finished notification */
@@ -896,12 +1033,18 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
                        struct cfg80211_ibss_params *params)
 {
        struct sk_buff *skb;
+       u32 changed = 0;
 
        skb = dev_alloc_skb(sdata->local->hw.extra_tx_headroom +
-                           36 /* bitrates */ +
-                           34 /* SSID */ +
-                           3  /* DS params */ +
-                           4  /* IBSS params */ +
+                           sizeof(struct ieee80211_hdr_3addr) +
+                           12 /* struct ieee80211_mgmt.u.beacon */ +
+                           2 + IEEE80211_MAX_SSID_LEN /* max SSID */ +
+                           2 + 8 /* max Supported Rates */ +
+                           3 /* max DS params */ +
+                           4 /* IBSS params */ +
+                           2 + (IEEE80211_MAX_SUPP_RATES - 8) +
+                           2 + sizeof(struct ieee80211_ht_cap) +
+                           2 + sizeof(struct ieee80211_ht_info) +
                            params->ie_len);
        if (!skb)
                return -ENOMEM;
@@ -922,13 +1065,18 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
        sdata->vif.bss_conf.beacon_int = params->beacon_interval;
 
        sdata->u.ibss.channel = params->channel;
+       sdata->u.ibss.channel_type = params->channel_type;
        sdata->u.ibss.fixed_channel = params->channel_fixed;
 
        /* fix ourselves to that channel now already */
        if (params->channel_fixed) {
                sdata->local->oper_channel = params->channel;
-               WARN_ON(!ieee80211_set_channel_type(sdata->local, sdata,
-                                                   NL80211_CHAN_NO_HT));
+               if (!ieee80211_set_channel_type(sdata->local, sdata,
+                                              params->channel_type)) {
+                       mutex_unlock(&sdata->u.ibss.mtx);
+                       kfree_skb(skb);
+                       return -EINVAL;
+               }
        }
 
        if (params->ie) {
@@ -951,6 +1099,23 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
        ieee80211_recalc_idle(sdata->local);
        mutex_unlock(&sdata->local->mtx);
 
+       /*
+        * 802.11n-2009 9.13.3.1: In an IBSS, the HT Protection field is
+        * reserved, but an HT STA shall protect HT transmissions as though
+        * the HT Protection field were set to non-HT mixed mode.
+        *
+        * In an IBSS, the RIFS Mode field of the HT Operation element is
+        * also reserved, but an HT STA shall operate as though this field
+        * were set to 1.
+        */
+
+       sdata->vif.bss_conf.ht_operation_mode |=
+                 IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED
+               | IEEE80211_HT_PARAM_RIFS_MODE;
+
+       changed |= BSS_CHANGED_HT;
+       ieee80211_bss_info_change_notify(sdata, changed);
+
        ieee80211_queue_work(&sdata->local->hw, &sdata->work);
 
        return 0;
@@ -964,6 +1129,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
        struct cfg80211_bss *cbss;
        u16 capability;
        int active_ibss;
+       struct sta_info *sta;
 
        mutex_lock(&sdata->u.ibss.mtx);
 
@@ -992,6 +1158,19 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
        }
 
        sta_info_flush(sdata->local, sdata);
+
+       spin_lock_bh(&ifibss->incomplete_lock);
+       while (!list_empty(&ifibss->incomplete_stations)) {
+               sta = list_first_entry(&ifibss->incomplete_stations,
+                                      struct sta_info, list);
+               list_del(&sta->list);
+               spin_unlock_bh(&ifibss->incomplete_lock);
+
+               sta_info_free(local, sta);
+               spin_lock_bh(&ifibss->incomplete_lock);
+       }
+       spin_unlock_bh(&ifibss->incomplete_lock);
+
        netif_carrier_off(sdata->dev);
 
        /* remove beacon */
index 762243e..c3f3e43 100644 (file)
@@ -243,6 +243,7 @@ struct ieee80211_if_ap {
        u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)];
        struct sk_buff_head ps_bc_buf;
        atomic_t num_sta_ps; /* number of stations in PS mode */
+       atomic_t num_sta_authorized; /* number of authorized stations */
        int dtim_count;
        bool dtim_bc_mc;
 };
@@ -474,12 +475,16 @@ struct ieee80211_if_ibss {
        u8 ssid_len, ie_len;
        u8 *ie;
        struct ieee80211_channel *channel;
+       enum nl80211_channel_type channel_type;
 
        unsigned long ibss_join_req;
        /* probe response/beacon for IBSS */
        struct sk_buff __rcu *presp;
        struct sk_buff *skb;
 
+       spinlock_t incomplete_lock;
+       struct list_head incomplete_stations;
+
        enum {
                IEEE80211_IBSS_MLME_SEARCH,
                IEEE80211_IBSS_MLME_JOINED,
@@ -514,7 +519,9 @@ struct ieee80211_if_mesh {
        atomic_t mpaths;
        /* Timestamp of last SN update */
        unsigned long last_sn_update;
-       /* Timestamp of last SN sent */
+       /* Time when it's ok to send next PERR */
+       unsigned long next_perr;
+       /* Timestamp of last PREQ sent */
        unsigned long last_preq;
        struct mesh_rmc *rmc;
        spinlock_t mesh_preq_queue_lock;
@@ -611,6 +618,9 @@ struct ieee80211_sub_if_data {
        struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX];
        unsigned int fragment_next;
 
+       /* TID bitmap for NoAck policy */
+       u16 noack_map;
+
        struct ieee80211_key __rcu *keys[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS];
        struct ieee80211_key __rcu *default_unicast_key;
        struct ieee80211_key __rcu *default_multicast_key;
@@ -845,18 +855,15 @@ struct ieee80211_local {
 
        /* Station data */
        /*
-        * The mutex only protects the list and counter,
-        * reads are done in RCU.
-        * Additionally, the lock protects the hash table,
-        * the pending list and each BSS's TIM bitmap.
+        * The mutex only protects the list, hash table and
+        * counter, reads are done with RCU.
         */
        struct mutex sta_mtx;
-       spinlock_t sta_lock;
+       spinlock_t tim_lock;
        unsigned long num_sta;
-       struct list_head sta_list, sta_pending_list;
+       struct list_head sta_list;
        struct sta_info __rcu *sta_hash[STA_HASH_SIZE];
        struct timer_list sta_cleanup;
-       struct work_struct sta_finish_work;
        int sta_generation;
 
        struct sk_buff_head pending[IEEE80211_MAX_QUEUES];
@@ -961,7 +968,6 @@ struct ieee80211_local {
        int total_ps_buffered; /* total number of all buffered unicast and
                                * multicast packets for power saving stations
                                */
-       int wifi_wme_noack_test;
        unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */
 
        /*
@@ -1166,9 +1172,8 @@ void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata);
 /* IBSS code */
 void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
 void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata);
-struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
-                                       u8 *bssid, u8 *addr, u32 supp_rates,
-                                       gfp_t gfp);
+void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
+                             const u8 *bssid, const u8 *addr, u32 supp_rates);
 int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
                        struct cfg80211_ibss_params *params);
 int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata);
@@ -1216,13 +1221,11 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata);
 void ieee80211_sched_scan_stopped_work(struct work_struct *work);
 
 /* off-channel helpers */
-bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local);
 void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
                                        bool tell_ap);
 void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
                                    bool offchannel_ps_enable);
 void ieee80211_offchannel_return(struct ieee80211_local *local,
-                                bool enable_beaconing,
                                 bool offchannel_ps_disable);
 void ieee80211_hw_roc_setup(struct ieee80211_local *local);
 
@@ -1346,7 +1349,16 @@ void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int ke
                                     gfp_t gfp);
 void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata);
 void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
-void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
+
+void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
+                         struct sk_buff *skb, int tid);
+static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata,
+                                   struct sk_buff *skb)
+{
+       /* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
+       ieee80211_tx_skb_tid(sdata, skb, 7);
+}
+
 void ieee802_11_parse_elems(u8 *start, size_t len,
                            struct ieee802_11_elems *elems);
 u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
index b34ca0c..e47768c 100644 (file)
@@ -318,8 +318,9 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
                        goto err_del_interface;
                }
 
-               /* no atomic bitop required since STA is not live yet */
-               set_sta_flag(sta, WLAN_STA_AUTHORIZED);
+               sta_info_move_state(sta, IEEE80211_STA_AUTH);
+               sta_info_move_state(sta, IEEE80211_STA_ASSOC);
+               sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
 
                res = sta_info_insert(sta);
                if (res) {
@@ -672,7 +673,6 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev,
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_hdr *hdr;
        struct ieee80211_radiotap_header *rtap = (void *)skb->data;
-       u8 *p;
 
        if (local->hw.queues < 4)
                return 0;
@@ -683,19 +683,7 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev,
 
        hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len));
 
-       if (!ieee80211_is_data(hdr->frame_control)) {
-               skb->priority = 7;
-               return ieee802_1d_to_ac[skb->priority];
-       }
-       if (!ieee80211_is_data_qos(hdr->frame_control)) {
-               skb->priority = 0;
-               return ieee802_1d_to_ac[skb->priority];
-       }
-
-       p = ieee80211_get_qos_ctl(hdr);
-       skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
-
-       return ieee80211_downgrade_queue(local, skb);
+       return ieee80211_select_queue_80211(local, skb, hdr);
 }
 
 static const struct net_device_ops ieee80211_monitorif_ops = {
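The monitor-interface queue selection above is collapsed into a call to ieee80211_select_queue_80211(); judging by the deleted lines, the classification itself is unchanged. A sketch reconstructed from the removed code, so the details may differ from the actual helper in wme.c:

/* illustrative: classify an injected frame the way the removed code did */
static u16 example_select_queue_80211(struct ieee80211_local *local,
                                      struct sk_buff *skb,
                                      struct ieee80211_hdr *hdr)
{
        u8 *p;

        if (!ieee80211_is_data(hdr->frame_control)) {
                skb->priority = 7;      /* management and control -> VO */
                return ieee802_1d_to_ac[skb->priority];
        }
        if (!ieee80211_is_data_qos(hdr->frame_control)) {
                skb->priority = 0;      /* non-QoS data -> BE */
                return ieee802_1d_to_ac[skb->priority];
        }

        p = ieee80211_get_qos_ctl(hdr);
        skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;

        return ieee80211_downgrade_queue(local, skb);
}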
@@ -866,6 +854,8 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
        sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE);
        sdata->control_port_no_encrypt = false;
 
+       sdata->noack_map = 0;
+
        /* only monitor differs */
        sdata->dev->type = ARPHRD_ETHER;
 
index dddedfa..0a0d94a 100644 (file)
@@ -47,7 +47,7 @@ void ieee80211_configure_filter(struct ieee80211_local *local)
        if (atomic_read(&local->iff_allmultis))
                new_flags |= FIF_ALLMULTI;
 
-       if (local->monitors || local->scanning)
+       if (local->monitors || test_bit(SCAN_SW_SCANNING, &local->scanning))
                new_flags |= FIF_BCN_PRBRESP_PROMISC;
 
        if (local->fif_probe_req || local->probe_req_reg)
@@ -92,47 +92,6 @@ static void ieee80211_reconfig_filter(struct work_struct *work)
        ieee80211_configure_filter(local);
 }
 
-/*
- * Returns true if we are logically configured to be on
- * the operating channel AND the hardware-conf is currently
- * configured on the operating channel.  Compares channel-type
- * as well.
- */
-bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local)
-{
-       struct ieee80211_channel *chan;
-       enum nl80211_channel_type channel_type;
-
-       /* This logic needs to match logic in ieee80211_hw_config */
-       if (local->scan_channel) {
-               chan = local->scan_channel;
-               /* If scanning on oper channel, use whatever channel-type
-                * is currently in use.
-                */
-               if (chan == local->oper_channel)
-                       channel_type = local->_oper_channel_type;
-               else
-                       channel_type = NL80211_CHAN_NO_HT;
-       } else if (local->tmp_channel) {
-               chan = local->tmp_channel;
-               channel_type = local->tmp_channel_type;
-       } else {
-               chan = local->oper_channel;
-               channel_type = local->_oper_channel_type;
-       }
-
-       if (chan != local->oper_channel ||
-           channel_type != local->_oper_channel_type)
-               return false;
-
-       /* Check current hardware-config against oper_channel. */
-       if (local->oper_channel != local->hw.conf.channel ||
-           local->_oper_channel_type != local->hw.conf.channel_type)
-               return false;
-
-       return true;
-}
-
 int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
 {
        struct ieee80211_channel *chan;
@@ -191,8 +150,8 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
                changed |= IEEE80211_CONF_CHANGE_SMPS;
        }
 
-       if ((local->scanning & SCAN_SW_SCANNING) ||
-           (local->scanning & SCAN_HW_SCANNING))
+       if (test_bit(SCAN_SW_SCANNING, &local->scanning) ||
+           test_bit(SCAN_HW_SCANNING, &local->scanning))
                power = chan->max_power;
        else
                power = local->power_constr_level ?
@@ -434,9 +393,6 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
        sdata = IEEE80211_DEV_TO_SUB_IF(ndev);
        bss_conf = &sdata->vif.bss_conf;
 
-       if (!ieee80211_sdata_running(sdata))
-               return NOTIFY_DONE;
-
        /* ARP filtering is only supported in managed mode */
        if (sdata->vif.type != NL80211_IFTYPE_STATION)
                return NOTIFY_DONE;
@@ -465,7 +421,7 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
        }
        bss_conf->arp_addr_cnt = c;
 
-       /* Configure driver only if associated */
+       /* Configure driver only if associated (which also implies it is up) */
        if (ifmgd->associated) {
                bss_conf->arp_filter_enabled = sdata->arp_filter_state;
                ieee80211_bss_info_change_notify(sdata,
@@ -611,7 +567,8 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
                        WIPHY_FLAG_OFFCHAN_TX |
                        WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
 
-       wiphy->features = NL80211_FEATURE_SK_TX_STATUS;
+       wiphy->features = NL80211_FEATURE_SK_TX_STATUS |
+                         NL80211_FEATURE_HT_IBSS;
 
        if (!ops->set_key)
                wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
@@ -779,6 +736,12 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
        if (!local->int_scan_req)
                return -ENOMEM;
 
+       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+               if (!local->hw.wiphy->bands[band])
+                       continue;
+               local->int_scan_req->rates[band] = (u32) -1;
+       }
+
        /* if low-level driver supports AP, we also support VLAN */
        if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
                hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
index ee82d2f..c707c8b 100644 (file)
@@ -749,6 +749,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
        atomic_set(&ifmsh->mpaths, 0);
        mesh_rmc_init(sdata);
        ifmsh->last_preq = jiffies;
+       ifmsh->next_perr = jiffies;
        /* Allocate all mesh structures when creating the first mesh interface. */
        if (!mesh_allocated)
                ieee80211s_init();
index 622cc96..bd14bd2 100644 (file)
@@ -233,6 +233,8 @@ void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh);
 /* Mesh paths */
 int mesh_nexthop_lookup(struct sk_buff *skb,
                struct ieee80211_sub_if_data *sdata);
+int mesh_nexthop_resolve(struct sk_buff *skb,
+                        struct ieee80211_sub_if_data *sdata);
 void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata);
 struct mesh_path *mesh_path_lookup(u8 *dst,
                struct ieee80211_sub_if_data *sdata);
index ce3db27..73abb75 100644 (file)
@@ -241,11 +241,15 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
 {
        struct ieee80211_local *local = sdata->local;
        struct sk_buff *skb;
+       struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct ieee80211_mgmt *mgmt;
        u8 *pos, ie_len;
        int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
                      sizeof(mgmt->u.action.u.mesh_action);
 
+       if (time_before(jiffies, ifmsh->next_perr))
+               return -EAGAIN;
+
        skb = dev_alloc_skb(local->hw.extra_tx_headroom +
                            hdr_len +
                            2 + 15 /* PERR IE */);
@@ -290,6 +294,8 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
 
        /* see note in function header */
        prepare_frame_for_deferred_tx(sdata, skb);
+       ifmsh->next_perr = TU_TO_EXP_TIME(
+                                  ifmsh->mshcfg.dot11MeshHWMPperrMinInterval);
        ieee80211_add_pending_skb(local, skb);
        return 0;
 }
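
The two additions above rate-limit PERR transmissions with the jiffies clock: mesh_path_error_tx() now returns -EAGAIN until dot11MeshHWMPperrMinInterval TUs have passed since the last PERR it queued. A minimal sketch of the same pattern follows, with next_allowed and MIN_INTERVAL_TU as illustrative stand-ins for ifmsh->next_perr and the mesh config field; it assumes mac80211's TU_TO_EXP_TIME() helper (jiffies + usecs_to_jiffies(x * 1024)) and <linux/jiffies.h>.

        /* Hedged sketch of the PERR rate-limiting pattern added above. */
        static unsigned long next_allowed;      /* jiffies of next allowed PERR */
        #define MIN_INTERVAL_TU 100             /* 100 TU = 102.4 ms */

        static int try_send_perr(void)
        {
                if (time_before(jiffies, next_allowed))
                        return -EAGAIN;         /* still inside the quiet period */

                /* ... build the PERR frame and queue it for transmission ... */

                next_allowed = TU_TO_EXP_TIME(MIN_INTERVAL_TU);
                return 0;
        }
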
@@ -393,15 +399,13 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
                orig_metric = PREQ_IE_METRIC(hwmp_ie);
                break;
        case MPATH_PREP:
-               /* Originator here refers to the MP that was the destination in
-                * the Path Request. The draft refers to that MP as the
-                * destination address, even though usually it is the origin of
-                * the PREP frame. We divert from the nomenclature in the draft
+               /* Originator here refers to the MP that was the target in the
+                * Path Request. We divert from the nomenclature in the draft
                 * so that we can easily use a single function to gather path
                 * information from both PREQ and PREP frames.
                 */
-               orig_addr = PREP_IE_ORIG_ADDR(hwmp_ie);
-               orig_sn = PREP_IE_ORIG_SN(hwmp_ie);
+               orig_addr = PREP_IE_TARGET_ADDR(hwmp_ie);
+               orig_sn = PREP_IE_TARGET_SN(hwmp_ie);
                orig_lifetime = PREP_IE_LIFETIME(hwmp_ie);
                orig_metric = PREP_IE_METRIC(hwmp_ie);
                break;
@@ -562,9 +566,9 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
                ttl = ifmsh->mshcfg.element_ttl;
                if (ttl != 0) {
                        mhwmp_dbg("replying to the PREQ");
-                       mesh_path_sel_frame_tx(MPATH_PREP, 0, target_addr,
-                               cpu_to_le32(target_sn), 0, orig_addr,
-                               cpu_to_le32(orig_sn), mgmt->sa, 0, ttl,
+                       mesh_path_sel_frame_tx(MPATH_PREP, 0, orig_addr,
+                               cpu_to_le32(orig_sn), 0, target_addr,
+                               cpu_to_le32(target_sn), mgmt->sa, 0, ttl,
                                cpu_to_le32(lifetime), cpu_to_le32(metric),
                                0, sdata);
                } else
@@ -618,14 +622,8 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
 
        mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem));
 
-       /* Note that we divert from the draft nomenclature and denominate
-        * destination to what the draft refers to as origininator. So in this
-        * function destnation refers to the final destination of the PREP,
-        * which corresponds with the originator of the PREQ which this PREP
-        * replies
-        */
-       target_addr = PREP_IE_TARGET_ADDR(prep_elem);
-       if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0)
+       orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
+       if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0)
                /* destination, no forwarding required */
                return;
 
@@ -636,7 +634,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
        }
 
        rcu_read_lock();
-       mpath = mesh_path_lookup(target_addr, sdata);
+       mpath = mesh_path_lookup(orig_addr, sdata);
        if (mpath)
                spin_lock_bh(&mpath->state_lock);
        else
@@ -651,7 +649,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
        flags = PREP_IE_FLAGS(prep_elem);
        lifetime = PREP_IE_LIFETIME(prep_elem);
        hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1;
-       orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
+       target_addr = PREP_IE_TARGET_ADDR(prep_elem);
        target_sn = PREP_IE_TARGET_SN(prep_elem);
        orig_sn = PREP_IE_ORIG_SN(prep_elem);
 
@@ -984,71 +982,97 @@ enddiscovery:
        kfree(preq_node);
 }
 
-/**
- * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame
+/* mesh_nexthop_resolve - lookup next hop for given skb and start path
+ * discovery if no forwarding information is found.
  *
  * @skb: 802.11 frame to be sent
  * @sdata: network subif the frame will be sent through
  *
- * Returns: 0 if the next hop was found. Nonzero otherwise. If no next hop is
- * found, the function will start a path discovery and queue the frame so it is
- * sent when the path is resolved. This means the caller must not free the skb
- * in this case.
+ * Returns: 0 if the next hop was found and -ENOENT if the frame was queued.
+ * The skb is freed here if no mpath could be allocated.
  */
-int mesh_nexthop_lookup(struct sk_buff *skb,
-                       struct ieee80211_sub_if_data *sdata)
+int mesh_nexthop_resolve(struct sk_buff *skb,
+                        struct ieee80211_sub_if_data *sdata)
 {
-       struct sk_buff *skb_to_free = NULL;
-       struct mesh_path *mpath;
-       struct sta_info *next_hop;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct mesh_path *mpath;
+       struct sk_buff *skb_to_free = NULL;
        u8 *target_addr = hdr->addr3;
        int err = 0;
 
        rcu_read_lock();
-       mpath = mesh_path_lookup(target_addr, sdata);
+       err = mesh_nexthop_lookup(skb, sdata);
+       if (!err)
+               goto endlookup;
 
+       /* no nexthop found, start resolving */
+       mpath = mesh_path_lookup(target_addr, sdata);
        if (!mpath) {
                mesh_path_add(target_addr, sdata);
                mpath = mesh_path_lookup(target_addr, sdata);
                if (!mpath) {
-                       sdata->u.mesh.mshstats.dropped_frames_no_route++;
+                       mesh_path_discard_frame(skb, sdata);
                        err = -ENOSPC;
                        goto endlookup;
                }
        }
 
-       if (mpath->flags & MESH_PATH_ACTIVE) {
-               if (time_after(jiffies,
-                              mpath->exp_time -
-                              msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
-                   !memcmp(sdata->vif.addr, hdr->addr4, ETH_ALEN) &&
-                   !(mpath->flags & MESH_PATH_RESOLVING) &&
-                   !(mpath->flags & MESH_PATH_FIXED)) {
-                       mesh_queue_preq(mpath,
-                                       PREQ_Q_F_START | PREQ_Q_F_REFRESH);
-               }
-               next_hop = rcu_dereference(mpath->next_hop);
-               if (next_hop)
-                       memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN);
-               else
-                       err = -ENOENT;
-       } else {
-               struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-               if (!(mpath->flags & MESH_PATH_RESOLVING)) {
-                       /* Start discovery only if it is not running yet */
-                       mesh_queue_preq(mpath, PREQ_Q_F_START);
-               }
+       if (!(mpath->flags & MESH_PATH_RESOLVING))
+               mesh_queue_preq(mpath, PREQ_Q_F_START);
+
+       if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
+               skb_to_free = skb_dequeue(&mpath->frame_queue);
 
-               if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
-                       skb_to_free = skb_dequeue(&mpath->frame_queue);
+       info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+       ieee80211_set_qos_hdr(sdata, skb);
+       skb_queue_tail(&mpath->frame_queue, skb);
+       err = -ENOENT;
+       if (skb_to_free)
+               mesh_path_discard_frame(skb_to_free, sdata);
+
+endlookup:
+       rcu_read_unlock();
+       return err;
+}
+
+/**
+ * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame. Calling
+ * this function is considered "using" the associated mpath, so preempt a path
+ * refresh if this mpath expires soon.
+ *
+ * @skb: 802.11 frame to be sent
+ * @sdata: network subif the frame will be sent through
+ *
+ * Returns: 0 if the next hop was found. Nonzero otherwise.
+ */
+int mesh_nexthop_lookup(struct sk_buff *skb,
+                       struct ieee80211_sub_if_data *sdata)
+{
+       struct mesh_path *mpath;
+       struct sta_info *next_hop;
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+       u8 *target_addr = hdr->addr3;
+       int err = -ENOENT;
+
+       rcu_read_lock();
+       mpath = mesh_path_lookup(target_addr, sdata);
+
+       if (!mpath || !(mpath->flags & MESH_PATH_ACTIVE))
+               goto endlookup;
+
+       if (time_after(jiffies,
+                      mpath->exp_time -
+                      msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
+           !memcmp(sdata->vif.addr, hdr->addr4, ETH_ALEN) &&
+           !(mpath->flags & MESH_PATH_RESOLVING) &&
+           !(mpath->flags & MESH_PATH_FIXED))
+               mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
 
-               info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
-               ieee80211_set_qos_hdr(sdata, skb);
-               skb_queue_tail(&mpath->frame_queue, skb);
-               if (skb_to_free)
-                       mesh_path_discard_frame(skb_to_free, sdata);
-               err = -ENOENT;
+       next_hop = rcu_dereference(mpath->next_hop);
+       if (next_hop) {
+               memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN);
+               memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
+               err = 0;
        }
 
 endlookup:
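
The old mesh_nexthop_lookup() both resolved the next hop and queued unresolved frames; it is now split so that mesh_nexthop_lookup() only fills in addr1/addr2 (and preempts a path refresh), while the new mesh_nexthop_resolve() queues the frame and starts PREQ discovery when no active path exists. A rough caller sketch under the return convention documented above (this is not the actual mac80211 TX path, only an illustration):

        static int mesh_xmit_sketch(struct sk_buff *skb,
                                    struct ieee80211_sub_if_data *sdata)
        {
                int err = mesh_nexthop_resolve(skb, sdata);

                if (err == -ENOENT)
                        return 0;       /* queued on the mpath; sent once resolved,
                                         * so the caller must not free the skb */
                if (err)
                        return err;     /* e.g. -ENOSPC: skb was already discarded */

                /* next hop is in addr1, own address in addr2: transmit as usual */
                return 0;
        }
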
index 7bd2a76..edf167e 100644 (file)
@@ -221,6 +221,7 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
        while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
                hdr = (struct ieee80211_hdr *) skb->data;
                memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
+               memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
                __skb_queue_tail(&tmpq, skb);
        }
 
@@ -264,6 +265,7 @@ static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
        next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
        memcpy(hdr->addr1, next_hop, ETH_ALEN);
        rcu_read_unlock();
+       memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
        memcpy(hdr->addr3, dst_addr, ETH_ALEN);
 }
 
@@ -971,38 +973,11 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
  * @skb: frame to discard
  * @sdata: network subif the frame was to be sent through
  *
- * If the frame was being forwarded from another MP, a PERR frame will be sent
- * to the precursor.  The precursor's address (i.e. the previous hop) was saved
- * in addr1 of the frame-to-be-forwarded, and would only be overwritten once
- * the destination is successfully resolved.
- *
  * Locking: the function must be called within a rcu_read_lock region
  */
 void mesh_path_discard_frame(struct sk_buff *skb,
                             struct ieee80211_sub_if_data *sdata)
 {
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-       struct mesh_path *mpath;
-       u32 sn = 0;
-       __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD);
-
-       if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
-               u8 *ra, *da;
-
-               da = hdr->addr3;
-               ra = hdr->addr1;
-               rcu_read_lock();
-               mpath = mesh_path_lookup(da, sdata);
-               if (mpath) {
-                       spin_lock_bh(&mpath->state_lock);
-                       sn = ++mpath->sn;
-                       spin_unlock_bh(&mpath->state_lock);
-               }
-               rcu_read_unlock();
-               mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data,
-                                  cpu_to_le32(sn), reason, ra, sdata);
-       }
-
        kfree_skb(skb);
        sdata->u.mesh.mshstats.dropped_frames_no_route++;
 }
index 7314372..41ef1b4 100644 (file)
@@ -96,9 +96,12 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
        if (!sta)
                return NULL;
 
-       set_sta_flag(sta, WLAN_STA_AUTH);
-       set_sta_flag(sta, WLAN_STA_AUTHORIZED);
+       sta_info_move_state(sta, IEEE80211_STA_AUTH);
+       sta_info_move_state(sta, IEEE80211_STA_ASSOC);
+       sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
+
        set_sta_flag(sta, WLAN_STA_WME);
+
        sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
        if (elems->ht_cap_elem)
                ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
index 09019d1..a984f1f 100644 (file)
@@ -819,7 +819,7 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
        }
 
        if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
-           (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED))) {
+           !(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
                netif_tx_stop_all_queues(sdata->dev);
 
                if (drv_tx_frames_pending(local))
@@ -1577,10 +1577,10 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
                return false;
        }
 
-       set_sta_flag(sta, WLAN_STA_AUTH);
-       set_sta_flag(sta, WLAN_STA_ASSOC);
+       sta_info_move_state(sta, IEEE80211_STA_AUTH);
+       sta_info_move_state(sta, IEEE80211_STA_ASSOC);
        if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
-               set_sta_flag(sta, WLAN_STA_AUTHORIZED);
+               sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
 
        rates = 0;
        basic_rates = 0;
@@ -2371,6 +2371,7 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
                    (unsigned long) sdata);
 
        ifmgd->flags = 0;
+       ifmgd->powersave = sdata->wdev.ps;
 
        mutex_init(&ifmgd->mtx);
 
index ebd8ccc..e4330d8 100644 (file)
@@ -156,7 +156,6 @@ void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
 }
 
 void ieee80211_offchannel_return(struct ieee80211_local *local,
-                                bool enable_beaconing,
                                 bool offchannel_ps_disable)
 {
        struct ieee80211_sub_if_data *sdata;
@@ -188,11 +187,9 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
                        netif_tx_wake_all_queues(sdata->dev);
                }
 
-               /* Check to see if we should re-enable beaconing */
-               if (enable_beaconing &&
-                   (sdata->vif.type == NL80211_IFTYPE_AP ||
-                    sdata->vif.type == NL80211_IFTYPE_ADHOC ||
-                    sdata->vif.type == NL80211_IFTYPE_MESH_POINT))
+               if (sdata->vif.type == NL80211_IFTYPE_AP ||
+                   sdata->vif.type == NL80211_IFTYPE_ADHOC ||
+                   sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
                        ieee80211_bss_info_change_notify(
                                sdata, BSS_CHANGED_BEACON_ENABLED);
        }
index aeda654..502d3ec 100644 (file)
@@ -318,7 +318,7 @@ rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband,
                        rinfo[i].diff = i * pinfo->norm_offset;
        }
        for (i = 1; i < sband->n_bitrates; i++) {
-               s = 0;
+               s = false;
                for (j = 0; j < sband->n_bitrates - i; j++)
                        if (unlikely(sband->bitrates[rinfo[j].index].bitrate >
                                     sband->bitrates[rinfo[j + 1].index].bitrate)) {
@@ -327,7 +327,7 @@ rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband,
                                rinfo[j + 1].index = tmp;
                                rinfo[rinfo[j].index].rev_index = j;
                                rinfo[rinfo[j + 1].index].rev_index = j + 1;
-                               s = 1;
+                               s = true;
                        }
                if (!s)
                        break;
index d1a8869..2be5b7d 100644 (file)
@@ -1895,13 +1895,16 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
 static ieee80211_rx_result
 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
 {
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr *fwd_hdr, *hdr;
+       struct ieee80211_tx_info *info;
        struct ieee80211s_hdr *mesh_hdr;
-       unsigned int hdrlen;
        struct sk_buff *skb = rx->skb, *fwd_skb;
        struct ieee80211_local *local = rx->local;
        struct ieee80211_sub_if_data *sdata = rx->sdata;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+       struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+       __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD);
+       u16 q, hdrlen;
 
        hdr = (struct ieee80211_hdr *) skb->data;
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -1917,14 +1920,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
                return RX_CONTINUE;
 
        if (!mesh_hdr->ttl)
-               /* illegal frame */
-               return RX_DROP_MONITOR;
-
-       if (ieee80211_queue_stopped(&local->hw, skb_get_queue_mapping(skb))) {
-               IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
-                                               dropped_frames_congestion);
                return RX_DROP_MONITOR;
-       }
 
        if (mesh_hdr->flags & MESH_FLAGS_AE) {
                struct mesh_path *mppath;
@@ -1957,59 +1953,50 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
            compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0)
                return RX_CONTINUE;
 
-       skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
-       mesh_hdr->ttl--;
+       q = ieee80211_select_queue_80211(local, skb, hdr);
+       if (ieee80211_queue_stopped(&local->hw, q)) {
+               IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
+               return RX_DROP_MONITOR;
+       }
+       skb_set_queue_mapping(skb, q);
 
-       if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
-               if (!mesh_hdr->ttl)
-                       IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
-                                                    dropped_frames_ttl);
-               else {
-                       struct ieee80211_hdr *fwd_hdr;
-                       struct ieee80211_tx_info *info;
-
-                       fwd_skb = skb_copy(skb, GFP_ATOMIC);
-
-                       if (!fwd_skb && net_ratelimit())
-                               printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
-                                                  sdata->name);
-                       if (!fwd_skb)
-                               goto out;
-
-                       fwd_hdr =  (struct ieee80211_hdr *) fwd_skb->data;
-                       memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
-                       info = IEEE80211_SKB_CB(fwd_skb);
-                       memset(info, 0, sizeof(*info));
-                       info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
-                       info->control.vif = &rx->sdata->vif;
-                       info->control.jiffies = jiffies;
-                       if (is_multicast_ether_addr(fwd_hdr->addr1)) {
-                               IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
-                                                               fwded_mcast);
-                       } else {
-                               int err;
-                               /*
-                                * Save TA to addr1 to send TA a path error if a
-                                * suitable next hop is not found
-                                */
-                               memcpy(fwd_hdr->addr1, fwd_hdr->addr2,
-                                               ETH_ALEN);
-                               err = mesh_nexthop_lookup(fwd_skb, sdata);
-                               /* Failed to immediately resolve next hop:
-                                * fwded frame was dropped or will be added
-                                * later to the pending skb queue.  */
-                               if (err)
-                                       return RX_DROP_MONITOR;
-
-                               IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
-                                                               fwded_unicast);
-                       }
-                       IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
-                                                    fwded_frames);
-                       ieee80211_add_pending_skb(local, fwd_skb);
-               }
+       if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
+               goto out;
+
+       if (!--mesh_hdr->ttl) {
+               IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
+               return RX_DROP_MONITOR;
        }
 
+       fwd_skb = skb_copy(skb, GFP_ATOMIC);
+       if (!fwd_skb) {
+               if (net_ratelimit())
+                       printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
+                                       sdata->name);
+               goto out;
+       }
+
+       fwd_hdr =  (struct ieee80211_hdr *) fwd_skb->data;
+       info = IEEE80211_SKB_CB(fwd_skb);
+       memset(info, 0, sizeof(*info));
+       info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+       info->control.vif = &rx->sdata->vif;
+       info->control.jiffies = jiffies;
+       if (is_multicast_ether_addr(fwd_hdr->addr1)) {
+               IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
+               memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
+       } else if (!mesh_nexthop_lookup(fwd_skb, sdata)) {
+               IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
+       } else {
+               /* unable to resolve next hop */
+               mesh_path_error_tx(ifmsh->mshcfg.element_ttl, fwd_hdr->addr3,
+                                   0, reason, fwd_hdr->addr2, sdata);
+               IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
+               return RX_DROP_MONITOR;
+       }
+
+       IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
+       ieee80211_add_pending_skb(local, fwd_skb);
  out:
        if (is_multicast_ether_addr(hdr->addr1) ||
            sdata->dev->flags & IFF_PROMISC)
@@ -2250,7 +2237,8 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
                if (sdata->vif.type != NL80211_IFTYPE_STATION &&
                    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
                    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
-                   sdata->vif.type != NL80211_IFTYPE_AP)
+                   sdata->vif.type != NL80211_IFTYPE_AP &&
+                   sdata->vif.type != NL80211_IFTYPE_ADHOC)
                        break;
 
                /* verify action_code is present */
@@ -2787,8 +2775,8 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
                                rate_idx = 0; /* TODO: HT rates */
                        else
                                rate_idx = status->rate_idx;
-                       rx->sta = ieee80211_ibss_add_sta(sdata, bssid,
-                                       hdr->addr2, BIT(rate_idx), GFP_ATOMIC);
+                       ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2,
+                                                BIT(rate_idx));
                }
                break;
        case NL80211_IFTYPE_MESH_POINT:
@@ -2809,10 +2797,17 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
                                return 0;
                } else if (!ieee80211_bssid_match(bssid,
                                        sdata->vif.addr)) {
+                       /*
+                        * Accept public action frames even when the
+                        * BSSID doesn't match, this is used for P2P
+                        * and location updates. Note that mac80211
+                        * itself never looks at these frames.
+                        */
+                       if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
+                           ieee80211_is_public_action(hdr, skb->len))
+                               return 1;
                        if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
-                           !ieee80211_is_beacon(hdr->frame_control) &&
-                           !(ieee80211_is_action(hdr->frame_control) &&
-                             sdata->vif.p2p))
+                           !ieee80211_is_beacon(hdr->frame_control))
                                return 0;
                        status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
                }
index 8186303..2c9b493 100644 (file)
@@ -106,7 +106,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
        /* save the ERP value so that it is available at association time */
        if (elems->erp_info && elems->erp_info_len >= 1) {
                bss->erp_value = elems->erp_info[0];
-               bss->has_erp_value = 1;
+               bss->has_erp_value = true;
        }
 
        if (elems->tim) {
@@ -297,7 +297,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
        if (!was_hw_scan) {
                ieee80211_configure_filter(local);
                drv_sw_scan_complete(local);
-               ieee80211_offchannel_return(local, true, true);
+               ieee80211_offchannel_return(local, true);
        }
 
        ieee80211_recalc_idle(local);
@@ -602,7 +602,7 @@ static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
         * in off-channel state..will put that back
         * on-channel at the end of scanning.
         */
-       ieee80211_offchannel_return(local, true, false);
+       ieee80211_offchannel_return(local, false);
 
        *next_delay = HZ / 5;
        /* afterwards, resume scan & go to next channel */
index 8eaa746..2db01e9 100644 (file)
  * freed before they are done using it.
  */
 
-/* Caller must hold local->sta_lock */
+/* Caller must hold local->sta_mtx */
 static int sta_info_hash_del(struct ieee80211_local *local,
                             struct sta_info *sta)
 {
        struct sta_info *s;
 
        s = rcu_dereference_protected(local->sta_hash[STA_HASH(sta->sta.addr)],
-                                     lockdep_is_held(&local->sta_lock));
+                                     lockdep_is_held(&local->sta_mtx));
        if (!s)
                return -ENOENT;
        if (s == sta) {
@@ -81,7 +81,7 @@ static int sta_info_hash_del(struct ieee80211_local *local,
        while (rcu_access_pointer(s->hnext) &&
               rcu_access_pointer(s->hnext) != sta)
                s = rcu_dereference_protected(s->hnext,
-                                       lockdep_is_held(&local->sta_lock));
+                                       lockdep_is_held(&local->sta_mtx));
        if (rcu_access_pointer(s->hnext)) {
                RCU_INIT_POINTER(s->hnext, sta->hnext);
                return 0;
@@ -98,14 +98,12 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
        struct sta_info *sta;
 
        sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
-                                   lockdep_is_held(&local->sta_lock) ||
                                    lockdep_is_held(&local->sta_mtx));
        while (sta) {
                if (sta->sdata == sdata && !sta->dummy &&
                    memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
                        break;
                sta = rcu_dereference_check(sta->hnext,
-                                           lockdep_is_held(&local->sta_lock) ||
                                            lockdep_is_held(&local->sta_mtx));
        }
        return sta;
@@ -119,14 +117,12 @@ struct sta_info *sta_info_get_rx(struct ieee80211_sub_if_data *sdata,
        struct sta_info *sta;
 
        sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
-                                   lockdep_is_held(&local->sta_lock) ||
                                    lockdep_is_held(&local->sta_mtx));
        while (sta) {
                if (sta->sdata == sdata &&
                    memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
                        break;
                sta = rcu_dereference_check(sta->hnext,
-                                           lockdep_is_held(&local->sta_lock) ||
                                            lockdep_is_held(&local->sta_mtx));
        }
        return sta;
@@ -143,7 +139,6 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
        struct sta_info *sta;
 
        sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
-                                   lockdep_is_held(&local->sta_lock) ||
                                    lockdep_is_held(&local->sta_mtx));
        while (sta) {
                if ((sta->sdata == sdata ||
@@ -152,7 +147,6 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
                    memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
                        break;
                sta = rcu_dereference_check(sta->hnext,
-                                           lockdep_is_held(&local->sta_lock) ||
                                            lockdep_is_held(&local->sta_mtx));
        }
        return sta;
@@ -169,7 +163,6 @@ struct sta_info *sta_info_get_bss_rx(struct ieee80211_sub_if_data *sdata,
        struct sta_info *sta;
 
        sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
-                                   lockdep_is_held(&local->sta_lock) ||
                                    lockdep_is_held(&local->sta_mtx));
        while (sta) {
                if ((sta->sdata == sdata ||
@@ -177,7 +170,6 @@ struct sta_info *sta_info_get_bss_rx(struct ieee80211_sub_if_data *sdata,
                    memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
                        break;
                sta = rcu_dereference_check(sta->hnext,
-                                           lockdep_is_held(&local->sta_lock) ||
                                            lockdep_is_held(&local->sta_mtx));
        }
        return sta;
@@ -204,16 +196,17 @@ struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
 }
 
 /**
- * __sta_info_free - internal STA free helper
+ * sta_info_free - free STA
  *
  * @local: pointer to the global information
  * @sta: STA info to free
  *
  * This function must undo everything done by sta_info_alloc()
- * that may happen before sta_info_insert().
+ * that may happen before sta_info_insert(). It may only be
+ * called when sta_info_insert() has not been attempted (and
+ * if that fails, the station is freed anyway.)
  */
-static void __sta_info_free(struct ieee80211_local *local,
-                           struct sta_info *sta)
+void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
 {
        if (sta->rate_ctrl) {
                rate_control_free_sta(sta);
@@ -227,10 +220,11 @@ static void __sta_info_free(struct ieee80211_local *local,
        kfree(sta);
 }
 
-/* Caller must hold local->sta_lock */
+/* Caller must hold local->sta_mtx */
 static void sta_info_hash_add(struct ieee80211_local *local,
                              struct sta_info *sta)
 {
+       lockdep_assert_held(&local->sta_mtx);
        sta->hnext = local->sta_hash[STA_HASH(sta->sta.addr)];
        RCU_INIT_POINTER(local->sta_hash[STA_HASH(sta->sta.addr)], sta);
 }
@@ -280,7 +274,7 @@ static int sta_prepare_rate_control(struct ieee80211_local *local,
 }
 
 struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
-                               u8 *addr, gfp_t gfp)
+                               const u8 *addr, gfp_t gfp)
 {
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
@@ -338,102 +332,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
        return sta;
 }
 
-static int sta_info_finish_insert(struct sta_info *sta,
-                               bool async, bool dummy_reinsert)
-{
-       struct ieee80211_local *local = sta->local;
-       struct ieee80211_sub_if_data *sdata = sta->sdata;
-       struct station_info sinfo;
-       unsigned long flags;
-       int err = 0;
-
-       lockdep_assert_held(&local->sta_mtx);
-
-       if (!sta->dummy || dummy_reinsert) {
-               /* notify driver */
-               if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
-                       sdata = container_of(sdata->bss,
-                                            struct ieee80211_sub_if_data,
-                                            u.ap);
-               err = drv_sta_add(local, sdata, &sta->sta);
-               if (err) {
-                       if (!async)
-                               return err;
-                       printk(KERN_DEBUG "%s: failed to add IBSS STA %pM to "
-                                         "driver (%d) - keeping it anyway.\n",
-                              sdata->name, sta->sta.addr, err);
-               } else {
-                       sta->uploaded = true;
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-                       if (async)
-                               wiphy_debug(local->hw.wiphy,
-                                           "Finished adding IBSS STA %pM\n",
-                                           sta->sta.addr);
-#endif
-               }
-
-               sdata = sta->sdata;
-       }
-
-       if (!dummy_reinsert) {
-               if (!async) {
-                       local->num_sta++;
-                       local->sta_generation++;
-                       smp_mb();
-
-                       /* make the station visible */
-                       spin_lock_irqsave(&local->sta_lock, flags);
-                       sta_info_hash_add(local, sta);
-                       spin_unlock_irqrestore(&local->sta_lock, flags);
-               }
-
-               list_add(&sta->list, &local->sta_list);
-       } else {
-               sta->dummy = false;
-       }
-
-       if (!sta->dummy) {
-               ieee80211_sta_debugfs_add(sta);
-               rate_control_add_sta_debugfs(sta);
-
-               memset(&sinfo, 0, sizeof(sinfo));
-               sinfo.filled = 0;
-               sinfo.generation = local->sta_generation;
-               cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
-       }
-
-       return 0;
-}
-
-static void sta_info_finish_pending(struct ieee80211_local *local)
-{
-       struct sta_info *sta;
-       unsigned long flags;
-
-       spin_lock_irqsave(&local->sta_lock, flags);
-       while (!list_empty(&local->sta_pending_list)) {
-               sta = list_first_entry(&local->sta_pending_list,
-                                      struct sta_info, list);
-               list_del(&sta->list);
-               spin_unlock_irqrestore(&local->sta_lock, flags);
-
-               sta_info_finish_insert(sta, true, false);
-
-               spin_lock_irqsave(&local->sta_lock, flags);
-       }
-       spin_unlock_irqrestore(&local->sta_lock, flags);
-}
-
-static void sta_info_finish_work(struct work_struct *work)
-{
-       struct ieee80211_local *local =
-               container_of(work, struct ieee80211_local, sta_finish_work);
-
-       mutex_lock(&local->sta_mtx);
-       sta_info_finish_pending(local);
-       mutex_unlock(&local->sta_mtx);
-}
-
 static int sta_info_insert_check(struct sta_info *sta)
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -453,70 +351,24 @@ static int sta_info_insert_check(struct sta_info *sta)
        return 0;
 }
 
-static int sta_info_insert_ibss(struct sta_info *sta) __acquires(RCU)
-{
-       struct ieee80211_local *local = sta->local;
-       struct ieee80211_sub_if_data *sdata = sta->sdata;
-       unsigned long flags;
-
-       spin_lock_irqsave(&local->sta_lock, flags);
-       /* check if STA exists already */
-       if (sta_info_get_bss_rx(sdata, sta->sta.addr)) {
-               spin_unlock_irqrestore(&local->sta_lock, flags);
-               rcu_read_lock();
-               return -EEXIST;
-       }
-
-       local->num_sta++;
-       local->sta_generation++;
-       smp_mb();
-       sta_info_hash_add(local, sta);
-
-       list_add_tail(&sta->list, &local->sta_pending_list);
-
-       rcu_read_lock();
-       spin_unlock_irqrestore(&local->sta_lock, flags);
-
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-       wiphy_debug(local->hw.wiphy, "Added IBSS STA %pM\n",
-                       sta->sta.addr);
-#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
-
-       ieee80211_queue_work(&local->hw, &local->sta_finish_work);
-
-       return 0;
-}
-
 /*
  * should be called with sta_mtx locked
  * this function replaces the mutex lock
  * with a RCU lock
  */
-static int sta_info_insert_non_ibss(struct sta_info *sta) __acquires(RCU)
+static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
 {
        struct ieee80211_local *local = sta->local;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
-       unsigned long flags;
        struct sta_info *exist_sta;
        bool dummy_reinsert = false;
        int err = 0;
 
        lockdep_assert_held(&local->sta_mtx);
 
-       /*
-        * On first glance, this will look racy, because the code
-        * in this function, which inserts a station with sleeping,
-        * unlocks the sta_lock between checking existence in the
-        * hash table and inserting into it.
-        *
-        * However, it is not racy against itself because it keeps
-        * the mutex locked.
-        */
-
-       spin_lock_irqsave(&local->sta_lock, flags);
        /*
         * check if STA exists already.
-        * only accept a scenario of a second call to sta_info_insert_non_ibss
+        * only accept a scenario of a second call to sta_info_insert_finish
         * with a dummy station entry that was inserted earlier
         * in that case - assume that the dummy station flag should
         * be removed.
@@ -526,20 +378,47 @@ static int sta_info_insert_non_ibss(struct sta_info *sta) __acquires(RCU)
                if (exist_sta == sta && sta->dummy) {
                        dummy_reinsert = true;
                } else {
-                       spin_unlock_irqrestore(&local->sta_lock, flags);
-                       mutex_unlock(&local->sta_mtx);
-                       rcu_read_lock();
-                       return -EEXIST;
+                       err = -EEXIST;
+                       goto out_err;
                }
        }
 
-       spin_unlock_irqrestore(&local->sta_lock, flags);
+       if (!sta->dummy || dummy_reinsert) {
+               /* notify driver */
+               err = drv_sta_add(local, sdata, &sta->sta);
+               if (err) {
+                       if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
+                               goto out_err;
+                       printk(KERN_DEBUG "%s: failed to add IBSS STA %pM to "
+                                         "driver (%d) - keeping it anyway.\n",
+                              sdata->name, sta->sta.addr, err);
+               } else
+                       sta->uploaded = true;
+       }
 
-       err = sta_info_finish_insert(sta, false, dummy_reinsert);
-       if (err) {
-               mutex_unlock(&local->sta_mtx);
-               rcu_read_lock();
-               return err;
+       if (!dummy_reinsert) {
+               local->num_sta++;
+               local->sta_generation++;
+               smp_mb();
+
+               /* make the station visible */
+               sta_info_hash_add(local, sta);
+
+               list_add(&sta->list, &local->sta_list);
+       } else {
+               sta->dummy = false;
+       }
+
+       if (!sta->dummy) {
+               struct station_info sinfo;
+
+               ieee80211_sta_debugfs_add(sta);
+               rate_control_add_sta_debugfs(sta);
+
+               memset(&sinfo, 0, sizeof(sinfo));
+               sinfo.filled = 0;
+               sinfo.generation = local->sta_generation;
+               cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
        }
 
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
@@ -555,54 +434,35 @@ static int sta_info_insert_non_ibss(struct sta_info *sta) __acquires(RCU)
                mesh_accept_plinks_update(sdata);
 
        return 0;
+ out_err:
+       mutex_unlock(&local->sta_mtx);
+       rcu_read_lock();
+       return err;
 }
 
 int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
 {
        struct ieee80211_local *local = sta->local;
-       struct ieee80211_sub_if_data *sdata = sta->sdata;
        int err = 0;
 
+       might_sleep();
+
        err = sta_info_insert_check(sta);
        if (err) {
                rcu_read_lock();
                goto out_free;
        }
 
-       /*
-        * In ad-hoc mode, we sometimes need to insert stations
-        * from tasklet context from the RX path. To avoid races,
-        * always do so in that case -- see the comment below.
-        */
-       if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
-               err = sta_info_insert_ibss(sta);
-               if (err)
-                       goto out_free;
-
-               return 0;
-       }
-
-       /*
-        * It might seem that the function called below is in race against
-        * the function call above that atomically inserts the station... That,
-        * however, is not true because the above code can only
-        * be invoked for IBSS interfaces, and the below code will
-        * not be -- and the two do not race against each other as
-        * the hash table also keys off the interface.
-        */
-
-       might_sleep();
-
        mutex_lock(&local->sta_mtx);
 
-       err = sta_info_insert_non_ibss(sta);
+       err = sta_info_insert_finish(sta);
        if (err)
                goto out_free;
 
        return 0;
  out_free:
        BUG_ON(!err);
-       __sta_info_free(local, sta);
+       sta_info_free(local, sta);
        return err;
 }
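
sta_info_insert_rcu() keeps its contract of returning with the RCU read lock held on both success and failure, but with the IBSS special case gone the error path now always frees the station via sta_info_free(). A hedged caller sketch under that contract (addr and err are illustrative locals):

        struct sta_info *sta;
        int err;

        sta = sta_info_alloc(sdata, addr, GFP_KERNEL);
        if (!sta)
                return -ENOMEM;

        err = sta_info_insert_rcu(sta);
        if (err) {
                rcu_read_unlock();      /* sta has already been freed */
                return err;
        }
        /* ... use sta under RCU protection ... */
        rcu_read_unlock();
        return 0;
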
 
@@ -629,7 +489,7 @@ int sta_info_reinsert(struct sta_info *sta)
 
        might_sleep();
 
-       err = sta_info_insert_non_ibss(sta);
+       err = sta_info_insert_finish(sta);
        rcu_read_unlock();
        return err;
 }
@@ -716,7 +576,7 @@ void sta_info_recalc_tim(struct sta_info *sta)
        }
 
  done:
-       spin_lock_irqsave(&local->sta_lock, flags);
+       spin_lock_irqsave(&local->tim_lock, flags);
 
        if (indicate_tim)
                __bss_tim_set(bss, sta->sta.aid);
@@ -729,7 +589,7 @@ void sta_info_recalc_tim(struct sta_info *sta)
                local->tim_in_locked_section = false;
        }
 
-       spin_unlock_irqrestore(&local->sta_lock, flags);
+       spin_unlock_irqrestore(&local->tim_lock, flags);
 }
 
 static bool sta_info_buffer_expired(struct sta_info *sta, struct sk_buff *skb)
@@ -853,8 +713,8 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
 {
        struct ieee80211_local *local;
        struct ieee80211_sub_if_data *sdata;
-       unsigned long flags;
        int ret, i, ac;
+       struct tid_ampdu_tx *tid_tx;
 
        might_sleep();
 
@@ -873,15 +733,12 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
        set_sta_flag(sta, WLAN_STA_BLOCK_BA);
        ieee80211_sta_tear_down_BA_sessions(sta, true);
 
-       spin_lock_irqsave(&local->sta_lock, flags);
        ret = sta_info_hash_del(local, sta);
-       /* this might still be the pending list ... which is fine */
-       if (!ret)
-               list_del(&sta->list);
-       spin_unlock_irqrestore(&local->sta_lock, flags);
        if (ret)
                return ret;
 
+       list_del(&sta->list);
+
        mutex_lock(&local->key_mtx);
        for (i = 0; i < NUM_DEFAULT_KEYS; i++)
                __ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i]));
@@ -908,6 +765,9 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
        if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
                RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
 
+       while (sta->sta_state > IEEE80211_STA_NONE)
+               sta_info_move_state(sta, sta->sta_state - 1);
+
        if (sta->uploaded) {
                if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
                        sdata = container_of(sdata->bss,
@@ -953,7 +813,31 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
        }
 #endif
 
-       __sta_info_free(local, sta);
+       /* There could be memory leaks because the ampdu tx pending queues
+        * are not freed before destroying the station info.
+        *
+        * Make sure that such queues are purged before freeing the station
+        * info.
+        * TODO: We have to somehow postpone the full destruction
+        * until the aggregation stop completes. Refer
+        * http://thread.gmane.org/gmane.linux.kernel.wireless.general/81936
+        */
+       for (i = 0; i < STA_TID_NUM; i++) {
+               if (!sta->ampdu_mlme.tid_tx[i])
+                       continue;
+               tid_tx = sta->ampdu_mlme.tid_tx[i];
+               if (skb_queue_len(&tid_tx->pending)) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+                       wiphy_debug(local->hw.wiphy, "TX A-MPDU purging %d "
+                               "packets for tid=%d\n",
+                               skb_queue_len(&tid_tx->pending), i);
+#endif /* CONFIG_MAC80211_HT_DEBUG */
+                       __skb_queue_purge(&tid_tx->pending);
+               }
+               kfree_rcu(tid_tx, rcu_head);
+       }
+
+       sta_info_free(local, sta);
 
        return 0;
 }
@@ -1009,11 +893,9 @@ static void sta_info_cleanup(unsigned long data)
 
 void sta_info_init(struct ieee80211_local *local)
 {
-       spin_lock_init(&local->sta_lock);
+       spin_lock_init(&local->tim_lock);
        mutex_init(&local->sta_mtx);
        INIT_LIST_HEAD(&local->sta_list);
-       INIT_LIST_HEAD(&local->sta_pending_list);
-       INIT_WORK(&local->sta_finish_work, sta_info_finish_work);
 
        setup_timer(&local->sta_cleanup, sta_info_cleanup,
                    (unsigned long)local);
@@ -1042,9 +924,6 @@ int sta_info_flush(struct ieee80211_local *local,
        might_sleep();
 
        mutex_lock(&local->sta_mtx);
-
-       sta_info_finish_pending(local);
-
        list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
                if (!sdata || sdata == sta->sdata)
                        WARN_ON(__sta_info_destroy(sta));
@@ -1517,3 +1396,56 @@ void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta,
        sta_info_recalc_tim(sta);
 }
 EXPORT_SYMBOL(ieee80211_sta_set_buffered);
+
+int sta_info_move_state_checked(struct sta_info *sta,
+                               enum ieee80211_sta_state new_state)
+{
+       might_sleep();
+
+       if (sta->sta_state == new_state)
+               return 0;
+
+       switch (new_state) {
+       case IEEE80211_STA_NONE:
+               if (sta->sta_state == IEEE80211_STA_AUTH)
+                       clear_bit(WLAN_STA_AUTH, &sta->_flags);
+               else
+                       return -EINVAL;
+               break;
+       case IEEE80211_STA_AUTH:
+               if (sta->sta_state == IEEE80211_STA_NONE)
+                       set_bit(WLAN_STA_AUTH, &sta->_flags);
+               else if (sta->sta_state == IEEE80211_STA_ASSOC)
+                       clear_bit(WLAN_STA_ASSOC, &sta->_flags);
+               else
+                       return -EINVAL;
+               break;
+       case IEEE80211_STA_ASSOC:
+               if (sta->sta_state == IEEE80211_STA_AUTH) {
+                       set_bit(WLAN_STA_ASSOC, &sta->_flags);
+               } else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
+                       if (sta->sdata->vif.type == NL80211_IFTYPE_AP)
+                               atomic_dec(&sta->sdata->u.ap.num_sta_authorized);
+                       clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
+               } else
+                       return -EINVAL;
+               break;
+       case IEEE80211_STA_AUTHORIZED:
+               if (sta->sta_state == IEEE80211_STA_ASSOC) {
+                       if (sta->sdata->vif.type == NL80211_IFTYPE_AP)
+                               atomic_inc(&sta->sdata->u.ap.num_sta_authorized);
+                       set_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
+               } else
+                       return -EINVAL;
+               break;
+       default:
+               WARN(1, "invalid state %d", new_state);
+               return -EINVAL;
+       }
+
+       printk(KERN_DEBUG "%s: moving STA %pM to state %d\n",
+               sta->sdata->name, sta->sta.addr, new_state);
+       sta->sta_state = new_state;
+
+       return 0;
+}
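
sta_info_move_state_checked() only accepts transitions between adjacent states of the enum, so callers walk the ladder one rung at a time, exactly as the chained calls added to mesh_plink_alloc() and ieee80211_assoc_success() above do. A short sketch of the expected usage:

        /* States must be traversed one step at a time; skipping a step
         * (e.g. NONE straight to ASSOC) is rejected with -EINVAL. */
        sta_info_move_state(sta, IEEE80211_STA_AUTH);        /* NONE  -> AUTH       */
        sta_info_move_state(sta, IEEE80211_STA_ASSOC);       /* AUTH  -> ASSOC      */
        sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);  /* ASSOC -> AUTHORIZED */

        /* Teardown walks back down the same way, as __sta_info_destroy()
         * now does:
         *      while (sta->sta_state > IEEE80211_STA_NONE)
         *              sta_info_move_state(sta, sta->sta_state - 1);
         */
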
index 6280e8b..15b3bb7 100644 (file)
@@ -73,6 +73,14 @@ enum ieee80211_sta_info_flags {
        WLAN_STA_4ADDR_EVENT,
 };
 
+enum ieee80211_sta_state {
+       /* NOTE: These need to be ordered correctly! */
+       IEEE80211_STA_NONE,
+       IEEE80211_STA_AUTH,
+       IEEE80211_STA_ASSOC,
+       IEEE80211_STA_AUTHORIZED,
+};
+
 #define STA_TID_NUM 16
 #define ADDBA_RESP_INTERVAL HZ
 #define HT_AGG_MAX_RETRIES             0x3
@@ -88,6 +96,7 @@ enum ieee80211_sta_info_flags {
  * struct tid_ampdu_tx - TID aggregation information (Tx).
  *
  * @rcu_head: rcu head for freeing structure
+ * @session_timer: check if we keep Tx-ing on the TID (by timeout value)
  * @addba_resp_timer: timer for peer's response to addba request
  * @pending: pending frames queue -- use sta's spinlock to protect
  * @dialog_token: dialog token for aggregation session
@@ -110,6 +119,7 @@ enum ieee80211_sta_info_flags {
  */
 struct tid_ampdu_tx {
        struct rcu_head rcu_head;
+       struct timer_list session_timer;
        struct timer_list addba_resp_timer;
        struct sk_buff_head pending;
        unsigned long state;
@@ -260,6 +270,7 @@ struct sta_ampdu_mlme {
  * @dummy: indicate a dummy station created for receiving
  *     EAP frames before association
  * @sta: station information we share with the driver
+ * @sta_state: duplicates information about station state (for debug)
  */
 struct sta_info {
        /* General information, mostly static */
@@ -281,6 +292,8 @@ struct sta_info {
 
        bool uploaded;
 
+       enum ieee80211_sta_state sta_state;
+
        /* use the accessors defined below */
        unsigned long _flags;
 
@@ -369,12 +382,18 @@ static inline enum nl80211_plink_state sta_plink_state(struct sta_info *sta)
 static inline void set_sta_flag(struct sta_info *sta,
                                enum ieee80211_sta_info_flags flag)
 {
+       WARN_ON(flag == WLAN_STA_AUTH ||
+               flag == WLAN_STA_ASSOC ||
+               flag == WLAN_STA_AUTHORIZED);
        set_bit(flag, &sta->_flags);
 }
 
 static inline void clear_sta_flag(struct sta_info *sta,
                                  enum ieee80211_sta_info_flags flag)
 {
+       WARN_ON(flag == WLAN_STA_AUTH ||
+               flag == WLAN_STA_ASSOC ||
+               flag == WLAN_STA_AUTHORIZED);
        clear_bit(flag, &sta->_flags);
 }
 
@@ -387,15 +406,32 @@ static inline int test_sta_flag(struct sta_info *sta,
 static inline int test_and_clear_sta_flag(struct sta_info *sta,
                                          enum ieee80211_sta_info_flags flag)
 {
+       WARN_ON(flag == WLAN_STA_AUTH ||
+               flag == WLAN_STA_ASSOC ||
+               flag == WLAN_STA_AUTHORIZED);
        return test_and_clear_bit(flag, &sta->_flags);
 }
 
 static inline int test_and_set_sta_flag(struct sta_info *sta,
                                        enum ieee80211_sta_info_flags flag)
 {
+       WARN_ON(flag == WLAN_STA_AUTH ||
+               flag == WLAN_STA_ASSOC ||
+               flag == WLAN_STA_AUTHORIZED);
        return test_and_set_bit(flag, &sta->_flags);
 }
 
+int sta_info_move_state_checked(struct sta_info *sta,
+                               enum ieee80211_sta_state new_state);
+
+static inline void sta_info_move_state(struct sta_info *sta,
+                                      enum ieee80211_sta_state new_state)
+{
+       int ret = sta_info_move_state_checked(sta, new_state);
+       WARN_ON_ONCE(ret);
+}
+
 void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
                             struct tid_ampdu_tx *tid_tx);
 
@@ -486,7 +522,10 @@ struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
  * until sta_info_insert().
  */
 struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
-                               u8 *addr, gfp_t gfp);
+                               const u8 *addr, gfp_t gfp);
+
+void sta_info_free(struct ieee80211_local *local, struct sta_info *sta);
+
 /*
  * Insert STA info into hash table/list, returns zero or a
  * -EEXIST (if the same MAC address is already present).
@@ -497,7 +536,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
  */
 int sta_info_insert(struct sta_info *sta);
 int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU);
-int sta_info_insert_atomic(struct sta_info *sta);
 int sta_info_reinsert(struct sta_info *sta);
 
 int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata,
index 46222ce..30c265c 100644 (file)
@@ -340,7 +340,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct ieee80211_local *local = hw_to_local(hw);
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       u16 frag, type;
        __le16 fc;
        struct ieee80211_supported_band *sband;
        struct ieee80211_sub_if_data *sdata;
@@ -476,12 +475,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
         * Fragments are passed to low-level drivers as separate skbs, so these
         * are actually fragments, not frames. Update frame counters only for
         * the first fragment of the frame. */
-
-       frag = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
-       type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;
-
        if (info->flags & IEEE80211_TX_STAT_ACK) {
-               if (frag == 0) {
+               if (ieee80211_is_first_frag(hdr->seq_ctrl)) {
                        local->dot11TransmittedFrameCount++;
                        if (is_multicast_ether_addr(hdr->addr1))
                                local->dot11MulticastTransmittedFrameCount++;
@@ -496,11 +491,11 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
                 * with a multicast address in the address 1 field of type Data
                 * or Management. */
                if (!is_multicast_ether_addr(hdr->addr1) ||
-                   type == IEEE80211_FTYPE_DATA ||
-                   type == IEEE80211_FTYPE_MGMT)
+                   ieee80211_is_data(fc) ||
+                   ieee80211_is_mgmt(fc))
                        local->dot11TransmittedFragmentCount++;
        } else {
-               if (frag == 0)
+               if (ieee80211_is_first_frag(hdr->seq_ctrl))
                        local->dot11FailedCount++;
        }
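
The manual frag/type extraction is replaced with the ieee80211_is_first_frag() helper, which tests the fragment-number bits of the sequence control word directly in little-endian form. Its definition is roughly the following (from include/linux/ieee80211.h of this series; quoted from memory, so treat it as an approximation):

        static inline int ieee80211_is_first_frag(__le16 seq_ctrl)
        {
                /* fragment number 0 => first (or only) fragment of the MSDU */
                return (seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0;
        }
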
 
@@ -572,7 +567,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
 
        /* Need to make a copy before skb->cb gets cleared */
        send_to_cooked = !!(info->flags & IEEE80211_TX_CTL_INJECTED) ||
-                       (type != IEEE80211_FTYPE_DATA);
+                        !(ieee80211_is_data(fc));
 
        /*
         * This is a bit racy but we can avoid a lot of work
index 8d31933..edcd1c7 100644 (file)
@@ -151,11 +151,15 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
                rate = mrate;
        }
 
-       /* Time needed to transmit ACK
-        * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
-        * to closest integer */
-
-       dur = ieee80211_frame_duration(local, 10, rate, erp,
+       /* Don't calculate ACKs for QoS Frames with NoAck Policy set */
+       if (ieee80211_is_data_qos(hdr->frame_control) &&
+           *(ieee80211_get_qos_ctl(hdr)) & IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
+               dur = 0;
+       else
+               /* Time needed to transmit ACK
+                * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
+                * to closest integer */
+               dur = ieee80211_frame_duration(local, 10, rate, erp,
                                tx->sdata->vif.bss_conf.use_short_preamble);
 
        if (next_frag_len) {
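
The QoS no-ack test above relies on a bitwise AND against the ack-policy bits (a bitwise OR would make the condition unconditionally true). A hedged restatement of the check in isolation, with qc as an illustrative local:

        /* ieee80211_get_qos_ctl() returns a pointer to the QoS control
         * field; the ack-policy bits live in its first octet. */
        u8 *qc = ieee80211_get_qos_ctl(hdr);

        if (ieee80211_is_data_qos(hdr->frame_control) &&
            (*qc & IEEE80211_QOS_CTL_ACK_POLICY_NOACK))
                dur = 0;        /* receiver sends no ACK, reserve no ACK time */
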
@@ -291,7 +295,6 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
 
        if (likely(tx->flags & IEEE80211_TX_UNICAST)) {
                if (unlikely(!assoc &&
-                            tx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
                             ieee80211_is_data(hdr->frame_control))) {
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
                        printk(KERN_DEBUG "%s: dropped data frame to not "
@@ -301,17 +304,14 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
                        I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc);
                        return TX_DROP;
                }
-       } else {
-               if (unlikely(ieee80211_is_data(hdr->frame_control) &&
-                            tx->local->num_sta == 0 &&
-                            tx->sdata->vif.type != NL80211_IFTYPE_ADHOC)) {
-                       /*
-                        * No associated STAs - no need to send multicast
-                        * frames.
-                        */
-                       return TX_DROP;
-               }
-               return TX_CONTINUE;
+       } else if (unlikely(tx->sdata->vif.type == NL80211_IFTYPE_AP &&
+                           ieee80211_is_data(hdr->frame_control) &&
+                           !atomic_read(&tx->sdata->u.ap.num_sta_authorized))) {
+               /*
+                * No associated STAs - no need to send multicast
+                * frames.
+                */
+               return TX_DROP;
        }
 
        return TX_CONTINUE;
@@ -636,6 +636,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
        else
                txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
        txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
+                   tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
                    tx->sdata->vif.type == NL80211_IFTYPE_ADHOC);
 
        /* set up RTS protection if desired */
@@ -1063,9 +1064,11 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
                                  int tid)
 {
        bool queued = false;
+       bool reset_agg_timer = false;
 
        if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
                info->flags |= IEEE80211_TX_CTL_AMPDU;
+               reset_agg_timer = true;
        } else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
                /*
                 * nothing -- this aggregation session is being started
@@ -1097,6 +1100,7 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
                        /* do nothing, let packet pass through */
                } else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
                        info->flags |= IEEE80211_TX_CTL_AMPDU;
+                       reset_agg_timer = true;
                } else {
                        queued = true;
                        info->control.vif = &tx->sdata->vif;
@@ -1106,6 +1110,11 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
                spin_unlock(&tx->sta->lock);
        }
 
+       /* reset session timer */
+       if (reset_agg_timer && tid_tx->timeout)
+               mod_timer(&tid_tx->session_timer,
+                         TU_TO_EXP_TIME(tid_tx->timeout));
+
        return queued;
 }
 
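
The reset_agg_timer flag lets the hunk rearm the BA session inactivity timer
after the station spinlock has been dropped, whenever a frame is sent as part
of an operational aggregation session and a non-zero timeout was negotiated.
TU_TO_EXP_TIME() converts that timeout, given in 802.11 time units
(1 TU = 1024 microseconds), into an absolute jiffies expiry; the user-space
sketch below only models that conversion and assumes an example HZ value.

#include <stdio.h>

#define HZ		250		/* example tick rate for the sketch */
#define TU_USEC		1024UL		/* one 802.11 time unit in microseconds */

/* Rough model of the TU_TO_EXP_TIME() idea: timeout in TUs -> future
 * jiffies value, rounding the microsecond count up to whole ticks. */
static unsigned long tu_to_exp_time(unsigned long now, unsigned long tu)
{
	unsigned long usec = tu * TU_USEC;

	return now + (usec * HZ + 999999UL) / 1000000UL;
}

int main(void)
{
	/* e.g. a 5000 TU (~5.1 s) session timeout starting at jiffy 1000 */
	printf("%lu\n", tu_to_exp_time(1000UL, 5000UL));
	return 0;
}
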
@@ -1173,16 +1182,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
        if (is_multicast_ether_addr(hdr->addr1)) {
                tx->flags &= ~IEEE80211_TX_UNICAST;
                info->flags |= IEEE80211_TX_CTL_NO_ACK;
-       } else {
+       } else
                tx->flags |= IEEE80211_TX_UNICAST;
-               if (unlikely(local->wifi_wme_noack_test))
-                       info->flags |= IEEE80211_TX_CTL_NO_ACK;
-               /*
-                * Flags are initialized to 0. Hence, no need to
-                * explicitly unset IEEE80211_TX_CTL_NO_ACK since
-                * it might already be set for injected frames.
-                */
-       }
 
        if (!(info->flags & IEEE80211_TX_CTL_DONTFRAG)) {
                if (!(tx->flags & IEEE80211_TX_UNICAST) ||
@@ -1223,9 +1224,10 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
                         * queue is woken again.
                         */
                        if (txpending)
-                               skb_queue_splice(skbs, &local->pending[q]);
+                               skb_queue_splice_init(skbs, &local->pending[q]);
                        else
-                               skb_queue_splice_tail(skbs, &local->pending[q]);
+                               skb_queue_splice_tail_init(skbs,
+                                                          &local->pending[q]);
 
                        spin_unlock_irqrestore(&local->queue_stop_reason_lock,
                                               flags);
@@ -1297,7 +1299,7 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
        ieee80211_tpt_led_trig_tx(local, fc, led_len);
        ieee80211_led_tx(local, 1);
 
-       WARN_ON(!skb_queue_empty(skbs));
+       WARN_ON_ONCE(!skb_queue_empty(skbs));
 
        return result;
 }
@@ -1326,8 +1328,11 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
        if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
                CALL_TXH(ieee80211_tx_h_rate_ctrl);
 
-       if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION))
+       if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) {
+               __skb_queue_tail(&tx->skbs, tx->skb);
+               tx->skb = NULL;
                goto txh_done;
+       }
 
        CALL_TXH(ieee80211_tx_h_michael_mic_add);
        CALL_TXH(ieee80211_tx_h_sequence);
@@ -1458,7 +1463,7 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
        if (ieee80211_vif_is_mesh(&sdata->vif) &&
            ieee80211_is_data(hdr->frame_control) &&
                !is_multicast_ether_addr(hdr->addr1))
-                       if (mesh_nexthop_lookup(skb, sdata)) {
+                       if (mesh_nexthop_resolve(skb, sdata)) {
                                /* skb queued: don't free */
                                rcu_read_unlock();
                                return;
@@ -2260,10 +2265,10 @@ static void ieee80211_beacon_add_tim(struct ieee80211_if_ap *bss,
                /* Bitmap control */
                *pos++ = n1 | aid0;
                /* Part Virt Bitmap */
+               skb_put(skb, n2 - n1);
                memcpy(pos, bss->tim + n1, n2 - n1 + 1);
 
                tim[1] = n2 - n1 + 4;
-               skb_put(skb, n2 - n1);
        } else {
                *pos++ = aid0; /* Bitmap control */
                *pos++ = 0; /* Part Virt Bitmap */
@@ -2328,9 +2333,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
                        } else {
                                unsigned long flags;
 
-                               spin_lock_irqsave(&local->sta_lock, flags);
+                               spin_lock_irqsave(&local->tim_lock, flags);
                                ieee80211_beacon_add_tim(ap, skb, beacon);
-                               spin_unlock_irqrestore(&local->sta_lock, flags);
+                               spin_unlock_irqrestore(&local->tim_lock, flags);
                        }
 
                        if (tim_offset)
@@ -2691,15 +2696,15 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
 }
 EXPORT_SYMBOL(ieee80211_get_buffered_bc);
 
-void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
+void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
+                         struct sk_buff *skb, int tid)
 {
        skb_set_mac_header(skb, 0);
        skb_set_network_header(skb, 0);
        skb_set_transport_header(skb, 0);
 
-       /* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
-       skb_set_queue_mapping(skb, IEEE80211_AC_VO);
-       skb->priority = 7;
+       skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]);
+       skb->priority = tid;
 
        /*
         * The other path calling ieee80211_xmit is from the tasklet,
index 3b9b492..eb1a5f7 100644 (file)
@@ -1192,7 +1192,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                                             struct ieee80211_sub_if_data,
                                             u.ap);
 
-                       memset(&sta->sta.drv_priv, 0, hw->sta_data_size);
                        WARN_ON(drv_sta_add(local, sdata, &sta->sta));
                }
        }
@@ -1235,7 +1234,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
 
                switch (sdata->vif.type) {
                case NL80211_IFTYPE_STATION:
-                       changed |= BSS_CHANGED_ASSOC;
+                       changed |= BSS_CHANGED_ASSOC |
+                                  BSS_CHANGED_ARP_FILTER;
                        mutex_lock(&sdata->u.mgd.mtx);
                        ieee80211_bss_info_change_notify(sdata, changed);
                        mutex_unlock(&sdata->u.mgd.mtx);
@@ -1244,8 +1244,11 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                        changed |= BSS_CHANGED_IBSS;
                        /* fall through */
                case NL80211_IFTYPE_AP:
-                       changed |= BSS_CHANGED_SSID |
-                                  BSS_CHANGED_AP_PROBE_RESP;
+                       changed |= BSS_CHANGED_SSID;
+
+                       if (sdata->vif.type == NL80211_IFTYPE_AP)
+                               changed |= BSS_CHANGED_AP_PROBE_RESP;
+
                        /* fall through */
                case NL80211_IFTYPE_MESH_POINT:
                        changed |= BSS_CHANGED_BEACON |
@@ -1585,6 +1588,11 @@ u8 *ieee80211_ie_build_ht_info(u8 *pos,
        }
        if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
                ht_info->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY;
+
+       /*
+        * Note: According to 802.11n-2009 9.13.3.1, HT Protection field and
+        * RIFS Mode are reserved in IBSS mode, therefore keep them at 0
+        */
        ht_info->operation_mode = 0x0000;
        ht_info->stbc_param = 0x0000;
 
index 4332711..89511be 100644 (file)
@@ -52,6 +52,30 @@ static int wme_downgrade_ac(struct sk_buff *skb)
        }
 }
 
+/* Indicate which queue to use for this fully formed 802.11 frame */
+u16 ieee80211_select_queue_80211(struct ieee80211_local *local,
+                                struct sk_buff *skb,
+                                struct ieee80211_hdr *hdr)
+{
+       u8 *p;
+
+       if (local->hw.queues < 4)
+               return 0;
+
+       if (!ieee80211_is_data(hdr->frame_control)) {
+               skb->priority = 7;
+               return ieee802_1d_to_ac[skb->priority];
+       }
+       if (!ieee80211_is_data_qos(hdr->frame_control)) {
+               skb->priority = 0;
+               return ieee802_1d_to_ac[skb->priority];
+       }
+
+       p = ieee80211_get_qos_ctl(hdr);
+       skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
+
+       return ieee80211_downgrade_queue(local, skb);
+}
 
 /* Indicate which queue to use. */
 u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
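
ieee80211_select_queue_80211() maps an already-built 802.11 frame to a
hardware queue: management and other non-data frames are pinned to priority 7,
non-QoS data to priority 0, and QoS data uses the TID from the QoS Control
field together with the shared ieee802_1d_to_ac[] table. A sketch of that
table follows; the values are assumed from net/wireless/util.c of this era,
with the access-category numbering from include/net/mac80211.h.

#include <stdio.h>

/* Access-category numbering assumed from include/net/mac80211.h. */
enum { AC_VO = 0, AC_VI = 1, AC_BE = 2, AC_BK = 3 };

/* Sketch of ieee802_1d_to_ac[]: 802.1D user priority (TID & 7) to WMM
 * access category. */
static const int tid_to_ac[8] = {
	AC_BE,	/* 0: best effort */
	AC_BK,	/* 1: background  */
	AC_BK,	/* 2: background  */
	AC_BE,	/* 3: best effort */
	AC_VI,	/* 4: video       */
	AC_VI,	/* 5: video       */
	AC_VO,	/* 6: voice       */
	AC_VO,	/* 7: voice       */
};

int main(void)
{
	int tid;

	for (tid = 0; tid < 8; tid++)
		printf("TID %d -> AC %d\n", tid, tid_to_ac[tid]);
	return 0;
}
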
@@ -139,6 +163,7 @@ void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
                           struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (void *)skb->data;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
        /* Fill in the QoS header if there is one. */
        if (ieee80211_is_data_qos(hdr->frame_control)) {
@@ -150,9 +175,12 @@ void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
                /* preserve EOSP bit */
                ack_policy = *p & IEEE80211_QOS_CTL_EOSP;
 
-               if (unlikely(sdata->local->wifi_wme_noack_test) ||
-                   is_multicast_ether_addr(hdr->addr1))
+               if (is_multicast_ether_addr(hdr->addr1) ||
+                   sdata->noack_map & BIT(tid)) {
                        ack_policy |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK;
+                       info->flags |= IEEE80211_TX_CTL_NO_ACK;
+               }
+
                /* qos header is 2 bytes */
                *p++ = ack_policy | tid;
                *p = ieee80211_vif_is_mesh(&sdata->vif) ?
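
Beyond the multicast case, the ACK policy is now also forced to No Ack when
the frame's TID is flagged in the interface's per-TID noack_map bitmap, and
the skb is marked IEEE80211_TX_CTL_NO_ACK so the status path does not expect
an acknowledgement. A small sketch of the bitmap test, reusing the hunk's
noack_map name but none of the surrounding plumbing:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1U << (n))

/* Sketch of the per-TID no-ack test: noack_map carries one bit per TID
 * (16 bits for TIDs 0..15); a set bit forces the No Ack policy for that
 * traffic class.  Only the check itself is modelled here. */
static int tid_is_noack(uint16_t noack_map, uint8_t tid)
{
	return !!(noack_map & BIT(tid));
}

int main(void)
{
	uint16_t map = BIT(1) | BIT(6);	/* No Ack for TIDs 1 and 6 */

	printf("tid 6: %d, tid 0: %d\n",
	       tid_is_noack(map, 6), tid_is_noack(map, 0));
	return 0;
}
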
index 34e166f..94edceb 100644 (file)
@@ -15,6 +15,9 @@
 
 extern const int ieee802_1d_to_ac[8];
 
+u16 ieee80211_select_queue_80211(struct ieee80211_local *local,
+                                struct sk_buff *skb,
+                                struct ieee80211_hdr *hdr);
 u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
                           struct sk_buff *skb);
 void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
index 6884a2d..c6dd01a 100644 (file)
@@ -862,44 +862,6 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
        kfree_skb(skb);
 }
 
-static bool ieee80211_work_ct_coexists(enum nl80211_channel_type wk_ct,
-                                      enum nl80211_channel_type oper_ct)
-{
-       switch (wk_ct) {
-       case NL80211_CHAN_NO_HT:
-               return true;
-       case NL80211_CHAN_HT20:
-               if (oper_ct != NL80211_CHAN_NO_HT)
-                       return true;
-               return false;
-       case NL80211_CHAN_HT40MINUS:
-       case NL80211_CHAN_HT40PLUS:
-               return (wk_ct == oper_ct);
-       }
-       WARN_ON(1); /* shouldn't get here */
-       return false;
-}
-
-static enum nl80211_channel_type
-ieee80211_calc_ct(enum nl80211_channel_type wk_ct,
-                 enum nl80211_channel_type oper_ct)
-{
-       switch (wk_ct) {
-       case NL80211_CHAN_NO_HT:
-               return oper_ct;
-       case NL80211_CHAN_HT20:
-               if (oper_ct != NL80211_CHAN_NO_HT)
-                       return oper_ct;
-               return wk_ct;
-       case NL80211_CHAN_HT40MINUS:
-       case NL80211_CHAN_HT40PLUS:
-               return wk_ct;
-       }
-       WARN_ON(1); /* shouldn't get here */
-       return wk_ct;
-}
-
-
 static void ieee80211_work_timer(unsigned long data)
 {
        struct ieee80211_local *local = (void *) data;
@@ -950,40 +912,12 @@ static void ieee80211_work_work(struct work_struct *work)
                }
 
                if (!started && !local->tmp_channel) {
-                       bool on_oper_chan, on_oper_chan2;
-                       enum nl80211_channel_type wk_ct;
-
-                       on_oper_chan = ieee80211_cfg_on_oper_channel(local);
-
-                       /* Work with existing channel type if possible. */
-                       wk_ct = wk->chan_type;
-                       if (wk->chan == local->hw.conf.channel)
-                               wk_ct = ieee80211_calc_ct(wk->chan_type,
-                                               local->hw.conf.channel_type);
+                       ieee80211_offchannel_stop_vifs(local, true);
 
                        local->tmp_channel = wk->chan;
-                       local->tmp_channel_type = wk_ct;
-                       /*
-                        * Leave the station vifs in awake mode if they
-                        * happen to be on the same channel as
-                        * the requested channel.
-                        */
-                       on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
-                       if (on_oper_chan != on_oper_chan2) {
-                               if (on_oper_chan2) {
-                                       /* going off oper channel, PS too */
-                                       ieee80211_offchannel_stop_vifs(local,
-                                                                      true);
-                                       ieee80211_hw_config(local, 0);
-                               } else {
-                                       /* going on channel, but leave PS
-                                        * off-channel. */
-                                       ieee80211_hw_config(local, 0);
-                                       ieee80211_offchannel_return(local,
-                                                                   true,
-                                                                   false);
-                               }
-                       }
+                       local->tmp_channel_type = wk->chan_type;
+
+                       ieee80211_hw_config(local, 0);
 
                        started = true;
                        wk->timeout = jiffies;
@@ -1052,34 +986,17 @@ static void ieee80211_work_work(struct work_struct *work)
        list_for_each_entry(wk, &local->work_list, list) {
                if (!wk->started)
                        continue;
-               if (wk->chan != local->tmp_channel)
-                       continue;
-               if (!ieee80211_work_ct_coexists(wk->chan_type,
-                                               local->tmp_channel_type))
+               if (wk->chan != local->tmp_channel ||
+                   wk->chan_type != local->tmp_channel_type)
                        continue;
                remain_off_channel = true;
        }
 
        if (!remain_off_channel && local->tmp_channel) {
                local->tmp_channel = NULL;
-               /* If tmp_channel wasn't operating channel, then
-                * we need to go back on-channel.
-                * NOTE:  If we can ever be here while scannning,
-                * or if the hw_config() channel config logic changes,
-                * then we may need to do a more thorough check to see if
-                * we still need to do a hardware config.  Currently,
-                * we cannot be here while scanning, however.
-                */
-               if (!ieee80211_cfg_on_oper_channel(local))
-                       ieee80211_hw_config(local, 0);
+               ieee80211_hw_config(local, 0);
 
-               /* At the least, we need to disable offchannel_ps,
-                * so just go ahead and run the entire offchannel
-                * return logic here.  We *could* skip enabling
-                * beaconing if we were already on-oper-channel
-                * as a future optimization.
-                */
-               ieee80211_offchannel_return(local, true, true);
+               ieee80211_offchannel_return(local, true);
 
                /* give connection some time to breathe */
                run_again(local, jiffies + HZ/2);
index 4aa0f4b..b4e8ff0 100644 (file)
@@ -229,7 +229,7 @@ int skb_make_writable(struct sk_buff *skb, unsigned int writable_len)
 }
 EXPORT_SYMBOL(skb_make_writable);
 
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 /* This does not belong here, but locally generated errors need it if connection
    tracking in use: without this, connection may not be in hash table, and hence
    manufactured ICMP or RST packets will not be associated with it. */
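
This is the first of many mechanical conversions in the netfilter and NetLabel
files below: the open-coded "defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)"
test becomes IS_ENABLED(CONFIG_FOO), which is true whether the option is built
in or built as a module. The stand-alone model below behaves the same way for
preprocessor-visible options; the in-tree macro in include/linux/kconfig.h
differs in detail, so treat this only as a sketch.

#include <stdio.h>

/*
 * Stand-alone model of IS_ENABLED(): Kconfig defines CONFIG_FOO as 1 for
 * built-in options and CONFIG_FOO_MODULE as 1 for modular ones, and
 * IS_ENABLED(CONFIG_FOO) evaluates to 1 in either case.  The macro
 * machinery mirrors the argument-placeholder trick used by the kernel
 * header (GCC/Clang accept the empty variadic argument).
 */
#define __ARG_PLACEHOLDER_1		0,
#define __take_second_arg(ignored, val, ...)	val
#define __is_defined(x)			___is_defined(x)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)
#define IS_ENABLED(option) \
	(__is_defined(option) || __is_defined(option##_MODULE))

#define CONFIG_NF_CONNTRACK_MODULE 1	/* pretend conntrack is modular */

int main(void)
{
	printf("IS_ENABLED(CONFIG_NF_CONNTRACK) = %d\n",
	       IS_ENABLED(CONFIG_NF_CONNTRACK));	/* prints 1 */
	return 0;
}
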
index b71a6e7..1f03556 100644 (file)
@@ -109,7 +109,7 @@ ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
 }
 EXPORT_SYMBOL_GPL(ip_set_get_ip4_port);
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 bool
 ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
                    __be16 *port, u8 *proto)
index 13d607a..1aa5cac 100644 (file)
@@ -108,7 +108,7 @@ static bool ip_vs_sip_ct_match(const struct ip_vs_conn_param *p,
                                  struct ip_vs_conn *ct)
 
 {
-       bool ret = 0;
+       bool ret = false;
 
        if (ct->af == p->af &&
            ip_vs_addr_equal(p->af, p->caddr, &ct->caddr) &&
@@ -121,7 +121,7 @@ static bool ip_vs_sip_ct_match(const struct ip_vs_conn_param *p,
            ct->protocol == p->protocol &&
            ct->pe_data && ct->pe_data_len == p->pe_data_len &&
            !memcmp(ct->pe_data, p->pe_data, p->pe_data_len))
-               ret = 1;
+               ret = true;
 
        IP_VS_DBG_BUF(9, "SIP template match %s %s->%s:%d %s\n",
                      ip_vs_proto_name(p->protocol),
index 38a576d..72b82b8 100644 (file)
@@ -541,7 +541,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
         * Avoid duplicate tuple in reply direction for NAT traffic
         * to local address when connection is sync-ed
         */
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        if (cp->flags & IP_VS_CONN_F_SYNC && local) {
                enum ip_conntrack_info ctinfo;
                struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo);
@@ -658,7 +658,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
         * Avoid duplicate tuple in reply direction for NAT traffic
         * to local address when connection is sync-ed
         */
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        if (cp->flags & IP_VS_CONN_F_SYNC && local) {
                enum ip_conntrack_info ctinfo;
                struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo);
@@ -1173,7 +1173,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
         * Avoid duplicate tuple in reply direction for NAT traffic
         * to local address when connection is sync-ed
         */
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        if (cp->flags & IP_VS_CONN_F_SYNC && local) {
                enum ip_conntrack_info ctinfo;
                struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo);
@@ -1293,7 +1293,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
         * Avoid duplicate tuple in reply direction for NAT traffic
         * to local address when connection is sync-ed
         */
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        if (cp->flags & IP_VS_CONN_F_SYNC && local) {
                enum ip_conntrack_info ctinfo;
                struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo);
index 9332906..f4f8cda 100644 (file)
@@ -18,7 +18,7 @@
 #include <net/netfilter/nf_conntrack_extend.h>
 #include <net/netfilter/nf_conntrack_acct.h>
 
-static int nf_ct_acct __read_mostly;
+static bool nf_ct_acct __read_mostly;
 
 module_param_named(acct, nf_ct_acct, bool, 0644);
 MODULE_PARM_DESC(acct, "Enable connection tracking flow accounting.");
index b76090f..e875f89 100644 (file)
@@ -1084,7 +1084,7 @@ static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
 };
 #endif
 
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
@@ -1339,8 +1339,7 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
                                        get_order(sz));
        if (!hash) {
                printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
-               hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
-                                PAGE_KERNEL);
+               hash = vzalloc(sz);
        }
 
        if (hash && nulls)
index 6f5801e..8c5c95c 100644 (file)
@@ -42,7 +42,7 @@ static u_int16_t ports[MAX_PORTS];
 static unsigned int ports_c;
 module_param_array(ports, ushort, &ports_c, 0400);
 
-static int loose;
+static bool loose;
 module_param(loose, bool, 0600);
 
 unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb,
index f9368f3..722291f 100644 (file)
@@ -42,7 +42,7 @@ static int gkrouted_only __read_mostly = 1;
 module_param(gkrouted_only, int, 0600);
 MODULE_PARM_DESC(gkrouted_only, "only accept calls from gatekeeper");
 
-static int callforward_filter __read_mostly = 1;
+static bool callforward_filter __read_mostly = true;
 module_param(callforward_filter, bool, 0600);
 MODULE_PARM_DESC(callforward_filter, "only create call forwarding expectations "
                                     "if both endpoints are on different sides "
@@ -743,8 +743,7 @@ static int callforward_do_filter(const union nf_inet_addr *src,
                }
                break;
        }
-#if defined(CONFIG_NF_CONNTRACK_IPV6) || \
-    defined(CONFIG_NF_CONNTRACK_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV6)
        case AF_INET6: {
                struct flowi6 fl1, fl2;
                struct rt6_info *rt1, *rt2;
index 2e664a6..d6dde6d 100644 (file)
@@ -629,7 +629,7 @@ static int dccp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
        return seq_printf(s, "%s ", dccp_state_names[ct->proto.dccp.state]);
 }
 
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
                          struct nf_conn *ct)
 {
@@ -770,7 +770,7 @@ static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = {
        .error                  = dccp_error,
        .print_tuple            = dccp_print_tuple,
        .print_conntrack        = dccp_print_conntrack,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .to_nlattr              = dccp_to_nlattr,
        .nlattr_size            = dccp_nlattr_size,
        .from_nlattr            = nlattr_to_dccp,
@@ -792,7 +792,7 @@ static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
        .error                  = dccp_error,
        .print_tuple            = dccp_print_tuple,
        .print_conntrack        = dccp_print_conntrack,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .to_nlattr              = dccp_to_nlattr,
        .nlattr_size            = dccp_nlattr_size,
        .from_nlattr            = nlattr_to_dccp,
index d69facd..f033879 100644 (file)
@@ -291,7 +291,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
        .new             = gre_new,
        .destroy         = gre_destroy,
        .me              = THIS_MODULE,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
        .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
        .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
index 6772b11..afa6913 100644 (file)
@@ -461,7 +461,7 @@ static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
        return true;
 }
 
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
@@ -666,7 +666,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
        .packet                 = sctp_packet,
        .new                    = sctp_new,
        .me                     = THIS_MODULE,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .to_nlattr              = sctp_to_nlattr,
        .nlattr_size            = sctp_nlattr_size,
        .from_nlattr            = nlattr_to_sctp,
@@ -696,7 +696,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
        .packet                 = sctp_packet,
        .new                    = sctp_new,
        .me                     = THIS_MODULE,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .to_nlattr              = sctp_to_nlattr,
        .nlattr_size            = sctp_nlattr_size,
        .from_nlattr            = nlattr_to_sctp,
index 8235b86..97b9f3e 100644 (file)
@@ -1126,7 +1126,7 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
        return true;
 }
 
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
@@ -1447,7 +1447,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
        .packet                 = tcp_packet,
        .new                    = tcp_new,
        .error                  = tcp_error,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .to_nlattr              = tcp_to_nlattr,
        .nlattr_size            = tcp_nlattr_size,
        .from_nlattr            = nlattr_to_tcp,
@@ -1479,7 +1479,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly =
        .packet                 = tcp_packet,
        .new                    = tcp_new,
        .error                  = tcp_error,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .to_nlattr              = tcp_to_nlattr,
        .nlattr_size            = tcp_nlattr_size,
        .from_nlattr            = nlattr_to_tcp,
index 8289088..5f35757 100644 (file)
@@ -188,7 +188,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
        .packet                 = udp_packet,
        .new                    = udp_new,
        .error                  = udp_error,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .tuple_to_nlattr        = nf_ct_port_tuple_to_nlattr,
        .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
        .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
@@ -216,7 +216,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
        .packet                 = udp_packet,
        .new                    = udp_new,
        .error                  = udp_error,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .tuple_to_nlattr        = nf_ct_port_tuple_to_nlattr,
        .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
        .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
index 263b5a7..f52ca11 100644 (file)
@@ -174,7 +174,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
        .packet                 = udplite_packet,
        .new                    = udplite_new,
        .error                  = udplite_error,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .tuple_to_nlattr        = nf_ct_port_tuple_to_nlattr,
        .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
        .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
@@ -198,7 +198,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
        .packet                 = udplite_packet,
        .new                    = udplite_new,
        .error                  = udplite_error,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .tuple_to_nlattr        = nf_ct_port_tuple_to_nlattr,
        .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
        .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
index af7dd31..e8d27af 100644 (file)
@@ -15,7 +15,7 @@
 #include <net/netfilter/nf_conntrack_extend.h>
 #include <net/netfilter/nf_conntrack_timestamp.h>
 
-static int nf_ct_tstamp __read_mostly;
+static bool nf_ct_tstamp __read_mostly;
 
 module_param_named(tstamp, nf_ct_tstamp, bool, 0644);
 MODULE_PARM_DESC(tstamp, "Enable connection tracking flow timestamping.");
index d4f4b5d..95237c8 100644 (file)
@@ -49,7 +49,7 @@ static u32 hash_v4(const struct sk_buff *skb)
        return jhash_2words((__force u32)ipaddr, iph->protocol, jhash_initval);
 }
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 static u32 hash_v6(const struct sk_buff *skb)
 {
        const struct ipv6hdr *ip6h = ipv6_hdr(skb);
@@ -74,7 +74,7 @@ nfqueue_tg_v1(struct sk_buff *skb, const struct xt_action_param *par)
                if (par->family == NFPROTO_IPV4)
                        queue = (((u64) hash_v4(skb) * info->queues_total) >>
                                 32) + queue;
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
                else if (par->family == NFPROTO_IPV6)
                        queue = (((u64) hash_v6(skb) * info->queues_total) >>
                                 32) + queue;
index ba72262..190ad37 100644 (file)
@@ -198,7 +198,7 @@ tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par)
        return XT_CONTINUE;
 }
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 static unsigned int
 tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 {
@@ -260,7 +260,7 @@ static int tcpmss_tg4_check(const struct xt_tgchk_param *par)
        return -EINVAL;
 }
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 static int tcpmss_tg6_check(const struct xt_tgchk_param *par)
 {
        const struct xt_tcpmss_info *info = par->targinfo;
@@ -293,7 +293,7 @@ static struct xt_target tcpmss_tg_reg[] __read_mostly = {
                .proto          = IPPROTO_TCP,
                .me             = THIS_MODULE,
        },
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        {
                .family         = NFPROTO_IPV6,
                .name           = "TCPMSS",
index 3a295cc..25fd1c4 100644 (file)
@@ -80,7 +80,7 @@ tcpoptstrip_tg4(struct sk_buff *skb, const struct xt_action_param *par)
               sizeof(struct iphdr) + sizeof(struct tcphdr));
 }
 
-#if defined(CONFIG_IP6_NF_MANGLE) || defined(CONFIG_IP6_NF_MANGLE_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_MANGLE)
 static unsigned int
 tcpoptstrip_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 {
@@ -109,7 +109,7 @@ static struct xt_target tcpoptstrip_tg_reg[] __read_mostly = {
                .targetsize = sizeof(struct xt_tcpoptstrip_target_info),
                .me         = THIS_MODULE,
        },
-#if defined(CONFIG_IP6_NF_MANGLE) || defined(CONFIG_IP6_NF_MANGLE_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_MANGLE)
        {
                .name       = "TCPOPTSTRIP",
                .family     = NFPROTO_IPV6,
index 5f054a0..3aae66f 100644 (file)
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_TEE.h>
 
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 #      define WITH_CONNTRACK 1
 #      include <net/netfilter/nf_conntrack.h>
 #endif
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-#      define WITH_IPV6 1
-#endif
 
 struct xt_tee_priv {
        struct notifier_block   notifier;
@@ -136,7 +133,7 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
        return XT_CONTINUE;
 }
 
-#ifdef WITH_IPV6
+#if IS_ENABLED(CONFIG_IPV6)
 static bool
 tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
 {
@@ -196,7 +193,7 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
        }
        return XT_CONTINUE;
 }
-#endif /* WITH_IPV6 */
+#endif
 
 static int tee_netdev_event(struct notifier_block *this, unsigned long event,
                            void *ptr)
@@ -276,7 +273,7 @@ static struct xt_target tee_tg_reg[] __read_mostly = {
                .destroy    = tee_tg_destroy,
                .me         = THIS_MODULE,
        },
-#ifdef WITH_IPV6
+#if IS_ENABLED(CONFIG_IPV6)
        {
                .name       = "TEE",
                .revision   = 1,
index dcfd57e..35a959a 100644 (file)
@@ -22,7 +22,7 @@
 
 #include <net/netfilter/ipv4/nf_defrag_ipv4.h>
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 #define XT_TPROXY_HAVE_IPV6 1
 #include <net/if_inet6.h>
 #include <net/addrconf.h>
index c047de2..49c5ff7 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/ip.h>
 #include <net/route.h>
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
 #include <net/ip6_fib.h>
@@ -31,7 +31,7 @@ MODULE_DESCRIPTION("Xtables: address type match");
 MODULE_ALIAS("ipt_addrtype");
 MODULE_ALIAS("ip6t_addrtype");
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
                            const struct in6_addr *addr)
 {
@@ -149,7 +149,7 @@ addrtype_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
        else if (info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT)
                dev = par->out;
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        if (par->family == NFPROTO_IPV6)
                return addrtype_mt6(net, dev, skb, info);
 #endif
@@ -190,7 +190,7 @@ static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par)
                return -EINVAL;
        }
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        if (par->family == NFPROTO_IPV6) {
                if ((info->source | info->dest) & XT_ADDRTYPE_BLACKHOLE) {
                        pr_err("ipv6 BLACKHOLE matching not supported\n");
index 2b8418c..e595e07 100644 (file)
@@ -87,10 +87,10 @@ connbytes_mt(const struct sk_buff *skb, struct xt_action_param *par)
                break;
        }
 
-       if (sinfo->count.to)
+       if (sinfo->count.to >= sinfo->count.from)
                return what <= sinfo->count.to && what >= sinfo->count.from;
-       else
-               return what >= sinfo->count.from;
+       else /* inverted */
+               return what < sinfo->count.to || what > sinfo->count.from;
 }
 
 static int connbytes_mt_check(const struct xt_mtchk_param *par)
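
With this change xt_connbytes gains an inverted-range mode: a rule whose upper
bound is smaller than its lower bound now matches counters outside that
interval instead of degenerating to a one-sided test. A small user-space
sketch of the predicate, with plain integers standing in for the conntrack
byte/packet counters:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the updated range test: when to >= from the rule matches
 * inside [from, to]; when to < from the range is treated as inverted and
 * the rule matches outside (to, from). */
static int connbytes_in_range(uint64_t what, uint64_t from, uint64_t to)
{
	if (to >= from)
		return what <= to && what >= from;
	return what < to || what > from;	/* inverted */
}

int main(void)
{
	printf("%d\n", connbytes_in_range(50, 10, 100));	/* 1: inside    */
	printf("%d\n", connbytes_in_range(50, 100, 10));	/* 0: inverted  */
	printf("%d\n", connbytes_in_range(5, 100, 10));		/* 1: inverted  */
	return 0;
}
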
index 068698f..8e49921 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/mm.h>
 #include <linux/in.h>
 #include <linux/ip.h>
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 #include <linux/ipv6.h>
 #include <net/ipv6.h>
 #endif
@@ -64,7 +64,7 @@ struct dsthash_dst {
                        __be32 src;
                        __be32 dst;
                } ip;
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
                struct {
                        __be32 src[4];
                        __be32 dst[4];
@@ -413,7 +413,7 @@ static inline __be32 maskl(__be32 a, unsigned int l)
        return l ? htonl(ntohl(a) & ~0 << (32 - l)) : 0;
 }
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 static void hashlimit_ipv6_mask(__be32 *i, unsigned int p)
 {
        switch (p) {
@@ -464,7 +464,7 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
                        return 0;
                nexthdr = ip_hdr(skb)->protocol;
                break;
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        case NFPROTO_IPV6:
                if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP) {
                        memcpy(&dst->ip6.dst, &ipv6_hdr(skb)->daddr,
@@ -616,7 +616,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = {
                .destroy        = hashlimit_mt_destroy,
                .me             = THIS_MODULE,
        },
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        {
                .name           = "hashlimit",
                .revision       = 1,
@@ -693,7 +693,7 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
                                 ent->rateinfo.credit, ent->rateinfo.credit_cap,
                                 ent->rateinfo.cost);
                break;
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        case NFPROTO_IPV6:
                res = seq_printf(s, "%ld %pI6:%u->%pI6:%u %u %u %u\n",
                                 (long)(ent->expires - jiffies)/HZ,
@@ -761,7 +761,7 @@ static int __net_init hashlimit_proc_net_init(struct net *net)
        hashlimit_net->ipt_hashlimit = proc_mkdir("ipt_hashlimit", net->proc_net);
        if (!hashlimit_net->ipt_hashlimit)
                return -ENOMEM;
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        hashlimit_net->ip6t_hashlimit = proc_mkdir("ip6t_hashlimit", net->proc_net);
        if (!hashlimit_net->ip6t_hashlimit) {
                proc_net_remove(net, "ipt_hashlimit");
@@ -774,7 +774,7 @@ static int __net_init hashlimit_proc_net_init(struct net *net)
 static void __net_exit hashlimit_proc_net_exit(struct net *net)
 {
        proc_net_remove(net, "ipt_hashlimit");
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        proc_net_remove(net, "ip6t_hashlimit");
 #endif
 }
index c302e30..72bb07f 100644 (file)
@@ -22,7 +22,7 @@
 #include <net/netfilter/nf_tproxy_core.h>
 #include <net/netfilter/ipv4/nf_defrag_ipv4.h>
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 #define XT_SOCKET_HAVE_IPV6 1
 #include <linux/netfilter_ipv6/ip6_tables.h>
 #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
@@ -30,7 +30,7 @@
 
 #include <linux/netfilter/xt_socket.h>
 
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 #define XT_SOCKET_HAVE_CONNTRACK 1
 #include <net/netfilter/nf_conntrack.h>
 #endif
index 96b749d..6f17013 100644 (file)
@@ -96,7 +96,7 @@ struct netlbl_af4list *netlbl_af4list_search_exact(__be32 addr,
 }
 
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /**
  * netlbl_af6list_search - Search for a matching IPv6 address entry
  * @addr: IPv6 address
@@ -185,7 +185,7 @@ int netlbl_af4list_add(struct netlbl_af4list *entry, struct list_head *head)
        return 0;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /**
  * netlbl_af6list_add - Add a new IPv6 address entry to a list
  * @entry: address entry
@@ -263,7 +263,7 @@ struct netlbl_af4list *netlbl_af4list_remove(__be32 addr, __be32 mask,
        return entry;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /**
  * netlbl_af6list_remove_entry - Remove an IPv6 address entry
  * @entry: address entry
@@ -342,7 +342,7 @@ void netlbl_af4list_audit_addr(struct audit_buffer *audit_buf,
        }
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /**
  * netlbl_af6list_audit_addr - Audit an IPv6 address
  * @audit_buf: audit buffer
index fdbc1d2..a1287ce 100644 (file)
@@ -133,7 +133,7 @@ static inline void netlbl_af4list_audit_addr(struct audit_buffer *audit_buf,
 }
 #endif
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 
 #define __af6list_entry(ptr) container_of(ptr, struct netlbl_af6list, list)
 
index 3f905e5..3820411 100644 (file)
@@ -78,7 +78,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry)
        struct netlbl_dom_map *ptr;
        struct netlbl_af4list *iter4;
        struct netlbl_af4list *tmp4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct netlbl_af6list *iter6;
        struct netlbl_af6list *tmp6;
 #endif /* IPv6 */
@@ -90,7 +90,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry)
                        netlbl_af4list_remove_entry(iter4);
                        kfree(netlbl_domhsh_addr4_entry(iter4));
                }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                netlbl_af6list_foreach_safe(iter6, tmp6,
                                            &ptr->type_def.addrsel->list6) {
                        netlbl_af6list_remove_entry(iter6);
@@ -217,7 +217,7 @@ static void netlbl_domhsh_audit_add(struct netlbl_dom_map *entry,
                        cipsov4 = map4->type_def.cipsov4;
                        netlbl_af4list_audit_addr(audit_buf, 0, NULL,
                                                  addr4->addr, addr4->mask);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                } else if (addr6 != NULL) {
                        struct netlbl_domaddr6_map *map6;
                        map6 = netlbl_domhsh_addr6_entry(addr6);
@@ -306,7 +306,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
        struct netlbl_dom_map *entry_old;
        struct netlbl_af4list *iter4;
        struct netlbl_af4list *tmp4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct netlbl_af6list *iter6;
        struct netlbl_af6list *tmp6;
 #endif /* IPv6 */
@@ -338,7 +338,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
                                               &entry->type_def.addrsel->list4)
                                netlbl_domhsh_audit_add(entry, iter4, NULL,
                                                        ret_val, audit_info);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                        netlbl_af6list_foreach_rcu(iter6,
                                               &entry->type_def.addrsel->list6)
                                netlbl_domhsh_audit_add(entry, NULL, iter6,
@@ -365,7 +365,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
                                ret_val = -EEXIST;
                                goto add_return;
                        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                netlbl_af6list_foreach_rcu(iter6,
                                           &entry->type_def.addrsel->list6)
                        if (netlbl_af6list_search_exact(&iter6->addr,
@@ -386,7 +386,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
                        if (ret_val != 0)
                                goto add_return;
                }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                netlbl_af6list_foreach_safe(iter6, tmp6,
                                            &entry->type_def.addrsel->list6) {
                        netlbl_af6list_remove_entry(iter6);
@@ -510,7 +510,7 @@ int netlbl_domhsh_remove_af4(const char *domain,
        struct netlbl_dom_map *entry_map;
        struct netlbl_af4list *entry_addr;
        struct netlbl_af4list *iter4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct netlbl_af6list *iter6;
 #endif /* IPv6 */
        struct netlbl_domaddr4_map *entry;
@@ -533,7 +533,7 @@ int netlbl_domhsh_remove_af4(const char *domain,
                goto remove_af4_failure;
        netlbl_af4list_foreach_rcu(iter4, &entry_map->type_def.addrsel->list4)
                goto remove_af4_single_addr;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        netlbl_af6list_foreach_rcu(iter6, &entry_map->type_def.addrsel->list6)
                goto remove_af4_single_addr;
 #endif /* IPv6 */
@@ -644,7 +644,7 @@ struct netlbl_domaddr4_map *netlbl_domhsh_getentry_af4(const char *domain,
        return netlbl_domhsh_addr4_entry(addr_iter);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /**
  * netlbl_domhsh_getentry_af6 - Get an entry from the domain hash table
  * @domain: the domain name to search for
index bfcc0f7..90872c4 100644 (file)
@@ -104,7 +104,7 @@ int netlbl_domhsh_walk(u32 *skip_bkt,
                     int (*callback) (struct netlbl_dom_map *entry, void *arg),
                     void *cb_arg);
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 struct netlbl_domaddr6_map *netlbl_domhsh_getentry_af6(const char *domain,
                                                  const struct in6_addr *addr);
 #endif /* IPv6 */
index 5952237..2560e7b 100644 (file)
@@ -147,7 +147,7 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
                                goto cfg_unlbl_map_add_failure;
                        break;
                        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                case AF_INET6: {
                        const struct in6_addr *addr6 = addr;
                        const struct in6_addr *mask6 = mask;
@@ -227,7 +227,7 @@ int netlbl_cfg_unlbl_static_add(struct net *net,
        case AF_INET:
                addr_len = sizeof(struct in_addr);
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                addr_len = sizeof(struct in6_addr);
                break;
@@ -270,7 +270,7 @@ int netlbl_cfg_unlbl_static_del(struct net *net,
        case AF_INET:
                addr_len = sizeof(struct in_addr);
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                addr_len = sizeof(struct in6_addr);
                break;
@@ -673,7 +673,7 @@ int netlbl_sock_setattr(struct sock *sk,
                        ret_val = -ENOENT;
                }
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                /* since we don't support any IPv6 labeling protocols right
                 * now we can optimize everything away until we do */
@@ -724,7 +724,7 @@ int netlbl_sock_getattr(struct sock *sk,
        case AF_INET:
                ret_val = cipso_v4_sock_getattr(sk, secattr);
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                ret_val = -ENOMSG;
                break;
@@ -782,7 +782,7 @@ int netlbl_conn_setattr(struct sock *sk,
                        ret_val = -ENOENT;
                }
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                /* since we don't support any IPv6 labeling protocols right
                 * now we can optimize everything away until we do */
@@ -853,7 +853,7 @@ int netlbl_req_setattr(struct request_sock *req,
                        ret_val = -ENOENT;
                }
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                /* since we don't support any IPv6 labeling protocols right
                 * now we can optimize everything away until we do */
@@ -926,7 +926,7 @@ int netlbl_skbuff_setattr(struct sk_buff *skb,
                        ret_val = -ENOENT;
                }
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                /* since we don't support any IPv6 labeling protocols right
                 * now we can optimize everything away until we do */
@@ -965,7 +965,7 @@ int netlbl_skbuff_getattr(const struct sk_buff *skb,
                    cipso_v4_skbuff_getattr(skb, secattr) == 0)
                        return 0;
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                break;
 #endif /* IPv6 */
index 9879300..4809e2e 100644 (file)
@@ -184,7 +184,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
 
                entry->type = NETLBL_NLTYPE_ADDRSELECT;
                entry->type_def.addrsel = addrmap;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        } else if (info->attrs[NLBL_MGMT_A_IPV6ADDR]) {
                struct in6_addr *addr;
                struct in6_addr *mask;
@@ -270,7 +270,7 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
        struct nlattr *nla_a;
        struct nlattr *nla_b;
        struct netlbl_af4list *iter4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct netlbl_af6list *iter6;
 #endif
 
@@ -324,7 +324,7 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
 
                        nla_nest_end(skb, nla_b);
                }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                netlbl_af6list_foreach_rcu(iter6,
                                           &entry->type_def.addrsel->list6) {
                        struct netlbl_domaddr6_map *map6;
index 049ccd2..4b5fa0f 100644 (file)
@@ -170,7 +170,7 @@ static void netlbl_unlhsh_free_iface(struct rcu_head *entry)
        struct netlbl_unlhsh_iface *iface;
        struct netlbl_af4list *iter4;
        struct netlbl_af4list *tmp4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct netlbl_af6list *iter6;
        struct netlbl_af6list *tmp6;
 #endif /* IPv6 */
@@ -184,7 +184,7 @@ static void netlbl_unlhsh_free_iface(struct rcu_head *entry)
                netlbl_af4list_remove_entry(iter4);
                kfree(netlbl_unlhsh_addr4_entry(iter4));
        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        netlbl_af6list_foreach_safe(iter6, tmp6, &iface->addr6_list) {
                netlbl_af6list_remove_entry(iter6);
                kfree(netlbl_unlhsh_addr6_entry(iter6));
@@ -274,7 +274,7 @@ static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface,
        return ret_val;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /**
  * netlbl_unlhsh_add_addr6 - Add a new IPv6 address entry to the hash table
  * @iface: the associated interface entry
@@ -436,7 +436,7 @@ int netlbl_unlhsh_add(struct net *net,
                                                  mask4->s_addr);
                break;
        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case sizeof(struct in6_addr): {
                const struct in6_addr *addr6 = addr;
                const struct in6_addr *mask6 = mask;
@@ -531,7 +531,7 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
        return 0;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /**
  * netlbl_unlhsh_remove_addr6 - Remove an IPv6 address entry
  * @net: network namespace
@@ -606,14 +606,14 @@ static int netlbl_unlhsh_remove_addr6(struct net *net,
 static void netlbl_unlhsh_condremove_iface(struct netlbl_unlhsh_iface *iface)
 {
        struct netlbl_af4list *iter4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct netlbl_af6list *iter6;
 #endif /* IPv6 */
 
        spin_lock(&netlbl_unlhsh_lock);
        netlbl_af4list_foreach_rcu(iter4, &iface->addr4_list)
                goto unlhsh_condremove_failure;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        netlbl_af6list_foreach_rcu(iter6, &iface->addr6_list)
                goto unlhsh_condremove_failure;
 #endif /* IPv6 */
@@ -680,7 +680,7 @@ int netlbl_unlhsh_remove(struct net *net,
                                                     iface, addr, mask,
                                                     audit_info);
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case sizeof(struct in6_addr):
                ret_val = netlbl_unlhsh_remove_addr6(net,
                                                     iface, addr, mask,
@@ -1196,7 +1196,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
        struct netlbl_unlhsh_iface *iface;
        struct list_head *iter_list;
        struct netlbl_af4list *addr4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct netlbl_af6list *addr6;
 #endif
 
@@ -1228,7 +1228,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
                                        goto unlabel_staticlist_return;
                                }
                        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                        netlbl_af6list_foreach_rcu(addr6,
                                                   &iface->addr6_list) {
                                if (iter_addr6++ < skip_addr6)
@@ -1277,7 +1277,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
        u32 skip_addr6 = cb->args[1];
        u32 iter_addr4 = 0;
        struct netlbl_af4list *addr4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        u32 iter_addr6 = 0;
        struct netlbl_af6list *addr6;
 #endif
@@ -1303,7 +1303,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
                        goto unlabel_staticlistdef_return;
                }
        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) {
                if (iter_addr6++ < skip_addr6)
                        continue;
@@ -1494,7 +1494,7 @@ int netlbl_unlabel_getattr(const struct sk_buff *skb,
                secattr->attr.secid = netlbl_unlhsh_addr4_entry(addr4)->secid;
                break;
        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case PF_INET6: {
                struct ipv6hdr *hdr6;
                struct netlbl_af6list *addr6;
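
The netlabel hunks above convert every open-coded defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) test to IS_ENABLED(CONFIG_IPV6), which evaluates true for both built-in (=y) and modular (=m) IPv6. Below is a minimal user-space sketch of how that macro collapses the two cases into one test; the placeholder/paste helpers are modelled on the kernel's kconfig.h (exact macro names vary across versions), and CONFIG_IPV6_MODULE is defined by hand here purely for illustration.

/* Sketch of how IS_ENABLED() folds the =y / =m cases into one test.
 * CONFIG_IPV6_MODULE below stands in for the definition normally
 * emitted into include/generated/autoconf.h by the kconfig tooling. */
#include <stdio.h>

#define CONFIG_IPV6_MODULE 1	/* pretend IPv6 is built as a module */

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)

#define IS_BUILTIN(option) __is_defined(option)
#define IS_MODULE(option)  __is_defined(option##_MODULE)
#define IS_ENABLED(option) (IS_BUILTIN(option) || IS_MODULE(option))

int main(void)
{
	printf("IPv6 enabled: %d\n", IS_ENABLED(CONFIG_IPV6));	/* prints 1 */
	return 0;
}

With IPv6 built in instead, CONFIG_IPV6 itself would be defined and IS_BUILTIN() would supply the 1, so the call sites above no longer need to spell out both configuration symbols.
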
index 1201b6d..629b061 100644 (file)
@@ -139,12 +139,12 @@ static atomic_t nl_table_users = ATOMIC_INIT(0);
 
 static ATOMIC_NOTIFIER_HEAD(netlink_chain);
 
-static u32 netlink_group_mask(u32 group)
+static inline u32 netlink_group_mask(u32 group)
 {
        return group ? 1 << (group - 1) : 0;
 }
 
-static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
+static inline struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
 {
        return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
 }
@@ -226,8 +226,7 @@ netlink_unlock_table(void)
                wake_up(&nl_table_wait);
 }
 
-static inline struct sock *netlink_lookup(struct net *net, int protocol,
-                                         u32 pid)
+static struct sock *netlink_lookup(struct net *net, int protocol, u32 pid)
 {
        struct nl_pid_hash *hash = &nl_table[protocol].hash;
        struct hlist_head *head;
@@ -248,7 +247,7 @@ found:
        return sk;
 }
 
-static inline struct hlist_head *nl_pid_hash_zalloc(size_t size)
+static struct hlist_head *nl_pid_hash_zalloc(size_t size)
 {
        if (size <= PAGE_SIZE)
                return kzalloc(size, GFP_ATOMIC);
@@ -258,7 +257,7 @@ static inline struct hlist_head *nl_pid_hash_zalloc(size_t size)
                                         get_order(size));
 }
 
-static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
+static void nl_pid_hash_free(struct hlist_head *table, size_t size)
 {
        if (size <= PAGE_SIZE)
                kfree(table);
@@ -578,7 +577,7 @@ retry:
        return err;
 }
 
-static inline int netlink_capable(struct socket *sock, unsigned int flag)
+static inline int netlink_capable(const struct socket *sock, unsigned int flag)
 {
        return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
               capable(CAP_NET_ADMIN);
@@ -846,8 +845,7 @@ void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
        sock_put(sk);
 }
 
-static inline struct sk_buff *netlink_trim(struct sk_buff *skb,
-                                          gfp_t allocation)
+static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
 {
        int delta;
 
@@ -871,7 +869,7 @@ static inline struct sk_buff *netlink_trim(struct sk_buff *skb,
        return skb;
 }
 
-static inline void netlink_rcv_wake(struct sock *sk)
+static void netlink_rcv_wake(struct sock *sk)
 {
        struct netlink_sock *nlk = nlk_sk(sk);
 
@@ -881,7 +879,7 @@ static inline void netlink_rcv_wake(struct sock *sk)
                wake_up_interruptible(&nlk->wait);
 }
 
-static inline int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
+static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
 {
        int ret;
        struct netlink_sock *nlk = nlk_sk(sk);
@@ -952,8 +950,7 @@ int netlink_has_listeners(struct sock *sk, unsigned int group)
 }
 EXPORT_SYMBOL_GPL(netlink_has_listeners);
 
-static inline int netlink_broadcast_deliver(struct sock *sk,
-                                           struct sk_buff *skb)
+static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
 {
        struct netlink_sock *nlk = nlk_sk(sk);
 
@@ -962,7 +959,7 @@ static inline int netlink_broadcast_deliver(struct sock *sk,
                skb_set_owner_r(skb, sk);
                skb_queue_tail(&sk->sk_receive_queue, skb);
                sk->sk_data_ready(sk, skb->len);
-               return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
+               return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
        }
        return -1;
 }
@@ -982,7 +979,7 @@ struct netlink_broadcast_data {
        void *tx_data;
 };
 
-static inline int do_one_broadcast(struct sock *sk,
+static int do_one_broadcast(struct sock *sk,
                                   struct netlink_broadcast_data *p)
 {
        struct netlink_sock *nlk = nlk_sk(sk);
@@ -1110,8 +1107,7 @@ struct netlink_set_err_data {
        int code;
 };
 
-static inline int do_one_set_err(struct sock *sk,
-                                struct netlink_set_err_data *p)
+static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
 {
        struct netlink_sock *nlk = nlk_sk(sk);
        int ret = 0;
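
Two things happen in the af_netlink.c hunks above: a batch of static helpers lose their explicit inline markers (the compiler is left to decide, while genuinely trivial helpers keep them), and netlink_broadcast_deliver() now reports congestion once a receiver's queued memory exceeds half of sk_rcvbuf rather than the full limit, so broadcast senders back off before messages actually start being dropped. A toy sketch of that halved threshold follows, with the socket reduced to two plain integers; the field and function names here are illustrative, not the kernel's.

/* Illustrative sketch: report "congested" once queued receive memory
 * exceeds half of the configured receive buffer, mirroring the
 * sk_rmem_alloc > (sk_rcvbuf >> 1) test introduced above. */
#include <stdbool.h>
#include <stdio.h>

struct toy_sock {
	int rmem_alloc;	/* bytes currently queued for the receiver */
	int rcvbuf;	/* configured receive buffer limit */
};

static bool delivery_congested(const struct toy_sock *sk)
{
	return sk->rmem_alloc > (sk->rcvbuf >> 1);
}

int main(void)
{
	struct toy_sock sk = { .rmem_alloc = 70000, .rcvbuf = 131072 };

	printf("congested: %d\n", delivery_congested(&sk));	/* 70000 > 65536 -> 1 */
	return 0;
}

Enlarging a slow listener's receive buffer therefore directly enlarges the headroom available before the congestion path kicks in.
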
index 28453ae..21a8241 100644 (file)
@@ -106,7 +106,7 @@ static struct genl_ops *genl_get_cmd(u8 cmd, struct genl_family *family)
 /* Of course we are going to have problems once we hit
  * 2^16 alive types, but that can only happen by year 2K
 */
-static inline u16 genl_generate_id(void)
+static u16 genl_generate_id(void)
 {
        static u16 id_gen_idx = GENL_MIN_ID;
        int i;
index 58cddad..44c865b 100644 (file)
@@ -14,5 +14,6 @@ menuconfig NFC
          be called nfc.
 
 source "net/nfc/nci/Kconfig"
+source "net/nfc/llcp/Kconfig"
 
 source "drivers/nfc/Kconfig"
index fbb550f..7b4a6dc 100644 (file)
@@ -6,3 +6,4 @@ obj-$(CONFIG_NFC) += nfc.o
 obj-$(CONFIG_NFC_NCI) += nci/
 
 nfc-objs := core.o netlink.o af_nfc.o rawsock.o
+nfc-$(CONFIG_NFC_LLCP) += llcp/llcp.o llcp/commands.o llcp/sock.o
index 47e02c1..3ddf6e6 100644 (file)
  * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/nfc.h>
 
 #include "nfc.h"
 
 int nfc_devlist_generation;
 DEFINE_MUTEX(nfc_devlist_mutex);
 
-int nfc_printk(const char *level, const char *format, ...)
-{
-       struct va_format vaf;
-       va_list args;
-       int r;
-
-       va_start(args, format);
-
-       vaf.fmt = format;
-       vaf.va = &args;
-
-       r = printk("%sNFC: %pV\n", level, &vaf);
-
-       va_end(args);
-
-       return r;
-}
-EXPORT_SYMBOL(nfc_printk);
-
 /**
  * nfc_dev_up - turn on the NFC device
  *
@@ -63,7 +47,7 @@ int nfc_dev_up(struct nfc_dev *dev)
 {
        int rc = 0;
 
-       nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+       pr_debug("dev_name=%s\n", dev_name(&dev->dev));
 
        device_lock(&dev->dev);
 
@@ -97,7 +81,7 @@ int nfc_dev_down(struct nfc_dev *dev)
 {
        int rc = 0;
 
-       nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+       pr_debug("dev_name=%s\n", dev_name(&dev->dev));
 
        device_lock(&dev->dev);
 
@@ -139,7 +123,8 @@ int nfc_start_poll(struct nfc_dev *dev, u32 protocols)
 {
        int rc;
 
-       nfc_dbg("dev_name=%s protocols=0x%x", dev_name(&dev->dev), protocols);
+       pr_debug("dev_name=%s protocols=0x%x\n",
+                dev_name(&dev->dev), protocols);
 
        if (!protocols)
                return -EINVAL;
@@ -174,7 +159,7 @@ int nfc_stop_poll(struct nfc_dev *dev)
 {
        int rc = 0;
 
-       nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+       pr_debug("dev_name=%s\n", dev_name(&dev->dev));
 
        device_lock(&dev->dev);
 
@@ -196,6 +181,86 @@ error:
        return rc;
 }
 
+int nfc_dep_link_up(struct nfc_dev *dev, int target_index,
+                                       u8 comm_mode, u8 rf_mode)
+{
+       int rc = 0;
+
+       pr_debug("dev_name=%s comm:%d rf:%d\n",
+                       dev_name(&dev->dev), comm_mode, rf_mode);
+
+       if (!dev->ops->dep_link_up)
+               return -EOPNOTSUPP;
+
+       device_lock(&dev->dev);
+
+       if (!device_is_registered(&dev->dev)) {
+               rc = -ENODEV;
+               goto error;
+       }
+
+       if (dev->dep_link_up == true) {
+               rc = -EALREADY;
+               goto error;
+       }
+
+       rc = dev->ops->dep_link_up(dev, target_index, comm_mode, rf_mode);
+
+error:
+       device_unlock(&dev->dev);
+       return rc;
+}
+
+int nfc_dep_link_down(struct nfc_dev *dev)
+{
+       int rc = 0;
+
+       pr_debug("dev_name=%s\n", dev_name(&dev->dev));
+
+       if (!dev->ops->dep_link_down)
+               return -EOPNOTSUPP;
+
+       device_lock(&dev->dev);
+
+       if (!device_is_registered(&dev->dev)) {
+               rc = -ENODEV;
+               goto error;
+       }
+
+       if (dev->dep_link_up == false) {
+               rc = -EALREADY;
+               goto error;
+       }
+
+       if (dev->dep_rf_mode == NFC_RF_TARGET) {
+               rc = -EOPNOTSUPP;
+               goto error;
+       }
+
+       rc = dev->ops->dep_link_down(dev);
+       if (!rc) {
+               dev->dep_link_up = false;
+               nfc_llcp_mac_is_down(dev);
+               nfc_genl_dep_link_down_event(dev);
+       }
+
+error:
+       device_unlock(&dev->dev);
+       return rc;
+}
+
+int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
+                                       u8 comm_mode, u8 rf_mode)
+{
+       dev->dep_link_up = true;
+       dev->dep_rf_mode = rf_mode;
+
+       nfc_llcp_mac_is_up(dev, target_idx, comm_mode, rf_mode);
+
+       return nfc_genl_dep_link_up_event(dev, target_idx, comm_mode, rf_mode);
+}
+EXPORT_SYMBOL(nfc_dep_link_is_up);
+
 /**
  * nfc_activate_target - prepare the target for data exchange
  *
@@ -207,8 +272,8 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
 {
        int rc;
 
-       nfc_dbg("dev_name=%s target_idx=%u protocol=%u", dev_name(&dev->dev),
-                                                       target_idx, protocol);
+       pr_debug("dev_name=%s target_idx=%u protocol=%u\n",
+                dev_name(&dev->dev), target_idx, protocol);
 
        device_lock(&dev->dev);
 
@@ -236,7 +301,8 @@ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx)
 {
        int rc = 0;
 
-       nfc_dbg("dev_name=%s target_idx=%u", dev_name(&dev->dev), target_idx);
+       pr_debug("dev_name=%s target_idx=%u\n",
+                dev_name(&dev->dev), target_idx);
 
        device_lock(&dev->dev);
 
@@ -271,8 +337,8 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx,
 {
        int rc;
 
-       nfc_dbg("dev_name=%s target_idx=%u skb->len=%u", dev_name(&dev->dev),
-                                                       target_idx, skb->len);
+       pr_debug("dev_name=%s target_idx=%u skb->len=%u\n",
+                dev_name(&dev->dev), target_idx, skb->len);
 
        device_lock(&dev->dev);
 
@@ -289,13 +355,54 @@ error:
        return rc;
 }
 
+int nfc_set_remote_general_bytes(struct nfc_dev *dev, u8 *gb, u8 gb_len)
+{
+       pr_debug("dev_name=%s gb_len=%d\n",
+                       dev_name(&dev->dev), gb_len);
+
+       if (gb_len > NFC_MAX_GT_LEN)
+               return -EINVAL;
+
+       return nfc_llcp_set_remote_gb(dev, gb, gb_len);
+}
+EXPORT_SYMBOL(nfc_set_remote_general_bytes);
+
+u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, u8 *gt_len)
+{
+       return nfc_llcp_general_bytes(dev, gt_len);
+}
+EXPORT_SYMBOL(nfc_get_local_general_bytes);
+
 /**
- * nfc_alloc_skb - allocate a skb for data exchange responses
+ * nfc_alloc_send_skb - allocate a skb for data exchange responses
  *
  * @size: size to allocate
  * @gfp: gfp flags
  */
-struct sk_buff *nfc_alloc_skb(unsigned int size, gfp_t gfp)
+struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk,
+                                       unsigned int flags, unsigned int size,
+                                       unsigned int *err)
+{
+       struct sk_buff *skb;
+       unsigned int total_size;
+
+       total_size = size +
+               dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE;
+
+       skb = sock_alloc_send_skb(sk, total_size, flags & MSG_DONTWAIT, err);
+       if (skb)
+               skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
+
+       return skb;
+}
+
+/**
+ * nfc_alloc_recv_skb - allocate a skb for data exchange responses
+ *
+ * @size: size to allocate
+ * @gfp: gfp flags
+ */
+struct sk_buff *nfc_alloc_recv_skb(unsigned int size, gfp_t gfp)
 {
        struct sk_buff *skb;
        unsigned int total_size;
@@ -308,7 +415,7 @@ struct sk_buff *nfc_alloc_skb(unsigned int size, gfp_t gfp)
 
        return skb;
 }
-EXPORT_SYMBOL(nfc_alloc_skb);
+EXPORT_SYMBOL(nfc_alloc_recv_skb);
 
 /**
  * nfc_targets_found - inform that targets were found
@@ -326,7 +433,7 @@ int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets,
 {
        int i;
 
-       nfc_dbg("dev_name=%s n_targets=%d", dev_name(&dev->dev), n_targets);
+       pr_debug("dev_name=%s n_targets=%d\n", dev_name(&dev->dev), n_targets);
 
        dev->polling = false;
 
@@ -360,7 +467,7 @@ static void nfc_release(struct device *d)
 {
        struct nfc_dev *dev = to_nfc_dev(d);
 
-       nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+       pr_debug("dev_name=%s\n", dev_name(&dev->dev));
 
        nfc_genl_data_exit(&dev->genl_data);
        kfree(dev->targets);
@@ -446,7 +553,7 @@ int nfc_register_device(struct nfc_dev *dev)
 {
        int rc;
 
-       nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+       pr_debug("dev_name=%s\n", dev_name(&dev->dev));
 
        mutex_lock(&nfc_devlist_mutex);
        nfc_devlist_generation++;
@@ -456,11 +563,14 @@ int nfc_register_device(struct nfc_dev *dev)
        if (rc < 0)
                return rc;
 
-       rc = nfc_genl_device_added(dev);
+       rc = nfc_llcp_register_device(dev);
        if (rc)
-               nfc_dbg("The userspace won't be notified that the device %s was"
-                                               " added", dev_name(&dev->dev));
+               pr_err("Could not register llcp device\n");
 
+       rc = nfc_genl_device_added(dev);
+       if (rc)
+               pr_debug("The userspace won't be notified that the device %s was added\n",
+                        dev_name(&dev->dev));
 
        return 0;
 }
@@ -475,7 +585,7 @@ void nfc_unregister_device(struct nfc_dev *dev)
 {
        int rc;
 
-       nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+       pr_debug("dev_name=%s\n", dev_name(&dev->dev));
 
        mutex_lock(&nfc_devlist_mutex);
        nfc_devlist_generation++;
@@ -488,10 +598,12 @@ void nfc_unregister_device(struct nfc_dev *dev)
 
        mutex_unlock(&nfc_devlist_mutex);
 
+       nfc_llcp_unregister_device(dev);
+
        rc = nfc_genl_device_removed(dev);
        if (rc)
-               nfc_dbg("The userspace won't be notified that the device %s"
-                                       " was removed", dev_name(&dev->dev));
+               pr_debug("The userspace won't be notified that the device %s was removed\n",
+                        dev_name(&dev->dev));
 
 }
 EXPORT_SYMBOL(nfc_unregister_device);
@@ -500,7 +612,7 @@ static int __init nfc_init(void)
 {
        int rc;
 
-       nfc_info("NFC Core ver %s", VERSION);
+       pr_info("NFC Core ver %s\n", VERSION);
 
        rc = class_register(&nfc_class);
        if (rc)
@@ -517,6 +629,10 @@ static int __init nfc_init(void)
        if (rc)
                goto err_rawsock;
 
+       rc = nfc_llcp_init();
+       if (rc)
+               goto err_llcp_sock;
+
        rc = af_nfc_init();
        if (rc)
                goto err_af_nfc;
@@ -524,6 +640,8 @@ static int __init nfc_init(void)
        return 0;
 
 err_af_nfc:
+       nfc_llcp_exit();
+err_llcp_sock:
        rawsock_exit();
 err_rawsock:
        nfc_genl_exit();
@@ -535,6 +653,7 @@ err_genl:
 static void __exit nfc_exit(void)
 {
        af_nfc_exit();
+       nfc_llcp_exit();
        rawsock_exit();
        nfc_genl_exit();
        class_unregister(&nfc_class);
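
The core.c changes above wire the new LLCP layer into device registration and the DEP link-up/down paths, and they also retire the private nfc_printk()/nfc_dbg() wrappers in favour of the standard pr_debug()/pr_err() helpers, with a pr_fmt definition that prefixes every message with the module name and the calling function. A small user-space sketch of that prefixing pattern follows; printf and the literal "nfc" stand in for printk and KBUILD_MODNAME.

/* Sketch of the pr_fmt() pattern: the prefix is glued onto the format
 * string at compile time, so every call site gets it for free.
 * printf and the "nfc" module name are stand-ins for printk/KBUILD_MODNAME. */
#include <stdio.h>

#define pr_fmt(fmt) "nfc" ": %s: " fmt, __func__
#define pr_debug(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

static void nfc_dev_up_demo(const char *name)
{
	pr_debug("dev_name=%s\n", name);	/* prints: nfc: nfc_dev_up_demo: dev_name=nfc0 */
}

int main(void)
{
	nfc_dev_up_demo("nfc0");
	return 0;
}

Because __func__ is evaluated at the call site, each helper in core.c gets its own prefix without any per-call boilerplate, which is what made the nfc_printk() wrapper redundant.
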
diff --git a/net/nfc/llcp/Kconfig b/net/nfc/llcp/Kconfig
new file mode 100644 (file)
index 0000000..fbf5e81
--- /dev/null
@@ -0,0 +1,7 @@
+config NFC_LLCP
+       depends on NFC && EXPERIMENTAL
+       bool "NFC LLCP support (EXPERIMENTAL)"
+       default n
+       help
+        Say Y here if you want to build support for a kernel NFC LLCP
+        implementation.
\ No newline at end of file

diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c
new file mode 100644 (file)
index 0000000..151f2ef
--- /dev/null
@@ -0,0 +1,399 @@
+/*
+ * Copyright (C) 2011  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) "llcp: %s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nfc.h>
+
+#include <net/nfc/nfc.h>
+
+#include "../nfc.h"
+#include "llcp.h"
+
+static u8 llcp_tlv_length[LLCP_TLV_MAX] = {
+       0,
+       1, /* VERSION */
+       2, /* MIUX */
+       2, /* WKS */
+       1, /* LTO */
+       1, /* RW */
+       0, /* SN */
+       1, /* OPT */
+       0, /* SDREQ */
+       2, /* SDRES */
+
+};
+
+static u8 llcp_tlv8(u8 *tlv, u8 type)
+{
+       if (tlv[0] != type || tlv[1] != llcp_tlv_length[tlv[0]])
+               return 0;
+
+       return tlv[2];
+}
+
+static u16 llcp_tlv16(u8 *tlv, u8 type)
+{
+       if (tlv[0] != type || tlv[1] != llcp_tlv_length[tlv[0]])
+               return 0;
+
+       return be16_to_cpu(*((__be16 *)(tlv + 2)));
+}
+
+
+static u8 llcp_tlv_version(u8 *tlv)
+{
+       return llcp_tlv8(tlv, LLCP_TLV_VERSION);
+}
+
+static u16 llcp_tlv_miux(u8 *tlv)
+{
+       return llcp_tlv16(tlv, LLCP_TLV_MIUX) & 0x7f;
+}
+
+static u16 llcp_tlv_wks(u8 *tlv)
+{
+       return llcp_tlv16(tlv, LLCP_TLV_WKS);
+}
+
+static u16 llcp_tlv_lto(u8 *tlv)
+{
+       return llcp_tlv8(tlv, LLCP_TLV_LTO);
+}
+
+static u8 llcp_tlv_opt(u8 *tlv)
+{
+       return llcp_tlv8(tlv, LLCP_TLV_OPT);
+}
+
+static u8 llcp_tlv_rw(u8 *tlv)
+{
+       return llcp_tlv8(tlv, LLCP_TLV_RW) & 0xf;
+}
+
+u8 *nfc_llcp_build_tlv(u8 type, u8 *value, u8 value_length, u8 *tlv_length)
+{
+       u8 *tlv, length;
+
+       pr_debug("type %d\n", type);
+
+       if (type >= LLCP_TLV_MAX)
+               return NULL;
+
+       length = llcp_tlv_length[type];
+       if (length == 0 && value_length == 0)
+               return NULL;
+       else if (length == 0)
+               length = value_length;
+
+       *tlv_length = 2 + length;
+       tlv = kzalloc(2 + length, GFP_KERNEL);
+       if (tlv == NULL)
+               return tlv;
+
+       tlv[0] = type;
+       tlv[1] = length;
+       memcpy(tlv + 2, value, length);
+
+       return tlv;
+}
+
+int nfc_llcp_parse_tlv(struct nfc_llcp_local *local,
+                       u8 *tlv_array, u16 tlv_array_len)
+{
+       u8 *tlv = tlv_array, type, length, offset = 0;
+
+       pr_debug("TLV array length %d\n", tlv_array_len);
+
+       if (local == NULL)
+               return -ENODEV;
+
+       while (offset < tlv_array_len) {
+               type = tlv[0];
+               length = tlv[1];
+
+               pr_debug("type 0x%x length %d\n", type, length);
+
+               switch (type) {
+               case LLCP_TLV_VERSION:
+                       local->remote_version = llcp_tlv_version(tlv);
+                       break;
+               case LLCP_TLV_MIUX:
+                       local->remote_miu = llcp_tlv_miux(tlv) + 128;
+                       break;
+               case LLCP_TLV_WKS:
+                       local->remote_wks = llcp_tlv_wks(tlv);
+                       break;
+               case LLCP_TLV_LTO:
+                       local->remote_lto = llcp_tlv_lto(tlv) * 10;
+                       break;
+               case LLCP_TLV_OPT:
+                       local->remote_opt = llcp_tlv_opt(tlv);
+                       break;
+               case LLCP_TLV_RW:
+                       local->remote_rw = llcp_tlv_rw(tlv);
+                       break;
+               default:
+                       pr_err("Invalid gt tlv value 0x%x\n", type);
+                       break;
+               }
+
+               offset += length + 2;
+               tlv += length + 2;
+       }
+
+       pr_debug("version 0x%x miu %d lto %d opt 0x%x wks 0x%x rw %d\n",
+               local->remote_version, local->remote_miu,
+               local->remote_lto, local->remote_opt,
+               local->remote_wks, local->remote_rw);
+
+       return 0;
+}
+
+static struct sk_buff *llcp_add_header(struct sk_buff *pdu,
+                                       u8 dsap, u8 ssap, u8 ptype)
+{
+       u8 header[2];
+
+       pr_debug("ptype 0x%x dsap 0x%x ssap 0x%x\n", ptype, dsap, ssap);
+
+       header[0] = (u8)((dsap << 2) | (ptype >> 2));
+       header[1] = (u8)((ptype << 6) | ssap);
+
+       pr_debug("header 0x%x 0x%x\n", header[0], header[1]);
+
+       memcpy(skb_put(pdu, LLCP_HEADER_SIZE), header, LLCP_HEADER_SIZE);
+
+       return pdu;
+}
+
+static struct sk_buff *llcp_add_tlv(struct sk_buff *pdu, u8 *tlv, u8 tlv_length)
+{
+       /* XXX Add an skb length check */
+
+       if (tlv == NULL)
+               return NULL;
+
+       memcpy(skb_put(pdu, tlv_length), tlv, tlv_length);
+
+       return pdu;
+}
+
+static struct sk_buff *llcp_allocate_pdu(struct nfc_llcp_sock *sock,
+                                                       u8 cmd, u16 size)
+{
+       struct sk_buff *skb;
+       int err;
+
+       if (sock->ssap == 0)
+               return NULL;
+
+       skb = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT,
+                                       size + LLCP_HEADER_SIZE, &err);
+       if (skb == NULL) {
+               pr_err("Could not allocate PDU\n");
+               return NULL;
+       }
+
+       skb = llcp_add_header(skb, sock->dsap, sock->ssap, cmd);
+
+       return skb;
+}
+
+int nfc_llcp_disconnect(struct nfc_llcp_sock *sock)
+{
+       struct sk_buff *skb;
+       struct nfc_dev *dev;
+       struct nfc_llcp_local *local;
+       u16 size = 0;
+
+       pr_debug("Sending DISC\n");
+
+       local = sock->local;
+       if (local == NULL)
+               return -ENODEV;
+
+       dev = sock->dev;
+       if (dev == NULL)
+               return -ENODEV;
+
+       size += LLCP_HEADER_SIZE;
+       size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE;
+
+       skb = alloc_skb(size, GFP_ATOMIC);
+       if (skb == NULL)
+               return -ENOMEM;
+
+       skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
+
+       skb = llcp_add_header(skb, sock->ssap, sock->dsap, LLCP_PDU_DISC);
+
+       skb_queue_tail(&local->tx_queue, skb);
+
+       return 0;
+}
+
+int nfc_llcp_send_symm(struct nfc_dev *dev)
+{
+       struct sk_buff *skb;
+       struct nfc_llcp_local *local;
+       u16 size = 0;
+
+       pr_debug("Sending SYMM\n");
+
+       local = nfc_llcp_find_local(dev);
+       if (local == NULL)
+               return -ENODEV;
+
+       size += LLCP_HEADER_SIZE;
+       size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE;
+
+       skb = alloc_skb(size, GFP_KERNEL);
+       if (skb == NULL)
+               return -ENOMEM;
+
+       skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
+
+       skb = llcp_add_header(skb, 0, 0, LLCP_PDU_SYMM);
+
+       return nfc_data_exchange(dev, local->target_idx, skb,
+                                       nfc_llcp_recv, local);
+}
+
+int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
+{
+       struct nfc_llcp_local *local;
+       struct sk_buff *skb;
+       u8 *service_name_tlv = NULL, service_name_tlv_length;
+       int err;
+       u16 size = 0;
+
+       pr_debug("Sending CONNECT\n");
+
+       local = sock->local;
+       if (local == NULL)
+               return -ENODEV;
+
+       if (sock->service_name != NULL) {
+               service_name_tlv = nfc_llcp_build_tlv(LLCP_TLV_SN,
+                                       sock->service_name,
+                                       sock->service_name_len,
+                                       &service_name_tlv_length);
+               size += service_name_tlv_length;
+       }
+
+       pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len);
+
+       skb = llcp_allocate_pdu(sock, LLCP_PDU_CONNECT, size);
+       if (skb == NULL) {
+               err = -ENOMEM;
+               goto error_tlv;
+       }
+
+       if (service_name_tlv != NULL)
+               skb = llcp_add_tlv(skb, service_name_tlv,
+                                       service_name_tlv_length);
+
+       skb_queue_tail(&local->tx_queue, skb);
+
+       return 0;
+
+error_tlv:
+       pr_err("error %d\n", err);
+
+       kfree(service_name_tlv);
+
+       return err;
+}
+
+int nfc_llcp_send_cc(struct nfc_llcp_sock *sock)
+{
+       struct nfc_llcp_local *local;
+       struct sk_buff *skb;
+
+       pr_debug("Sending CC\n");
+
+       local = sock->local;
+       if (local == NULL)
+               return -ENODEV;
+
+       skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, 0);
+       if (skb == NULL)
+               return -ENOMEM;
+
+       skb_queue_tail(&local->tx_queue, skb);
+
+       return 0;
+}
+
+int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason)
+{
+       struct sk_buff *skb;
+       struct nfc_dev *dev;
+       u16 size = 1; /* Reason code */
+
+       pr_debug("Sending DM reason 0x%x\n", reason);
+
+       if (local == NULL)
+               return -ENODEV;
+
+       dev = local->dev;
+       if (dev == NULL)
+               return -ENODEV;
+
+       size += LLCP_HEADER_SIZE;
+       size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE;
+
+       skb = alloc_skb(size, GFP_KERNEL);
+       if (skb == NULL)
+               return -ENOMEM;
+
+       skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
+
+       skb = llcp_add_header(skb, ssap, dsap, LLCP_PDU_DM);
+
+       memcpy(skb_put(skb, 1), &reason, 1);
+
+       skb_queue_head(&local->tx_queue, skb);
+
+       return 0;
+}
+
+int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock)
+{
+       struct sk_buff *skb;
+       struct nfc_llcp_local *local;
+
+       pr_debug("Send DISC\n");
+
+       local = sock->local;
+       if (local == NULL)
+               return -ENODEV;
+
+       skb = llcp_allocate_pdu(sock, LLCP_PDU_DISC, 0);
+       if (skb == NULL)
+               return -ENOMEM;
+
+       skb_queue_head(&local->tx_queue, skb);
+
+       return 0;
+}
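
commands.c above builds and parses the 2-byte LLCP PDU header, which packs a 6-bit DSAP, a 4-bit PTYPE and a 6-bit SSAP: byte 0 carries DSAP[5:0] and PTYPE[3:2], byte 1 carries PTYPE[1:0] and SSAP[5:0]. llcp_add_header() is the encoder and the nfc_llcp_dsap()/nfc_llcp_ptype()/nfc_llcp_ssap() helpers in llcp.c below are the decoders. The self-contained round-trip check below uses the same bit layout; the sample SAP values are arbitrary, and 0x4 is CONNECT in the LLCP specification.

/* Round-trip sketch of the 2-byte LLCP header layout used above:
 *   byte0 = DSAP[5:0] << 2 | PTYPE[3:2]
 *   byte1 = PTYPE[1:0] << 6 | SSAP[5:0]
 * The sample values below are arbitrary examples. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static void llcp_pack(uint8_t hdr[2], uint8_t dsap, uint8_t ssap, uint8_t ptype)
{
	hdr[0] = (uint8_t)((dsap << 2) | (ptype >> 2));
	hdr[1] = (uint8_t)((ptype << 6) | ssap);
}

int main(void)
{
	uint8_t hdr[2];
	const uint8_t dsap = 0x20, ssap = 0x04, ptype = 0x04;	/* CONNECT */

	llcp_pack(hdr, dsap, ssap, ptype);

	assert(((hdr[0] & 0xfc) >> 2) == dsap);				/* nfc_llcp_dsap()  */
	assert((((hdr[0] & 0x03) << 2) | ((hdr[1] & 0xc0) >> 6)) == ptype);	/* nfc_llcp_ptype() */
	assert((hdr[1] & 0x3f) == ssap);				/* nfc_llcp_ssap()  */

	printf("header 0x%02x 0x%02x\n", hdr[0], hdr[1]);
	return 0;
}
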
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
new file mode 100644 (file)
index 0000000..67756b2
--- /dev/null
@@ -0,0 +1,973 @@
+/*
+ * Copyright (C) 2011  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) "llcp: %s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/nfc.h>
+
+#include "../nfc.h"
+#include "llcp.h"
+
+static u8 llcp_magic[3] = {0x46, 0x66, 0x6d};
+
+static struct list_head llcp_devices;
+
+static void nfc_llcp_socket_release(struct nfc_llcp_local *local)
+{
+       struct nfc_llcp_sock *parent, *s, *n;
+       struct sock *sk, *parent_sk;
+       int i;
+
+
+       mutex_lock(&local->socket_lock);
+
+       for (i = 0; i < LLCP_MAX_SAP; i++) {
+               parent = local->sockets[i];
+               if (parent == NULL)
+                       continue;
+
+               /* Release all child sockets */
+               list_for_each_entry_safe(s, n, &parent->list, list) {
+                       list_del(&s->list);
+                       sk = &s->sk;
+
+                       lock_sock(sk);
+
+                       if (sk->sk_state == LLCP_CONNECTED)
+                               nfc_put_device(s->dev);
+
+                       sk->sk_state = LLCP_CLOSED;
+                       sock_set_flag(sk, SOCK_DEAD);
+
+                       release_sock(sk);
+               }
+
+               parent_sk = &parent->sk;
+
+               lock_sock(parent_sk);
+
+               if (parent_sk->sk_state == LLCP_LISTEN) {
+                       struct nfc_llcp_sock *lsk, *n;
+                       struct sock *accept_sk;
+
+                       list_for_each_entry_safe(lsk, n, &parent->accept_queue,
+                                                               accept_queue) {
+                               accept_sk = &lsk->sk;
+                               lock_sock(accept_sk);
+
+                               nfc_llcp_accept_unlink(accept_sk);
+
+                               accept_sk->sk_state = LLCP_CLOSED;
+                               sock_set_flag(accept_sk, SOCK_DEAD);
+
+                               release_sock(accept_sk);
+
+                               sock_orphan(accept_sk);
+                       }
+               }
+
+               if (parent_sk->sk_state == LLCP_CONNECTED)
+                       nfc_put_device(parent->dev);
+
+               parent_sk->sk_state = LLCP_CLOSED;
+               sock_set_flag(parent_sk, SOCK_DEAD);
+
+               release_sock(parent_sk);
+       }
+
+       mutex_unlock(&local->socket_lock);
+}
+
+static void nfc_llcp_timeout_work(struct work_struct *work)
+{
+       struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
+                                                       timeout_work);
+
+       nfc_dep_link_down(local->dev);
+}
+
+static void nfc_llcp_symm_timer(unsigned long data)
+{
+       struct nfc_llcp_local *local = (struct nfc_llcp_local *) data;
+
+       pr_err("SYMM timeout\n");
+
+       queue_work(local->timeout_wq, &local->timeout_work);
+}
+
+struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev)
+{
+       struct nfc_llcp_local *local, *n;
+
+       list_for_each_entry_safe(local, n, &llcp_devices, list)
+               if (local->dev == dev)
+                       return local;
+
+       pr_debug("No device found\n");
+
+       return NULL;
+}
+
+static char *wks[] = {
+       NULL,
+       NULL, /* SDP */
+       "urn:nfc:sn:ip",
+       "urn:nfc:sn:obex",
+       "urn:nfc:sn:snep",
+};
+
+static int nfc_llcp_wks_sap(char *service_name, size_t service_name_len)
+{
+       int sap, num_wks;
+
+       pr_debug("%s\n", service_name);
+
+       if (service_name == NULL)
+               return -EINVAL;
+
+       num_wks = ARRAY_SIZE(wks);
+
+       for (sap = 0 ; sap < num_wks; sap++) {
+               if (wks[sap] == NULL)
+                       continue;
+
+               if (strncmp(wks[sap], service_name, service_name_len) == 0)
+                       return sap;
+       }
+
+       return -EINVAL;
+}
+
+u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
+                               struct nfc_llcp_sock *sock)
+{
+       mutex_lock(&local->sdp_lock);
+
+       if (sock->service_name != NULL && sock->service_name_len > 0) {
+               int ssap = nfc_llcp_wks_sap(sock->service_name,
+                                               sock->service_name_len);
+
+               if (ssap > 0) {
+                       pr_debug("WKS %d\n", ssap);
+
+                       /* This is a WKS, let's check if it's free */
+                       if (local->local_wks & BIT(ssap)) {
+                               mutex_unlock(&local->sdp_lock);
+
+                               return LLCP_SAP_MAX;
+                       }
+
+                       set_bit(BIT(ssap), &local->local_wks);
+                       mutex_unlock(&local->sdp_lock);
+
+                       return ssap;
+               }
+
+               /*
+                * This is not a well known service,
+                * we should try to find a local SDP free spot
+                */
+               ssap = find_first_zero_bit(&local->local_sdp, LLCP_SDP_NUM_SAP);
+               if (ssap == LLCP_SDP_NUM_SAP) {
+                       mutex_unlock(&local->sdp_lock);
+
+                       return LLCP_SAP_MAX;
+               }
+
+               pr_debug("SDP ssap %d\n", LLCP_WKS_NUM_SAP + ssap);
+
+               set_bit(BIT(ssap), &local->local_sdp);
+               mutex_unlock(&local->sdp_lock);
+
+               return LLCP_WKS_NUM_SAP + ssap;
+
+       } else if (sock->ssap != 0) {
+               if (sock->ssap < LLCP_WKS_NUM_SAP) {
+                       if (!(local->local_wks & BIT(sock->ssap))) {
+                               set_bit(BIT(sock->ssap), &local->local_wks);
+                               mutex_unlock(&local->sdp_lock);
+
+                               return sock->ssap;
+                       }
+
+               } else if (sock->ssap < LLCP_SDP_NUM_SAP) {
+                       if (!(local->local_sdp &
+                               BIT(sock->ssap - LLCP_WKS_NUM_SAP))) {
+                               set_bit(BIT(sock->ssap - LLCP_WKS_NUM_SAP),
+                                                       &local->local_sdp);
+                               mutex_unlock(&local->sdp_lock);
+
+                               return sock->ssap;
+                       }
+               }
+       }
+
+       mutex_unlock(&local->sdp_lock);
+
+       return LLCP_SAP_MAX;
+}
+
+u8 nfc_llcp_get_local_ssap(struct nfc_llcp_local *local)
+{
+       u8 local_ssap;
+
+       mutex_lock(&local->sdp_lock);
+
+       local_ssap = find_first_zero_bit(&local->local_sap, LLCP_LOCAL_NUM_SAP);
+       if (local_ssap == LLCP_LOCAL_NUM_SAP) {
+               mutex_unlock(&local->sdp_lock);
+               return LLCP_SAP_MAX;
+       }
+
+       set_bit(BIT(local_ssap), &local->local_sap);
+
+       mutex_unlock(&local->sdp_lock);
+
+       return local_ssap + LLCP_LOCAL_SAP_OFFSET;
+}
+
+void nfc_llcp_put_ssap(struct nfc_llcp_local *local, u8 ssap)
+{
+       u8 local_ssap;
+       unsigned long *sdp;
+
+       if (ssap < LLCP_WKS_NUM_SAP) {
+               local_ssap = ssap;
+               sdp = &local->local_wks;
+       } else if (ssap < LLCP_LOCAL_NUM_SAP) {
+               local_ssap = ssap - LLCP_WKS_NUM_SAP;
+               sdp = &local->local_sdp;
+       } else if (ssap < LLCP_MAX_SAP) {
+               local_ssap = ssap - LLCP_LOCAL_NUM_SAP;
+               sdp = &local->local_sap;
+       } else {
+               return;
+       }
+
+       mutex_lock(&local->sdp_lock);
+
+       clear_bit(1 << local_ssap, sdp);
+
+       mutex_unlock(&local->sdp_lock);
+}
+
+u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, u8 *general_bytes_len)
+{
+       struct nfc_llcp_local *local;
+
+       local = nfc_llcp_find_local(dev);
+       if (local == NULL) {
+               *general_bytes_len = 0;
+               return NULL;
+       }
+
+       *general_bytes_len = local->gb_len;
+
+       return local->gb;
+}
+
+static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
+{
+       u8 *gb_cur, *version_tlv, version, version_length;
+       u8 *lto_tlv, lto, lto_length;
+       u8 *wks_tlv, wks_length;
+       u8 gb_len = 0;
+
+       version = LLCP_VERSION_11;
+       version_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &version,
+                                                       1, &version_length);
+       gb_len += version_length;
+
+       /* 1500 ms */
+       lto = 150;
+       lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, &lto, 1, &lto_length);
+       gb_len += lto_length;
+
+       pr_debug("Local wks 0x%lx\n", local->local_wks);
+       wks_tlv = nfc_llcp_build_tlv(LLCP_TLV_WKS, (u8 *)&local->local_wks, 2,
+                                                               &wks_length);
+       gb_len += wks_length;
+
+       gb_len += ARRAY_SIZE(llcp_magic);
+
+       if (gb_len > NFC_MAX_GT_LEN) {
+               kfree(version_tlv);
+               return -EINVAL;
+       }
+
+       gb_cur = local->gb;
+
+       memcpy(gb_cur, llcp_magic, ARRAY_SIZE(llcp_magic));
+       gb_cur += ARRAY_SIZE(llcp_magic);
+
+       memcpy(gb_cur, version_tlv, version_length);
+       gb_cur += version_length;
+
+       memcpy(gb_cur, lto_tlv, lto_length);
+       gb_cur += lto_length;
+
+       memcpy(gb_cur, wks_tlv, wks_length);
+       gb_cur += wks_length;
+
+       kfree(version_tlv);
+       kfree(lto_tlv);
+
+       local->gb_len = gb_len;
+
+       return 0;
+}
+
+int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len)
+{
+       struct nfc_llcp_local *local = nfc_llcp_find_local(dev);
+
+       if (local == NULL) {
+               pr_err("No LLCP device\n");
+               return -ENODEV;
+       }
+
+       memset(local->remote_gb, 0, NFC_MAX_GT_LEN);
+       memcpy(local->remote_gb, gb, gb_len);
+       local->remote_gb_len = gb_len;
+
+       if (local->remote_gb == NULL ||
+                       local->remote_gb_len == 0)
+               return -ENODEV;
+
+       if (memcmp(local->remote_gb, llcp_magic, 3)) {
+               pr_err("MAC does not support LLCP\n");
+               return -EINVAL;
+       }
+
+       return nfc_llcp_parse_tlv(local,
+                       &local->remote_gb[3], local->remote_gb_len - 3);
+}
+
+static void nfc_llcp_tx_work(struct work_struct *work)
+{
+       struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
+                                                       tx_work);
+       struct sk_buff *skb;
+
+       skb = skb_dequeue(&local->tx_queue);
+       if (skb != NULL) {
+               pr_debug("Sending pending skb\n");
+               nfc_data_exchange(local->dev, local->target_idx,
+                                       skb, nfc_llcp_recv, local);
+       } else {
+               nfc_llcp_send_symm(local->dev);
+       }
+
+       mod_timer(&local->link_timer,
+                       jiffies + msecs_to_jiffies(local->remote_lto));
+}
+
+static u8 nfc_llcp_dsap(struct sk_buff *pdu)
+{
+       return (pdu->data[0] & 0xfc) >> 2;
+}
+
+static u8 nfc_llcp_ptype(struct sk_buff *pdu)
+{
+       return ((pdu->data[0] & 0x03) << 2) | ((pdu->data[1] & 0xc0) >> 6);
+}
+
+static u8 nfc_llcp_ssap(struct sk_buff *pdu)
+{
+       return pdu->data[1] & 0x3f;
+}
+
+static u8 nfc_llcp_ns(struct sk_buff *pdu)
+{
+       return pdu->data[2] >> 4;
+}
+
+static u8 nfc_llcp_nr(struct sk_buff *pdu)
+{
+       return pdu->data[2] & 0xf;
+}
+
+static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu)
+{
+       pdu->data[2] = (sock->send_n << 4) | ((sock->recv_n - 1) % 16);
+       sock->send_n = (sock->send_n + 1) % 16;
+       sock->recv_ack_n = (sock->recv_n - 1) % 16;
+}
+
+static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
+                                               u8 ssap, u8 dsap)
+{
+       struct nfc_llcp_sock *sock, *llcp_sock, *n;
+
+       if (ssap == 0 && dsap == 0)
+               return NULL;
+
+       mutex_lock(&local->socket_lock);
+       sock = local->sockets[ssap];
+       if (sock == NULL) {
+               mutex_unlock(&local->socket_lock);
+               return NULL;
+       }
+
+       pr_debug("root dsap %d (%d)\n", sock->dsap, dsap);
+
+       if (sock->dsap == dsap) {
+               sock_hold(&sock->sk);
+               mutex_unlock(&local->socket_lock);
+               return sock;
+       }
+
+       list_for_each_entry_safe(llcp_sock, n, &sock->list, list) {
+               pr_debug("llcp_sock %p sk %p dsap %d\n", llcp_sock,
+                               &llcp_sock->sk, llcp_sock->dsap);
+               if (llcp_sock->dsap == dsap) {
+                       sock_hold(&llcp_sock->sk);
+                       mutex_unlock(&local->socket_lock);
+                       return llcp_sock;
+               }
+       }
+
+       pr_err("Could not find socket for %d %d\n", ssap, dsap);
+
+       mutex_unlock(&local->socket_lock);
+
+       return NULL;
+}
+
+static void nfc_llcp_sock_put(struct nfc_llcp_sock *sock)
+{
+       sock_put(&sock->sk);
+}
+
+static u8 *nfc_llcp_connect_sn(struct sk_buff *skb, size_t *sn_len)
+{
+       u8 *tlv = &skb->data[2], type, length;
+       size_t tlv_array_len = skb->len - LLCP_HEADER_SIZE, offset = 0;
+
+       while (offset < tlv_array_len) {
+               type = tlv[0];
+               length = tlv[1];
+
+               pr_debug("type 0x%x length %d\n", type, length);
+
+               if (type == LLCP_TLV_SN) {
+                       *sn_len = length;
+                       return &tlv[2];
+               }
+
+               offset += length + 2;
+               tlv += length + 2;
+       }
+
+       return NULL;
+}
+
+static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
+                               struct sk_buff *skb)
+{
+       struct sock *new_sk, *parent;
+       struct nfc_llcp_sock *sock, *new_sock;
+       u8 dsap, ssap, bound_sap, reason;
+
+       dsap = nfc_llcp_dsap(skb);
+       ssap = nfc_llcp_ssap(skb);
+
+       pr_debug("%d %d\n", dsap, ssap);
+
+       nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE],
+                               skb->len - LLCP_HEADER_SIZE);
+
+       if (dsap != LLCP_SAP_SDP) {
+               bound_sap = dsap;
+
+               mutex_lock(&local->socket_lock);
+               sock = local->sockets[dsap];
+               if (sock == NULL) {
+                       mutex_unlock(&local->socket_lock);
+                       reason = LLCP_DM_NOBOUND;
+                       goto fail;
+               }
+
+               sock_hold(&sock->sk);
+               mutex_unlock(&local->socket_lock);
+
+               lock_sock(&sock->sk);
+
+               if (sock->dsap == LLCP_SAP_SDP &&
+                               sock->sk.sk_state == LLCP_LISTEN)
+                       goto enqueue;
+       } else {
+               u8 *sn;
+               size_t sn_len;
+
+               sn = nfc_llcp_connect_sn(skb, &sn_len);
+               if (sn == NULL) {
+                       reason = LLCP_DM_NOBOUND;
+                       goto fail;
+               }
+
+               pr_debug("Service name length %zu\n", sn_len);
+
+               mutex_lock(&local->socket_lock);
+               for (bound_sap = 0; bound_sap < LLCP_LOCAL_SAP_OFFSET;
+                                                               bound_sap++) {
+                       sock = local->sockets[bound_sap];
+                       if (sock == NULL)
+                               continue;
+
+                       if (sock->service_name == NULL ||
+                               sock->service_name_len == 0)
+                                       continue;
+
+                       if (sock->service_name_len != sn_len)
+                               continue;
+
+                       if (sock->dsap == LLCP_SAP_SDP &&
+                                       sock->sk.sk_state == LLCP_LISTEN &&
+                                       !memcmp(sn, sock->service_name, sn_len)) {
+                               pr_debug("Found service name at SAP %d\n",
+                                                               bound_sap);
+                               sock_hold(&sock->sk);
+                               mutex_unlock(&local->socket_lock);
+
+                               lock_sock(&sock->sk);
+
+                               goto enqueue;
+                       }
+               }
+
+       }
+
+       mutex_unlock(&local->socket_lock);
+
+       reason = LLCP_DM_NOBOUND;
+       goto fail;
+
+enqueue:
+       parent = &sock->sk;
+
+       if (sk_acceptq_is_full(parent)) {
+               reason = LLCP_DM_REJ;
+               release_sock(&sock->sk);
+               sock_put(&sock->sk);
+               goto fail;
+       }
+
+       new_sk = nfc_llcp_sock_alloc(NULL, parent->sk_type,
+                                    GFP_ATOMIC);
+       if (new_sk == NULL) {
+               reason = LLCP_DM_REJ;
+               release_sock(&sock->sk);
+               sock_put(&sock->sk);
+               goto fail;
+       }
+
+       new_sock = nfc_llcp_sock(new_sk);
+       new_sock->dev = local->dev;
+       new_sock->local = local;
+       new_sock->nfc_protocol = sock->nfc_protocol;
+       new_sock->ssap = bound_sap;
+       new_sock->dsap = ssap;
+       new_sock->parent = parent;
+
+       pr_debug("new sock %p sk %p\n", new_sock, &new_sock->sk);
+
+       list_add_tail(&new_sock->list, &sock->list);
+
+       nfc_llcp_accept_enqueue(&sock->sk, new_sk);
+
+       nfc_get_device(local->dev->idx);
+
+       new_sk->sk_state = LLCP_CONNECTED;
+
+       /* Wake the listening processes */
+       parent->sk_data_ready(parent, 0);
+
+       /* Send CC */
+       nfc_llcp_send_cc(new_sock);
+
+       release_sock(&sock->sk);
+       sock_put(&sock->sk);
+
+       return;
+
+fail:
+       /* Send DM */
+       nfc_llcp_send_dm(local, dsap, ssap, reason);
+
+       return;
+
+}
+
+static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local,
+                               struct sk_buff *skb)
+{
+       struct nfc_llcp_sock *llcp_sock;
+       struct sock *sk;
+       u8 dsap, ssap, ptype, ns, nr;
+
+       ptype = nfc_llcp_ptype(skb);
+       dsap = nfc_llcp_dsap(skb);
+       ssap = nfc_llcp_ssap(skb);
+       ns = nfc_llcp_ns(skb);
+       nr = nfc_llcp_nr(skb);
+
+       pr_debug("%d %d R %d S %d\n", dsap, ssap, nr, ns);
+
+       llcp_sock = nfc_llcp_sock_get(local, dsap, ssap);
+       if (llcp_sock == NULL) {
+               nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_NOCONN);
+               return;
+       }
+
+       sk = &llcp_sock->sk;
+       lock_sock(sk);
+       if (sk->sk_state == LLCP_CLOSED) {
+               release_sock(sk);
+               nfc_llcp_sock_put(llcp_sock);
+               return;
+       }
+
+       if (ns == llcp_sock->recv_n)
+               llcp_sock->recv_n = (llcp_sock->recv_n + 1) % 16;
+       else
+               pr_err("Received out of sequence I PDU\n");
+
+       /* Pass the payload upstream */
+       if (ptype == LLCP_PDU_I) {
+               pr_debug("I frame, queueing on %p\n", &llcp_sock->sk);
+
+               skb_pull(skb, LLCP_HEADER_SIZE + LLCP_SEQUENCE_SIZE);
+               if (sock_queue_rcv_skb(&llcp_sock->sk, skb)) {
+                       pr_err("receive queue is full\n");
+                       skb_queue_head(&llcp_sock->tx_backlog_queue, skb);
+               }
+       }
+
+       /* Remove skbs from the pending queue */
+       if (llcp_sock->send_ack_n != nr) {
+               struct sk_buff *s, *tmp;
+
+               llcp_sock->send_ack_n = nr;
+
+               skb_queue_walk_safe(&llcp_sock->tx_pending_queue, s, tmp)
+                       if (nfc_llcp_ns(s) <= nr) {
+                               skb_unlink(s, &llcp_sock->tx_pending_queue);
+                               kfree_skb(s);
+                       }
+       }
+
+       /* Queue some I frames for transmission */
+       while (llcp_sock->remote_ready &&
+               skb_queue_len(&llcp_sock->tx_pending_queue) <= local->remote_rw) {
+               struct sk_buff *pdu, *pending_pdu;
+
+               pdu = skb_dequeue(&llcp_sock->tx_queue);
+               if (pdu == NULL)
+                       break;
+
+               /* Update N(S)/N(R) */
+               nfc_llcp_set_nrns(llcp_sock, pdu);
+
+               pending_pdu = skb_clone(pdu, GFP_KERNEL);
+
+               skb_queue_tail(&local->tx_queue, pdu);
+               skb_queue_tail(&llcp_sock->tx_pending_queue, pending_pdu);
+       }
+
+       release_sock(sk);
+       nfc_llcp_sock_put(llcp_sock);
+}
+
+static void nfc_llcp_recv_disc(struct nfc_llcp_local *local,
+                               struct sk_buff *skb)
+{
+       struct nfc_llcp_sock *llcp_sock;
+       struct sock *sk;
+       u8 dsap, ssap;
+
+       dsap = nfc_llcp_dsap(skb);
+       ssap = nfc_llcp_ssap(skb);
+
+       llcp_sock = nfc_llcp_sock_get(local, dsap, ssap);
+       if (llcp_sock == NULL) {
+               nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_NOCONN);
+               return;
+       }
+
+       sk = &llcp_sock->sk;
+       lock_sock(sk);
+       if (sk->sk_state == LLCP_CLOSED) {
+               release_sock(sk);
+               nfc_llcp_sock_put(llcp_sock);
+               return;
+       }
+
+
+       if (sk->sk_state == LLCP_CONNECTED) {
+               nfc_put_device(local->dev);
+               sk->sk_state = LLCP_CLOSED;
+               sk->sk_state_change(sk);
+       }
+
+       nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_DISC);
+
+       release_sock(sk);
+       nfc_llcp_sock_put(llcp_sock);
+}
+
+static void nfc_llcp_recv_cc(struct nfc_llcp_local *local,
+                               struct sk_buff *skb)
+{
+       struct nfc_llcp_sock *llcp_sock;
+       u8 dsap, ssap;
+
+
+       dsap = nfc_llcp_dsap(skb);
+       ssap = nfc_llcp_ssap(skb);
+
+       llcp_sock = nfc_llcp_sock_get(local, dsap, ssap);
+
+       if (llcp_sock == NULL)
+               llcp_sock = nfc_llcp_sock_get(local, dsap, LLCP_SAP_SDP);
+
+       if (llcp_sock == NULL) {
+               pr_err("Invalid CC\n");
+               nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_NOCONN);
+
+               return;
+       }
+
+       llcp_sock->dsap = ssap;
+
+       nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE],
+                               skb->len - LLCP_HEADER_SIZE);
+
+       nfc_llcp_sock_put(llcp_sock);
+}
+
+static void nfc_llcp_rx_work(struct work_struct *work)
+{
+       struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
+                                                               rx_work);
+       u8 dsap, ssap, ptype;
+       struct sk_buff *skb;
+
+       skb = local->rx_pending;
+       if (skb == NULL) {
+               pr_debug("No pending SKB\n");
+               return;
+       }
+
+       ptype = nfc_llcp_ptype(skb);
+       dsap = nfc_llcp_dsap(skb);
+       ssap = nfc_llcp_ssap(skb);
+
+       pr_debug("ptype 0x%x dsap 0x%x ssap 0x%x\n", ptype, dsap, ssap);
+
+       switch (ptype) {
+       case LLCP_PDU_SYMM:
+               pr_debug("SYMM\n");
+               break;
+
+       case LLCP_PDU_CONNECT:
+               pr_debug("CONNECT\n");
+               nfc_llcp_recv_connect(local, skb);
+               break;
+
+       case LLCP_PDU_DISC:
+               pr_debug("DISC\n");
+               nfc_llcp_recv_disc(local, skb);
+               break;
+
+       case LLCP_PDU_CC:
+               pr_debug("CC\n");
+               nfc_llcp_recv_cc(local, skb);
+               break;
+
+       case LLCP_PDU_I:
+       case LLCP_PDU_RR:
+               pr_debug("I frame\n");
+               nfc_llcp_recv_hdlc(local, skb);
+               break;
+
+       }
+
+       queue_work(local->tx_wq, &local->tx_work);
+       kfree_skb(local->rx_pending);
+       local->rx_pending = NULL;
+
+       return;
+}
+
+void nfc_llcp_recv(void *data, struct sk_buff *skb, int err)
+{
+       struct nfc_llcp_local *local = (struct nfc_llcp_local *) data;
+
+       pr_debug("Received an LLCP PDU\n");
+       if (err < 0) {
+               pr_err("err %d", err);
+               return;
+       }
+
+       local->rx_pending = skb_get(skb);
+       del_timer(&local->link_timer);
+       queue_work(local->rx_wq, &local->rx_work);
+
+       return;
+}
+
+void nfc_llcp_mac_is_down(struct nfc_dev *dev)
+{
+       struct nfc_llcp_local *local;
+
+       local = nfc_llcp_find_local(dev);
+       if (local == NULL)
+               return;
+
+       /* Close and purge all existing sockets */
+       nfc_llcp_socket_release(local);
+}
+
+void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
+                       u8 comm_mode, u8 rf_mode)
+{
+       struct nfc_llcp_local *local;
+
+       pr_debug("rf mode %d\n", rf_mode);
+
+       local = nfc_llcp_find_local(dev);
+       if (local == NULL)
+               return;
+
+       local->target_idx = target_idx;
+       local->comm_mode = comm_mode;
+       local->rf_mode = rf_mode;
+
+       if (rf_mode == NFC_RF_INITIATOR) {
+               pr_debug("Queueing Tx work\n");
+
+               queue_work(local->tx_wq, &local->tx_work);
+       } else {
+               mod_timer(&local->link_timer,
+                       jiffies + msecs_to_jiffies(local->remote_lto));
+       }
+}
+
+int nfc_llcp_register_device(struct nfc_dev *ndev)
+{
+       struct device *dev = &ndev->dev;
+       struct nfc_llcp_local *local;
+       char name[32];
+       int err;
+
+       local = kzalloc(sizeof(struct nfc_llcp_local), GFP_KERNEL);
+       if (local == NULL)
+               return -ENOMEM;
+
+       local->dev = ndev;
+       INIT_LIST_HEAD(&local->list);
+       mutex_init(&local->sdp_lock);
+       mutex_init(&local->socket_lock);
+       init_timer(&local->link_timer);
+       local->link_timer.data = (unsigned long) local;
+       local->link_timer.function = nfc_llcp_symm_timer;
+
+       skb_queue_head_init(&local->tx_queue);
+       INIT_WORK(&local->tx_work, nfc_llcp_tx_work);
+       snprintf(name, sizeof(name), "%s_llcp_tx_wq", dev_name(dev));
+       local->tx_wq = alloc_workqueue(name,
+                       WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+       if (local->tx_wq == NULL) {
+               err = -ENOMEM;
+               goto err_local;
+       }
+
+       local->rx_pending = NULL;
+       INIT_WORK(&local->rx_work, nfc_llcp_rx_work);
+       snprintf(name, sizeof(name), "%s_llcp_rx_wq", dev_name(dev));
+       local->rx_wq = alloc_workqueue(name,
+                       WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+       if (local->rx_wq == NULL) {
+               err = -ENOMEM;
+               goto err_tx_wq;
+       }
+
+       INIT_WORK(&local->timeout_work, nfc_llcp_timeout_work);
+       snprintf(name, sizeof(name), "%s_llcp_timeout_wq", dev_name(dev));
+       local->timeout_wq = alloc_workqueue(name,
+                       WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+       if (local->timeout_wq == NULL) {
+               err = -ENOMEM;
+               goto err_rx_wq;
+       }
+
+       nfc_llcp_build_gb(local);
+
+       local->remote_miu = LLCP_DEFAULT_MIU;
+       local->remote_lto = LLCP_DEFAULT_LTO;
+       local->remote_rw = LLCP_DEFAULT_RW;
+
+       list_add(&llcp_devices, &local->list);
+
+       return 0;
+
+err_rx_wq:
+       destroy_workqueue(local->rx_wq);
+
+err_tx_wq:
+       destroy_workqueue(local->tx_wq);
+
+err_local:
+       kfree(local);
+
+       return err;
+}
+
+void nfc_llcp_unregister_device(struct nfc_dev *dev)
+{
+       struct nfc_llcp_local *local = nfc_llcp_find_local(dev);
+
+       if (local == NULL) {
+               pr_debug("No such device\n");
+               return;
+       }
+
+       list_del(&local->list);
+       nfc_llcp_socket_release(local);
+       del_timer_sync(&local->link_timer);
+       skb_queue_purge(&local->tx_queue);
+       destroy_workqueue(local->tx_wq);
+       destroy_workqueue(local->rx_wq);
+       destroy_workqueue(local->timeout_wq);
+       kfree(local->rx_pending);
+       kfree(local);
+}
+
+int __init nfc_llcp_init(void)
+{
+       INIT_LIST_HEAD(&llcp_devices);
+
+       return nfc_llcp_sock_init();
+}
+
+void nfc_llcp_exit(void)
+{
+       nfc_llcp_sock_exit();
+}
diff --git a/net/nfc/llcp/llcp.h b/net/nfc/llcp/llcp.h
new file mode 100644 (file)
index 0000000..0ad2e33
--- /dev/null
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) 2011  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+enum llcp_state {
+       LLCP_CONNECTED = 1, /* wait_for_packet() wants that */
+       LLCP_CLOSED,
+       LLCP_BOUND,
+       LLCP_LISTEN,
+};
+
+#define LLCP_DEFAULT_LTO 100
+#define LLCP_DEFAULT_RW  1
+#define LLCP_DEFAULT_MIU 128
+
+#define LLCP_WKS_NUM_SAP   16
+#define LLCP_SDP_NUM_SAP   16
+#define LLCP_LOCAL_NUM_SAP 32
+#define LLCP_LOCAL_SAP_OFFSET (LLCP_WKS_NUM_SAP + LLCP_SDP_NUM_SAP)
+#define LLCP_MAX_SAP (LLCP_WKS_NUM_SAP + LLCP_SDP_NUM_SAP + LLCP_LOCAL_NUM_SAP)
+
+struct nfc_llcp_sock;
+
+struct nfc_llcp_local {
+       struct list_head list;
+       struct nfc_dev *dev;
+
+       struct mutex sdp_lock;
+       struct mutex socket_lock;
+
+       struct timer_list link_timer;
+       struct sk_buff_head tx_queue;
+       struct workqueue_struct *tx_wq;
+       struct work_struct       tx_work;
+       struct workqueue_struct *rx_wq;
+       struct work_struct       rx_work;
+       struct sk_buff *rx_pending;
+       struct workqueue_struct *timeout_wq;
+       struct work_struct       timeout_work;
+
+       u32 target_idx;
+       u8 rf_mode;
+       u8 comm_mode;
+       unsigned long local_wks;      /* Well known services */
+       unsigned long local_sdp;      /* Local services  */
+       unsigned long local_sap; /* Local SAPs, not available for discovery */
+
+       /* local */
+       u8 gb[NFC_MAX_GT_LEN];
+       u8 gb_len;
+
+       /* remote */
+       u8 remote_gb[NFC_MAX_GT_LEN];
+       u8 remote_gb_len;
+
+       u8  remote_version;
+       u16 remote_miu;
+       u16 remote_lto;
+       u8  remote_opt;
+       u16 remote_wks;
+       u8  remote_rw;
+
+       /* sockets array */
+       struct nfc_llcp_sock *sockets[LLCP_MAX_SAP];
+};
+
+struct nfc_llcp_sock {
+       struct sock sk;
+       struct list_head list;
+       struct nfc_dev *dev;
+       struct nfc_llcp_local *local;
+       u32 target_idx;
+       u32 nfc_protocol;
+
+       u8 ssap;
+       u8 dsap;
+       char *service_name;
+       size_t service_name_len;
+
+       /* Link variables */
+       u8 send_n;
+       u8 send_ack_n;
+       u8 recv_n;
+       u8 recv_ack_n;
+
+       /* Is the remote peer ready to receive */
+       u8 remote_ready;
+
+       struct sk_buff_head tx_queue;
+       struct sk_buff_head tx_pending_queue;
+       struct sk_buff_head tx_backlog_queue;
+
+       struct list_head accept_queue;
+       struct sock *parent;
+};
+
+#define nfc_llcp_sock(sk) ((struct nfc_llcp_sock *) (sk))
+#define nfc_llcp_dev(sk)  (nfc_llcp_sock((sk))->dev)
+
+#define LLCP_HEADER_SIZE   2
+#define LLCP_SEQUENCE_SIZE 1
+
+/* LLCP versions: 1.1 is 1.0 plus SDP */
+#define LLCP_VERSION_10 0x10
+#define LLCP_VERSION_11 0x11
+
+/* LLCP PDU types */
+#define LLCP_PDU_SYMM     0x0
+#define LLCP_PDU_PAX      0x1
+#define LLCP_PDU_AGF      0x2
+#define LLCP_PDU_UI       0x3
+#define LLCP_PDU_CONNECT  0x4
+#define LLCP_PDU_DISC     0x5
+#define LLCP_PDU_CC       0x6
+#define LLCP_PDU_DM       0x7
+#define LLCP_PDU_FRMR     0x8
+#define LLCP_PDU_SNL      0x9
+#define LLCP_PDU_I        0xc
+#define LLCP_PDU_RR       0xd
+#define LLCP_PDU_RNR      0xe
+
+/* Parameters TLV types */
+#define LLCP_TLV_VERSION 0x1
+#define LLCP_TLV_MIUX    0x2
+#define LLCP_TLV_WKS     0x3
+#define LLCP_TLV_LTO     0x4
+#define LLCP_TLV_RW      0x5
+#define LLCP_TLV_SN      0x6
+#define LLCP_TLV_OPT     0x7
+#define LLCP_TLV_SDREQ   0x8
+#define LLCP_TLV_SDRES   0x9
+#define LLCP_TLV_MAX     0xa
+
+/* Well known LLCP SAP */
+#define LLCP_SAP_SDP   0x1
+#define LLCP_SAP_IP    0x2
+#define LLCP_SAP_OBEX  0x3
+#define LLCP_SAP_SNEP  0x4
+#define LLCP_SAP_MAX   0xff
+
+/* Disconnection reason code */
+#define LLCP_DM_DISC    0x00
+#define LLCP_DM_NOCONN  0x01
+#define LLCP_DM_NOBOUND 0x02
+#define LLCP_DM_REJ     0x03
+
+struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev);
+u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
+                               struct nfc_llcp_sock *sock);
+u8 nfc_llcp_get_local_ssap(struct nfc_llcp_local *local);
+void nfc_llcp_put_ssap(struct nfc_llcp_local *local, u8 ssap);
+
+/* Sock API */
+struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp);
+void nfc_llcp_sock_free(struct nfc_llcp_sock *sock);
+void nfc_llcp_accept_unlink(struct sock *sk);
+void nfc_llcp_accept_enqueue(struct sock *parent, struct sock *sk);
+struct sock *nfc_llcp_accept_dequeue(struct sock *sk, struct socket *newsock);
+
+/* TLV API */
+int nfc_llcp_parse_tlv(struct nfc_llcp_local *local,
+                       u8 *tlv_array, u16 tlv_array_len);
+u8 *nfc_llcp_build_tlv(u8 type, u8 *value, u8 value_length, u8 *tlv_length);
+
+/* Commands API */
+void nfc_llcp_recv(void *data, struct sk_buff *skb, int err);
+int nfc_llcp_disconnect(struct nfc_llcp_sock *sock);
+int nfc_llcp_send_symm(struct nfc_dev *dev);
+int nfc_llcp_send_connect(struct nfc_llcp_sock *sock);
+int nfc_llcp_send_cc(struct nfc_llcp_sock *sock);
+int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason);
+int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock);
+
+/* Socket API */
+int __init nfc_llcp_sock_init(void);
+void nfc_llcp_sock_exit(void);
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
new file mode 100644 (file)
index 0000000..f738ccd
--- /dev/null
@@ -0,0 +1,675 @@
+/*
+ * Copyright (C) 2011  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) "llcp: %s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nfc.h>
+
+#include "../nfc.h"
+#include "llcp.h"
+
+static struct proto llcp_sock_proto = {
+       .name     = "NFC_LLCP",
+       .owner    = THIS_MODULE,
+       .obj_size = sizeof(struct nfc_llcp_sock),
+};
+
+static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
+{
+       struct sock *sk = sock->sk;
+       struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+       struct nfc_llcp_local *local;
+       struct nfc_dev *dev;
+       struct sockaddr_nfc_llcp llcp_addr;
+       int len, ret = 0;
+
+       pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family);
+
+       if (!addr || addr->sa_family != AF_NFC)
+               return -EINVAL;
+
+       memset(&llcp_addr, 0, sizeof(llcp_addr));
+       len = min_t(unsigned int, sizeof(llcp_addr), alen);
+       memcpy(&llcp_addr, addr, len);
+
+       /* This is going to be a listening socket, dsap must be 0 */
+       if (llcp_addr.dsap != 0)
+               return -EINVAL;
+
+       lock_sock(sk);
+
+       if (sk->sk_state != LLCP_CLOSED) {
+               ret = -EBADFD;
+               goto error;
+       }
+
+       dev = nfc_get_device(llcp_addr.dev_idx);
+       if (dev == NULL) {
+               ret = -ENODEV;
+               goto error;
+       }
+
+       local = nfc_llcp_find_local(dev);
+       if (local == NULL) {
+               ret = -ENODEV;
+               goto put_dev;
+       }
+
+       llcp_sock->dev = dev;
+       llcp_sock->local = local;
+       llcp_sock->nfc_protocol = llcp_addr.nfc_protocol;
+       llcp_sock->service_name_len = min_t(unsigned int,
+                       llcp_addr.service_name_len, NFC_LLCP_MAX_SERVICE_NAME);
+       llcp_sock->service_name = kmemdup(llcp_addr.service_name,
+                               llcp_sock->service_name_len, GFP_KERNEL);
+
+       llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
+       if (llcp_sock->ssap == LLCP_MAX_SAP) {
+               ret = -EADDRINUSE;
+               goto put_dev;
+       }
+
+       local->sockets[llcp_sock->ssap] = llcp_sock;
+
+       pr_debug("Socket bound to SAP %d\n", llcp_sock->ssap);
+
+       sk->sk_state = LLCP_BOUND;
+
+put_dev:
+       nfc_put_device(dev);
+
+error:
+       release_sock(sk);
+       return ret;
+}
+
+static int llcp_sock_listen(struct socket *sock, int backlog)
+{
+       struct sock *sk = sock->sk;
+       int ret = 0;
+
+       pr_debug("sk %p backlog %d\n", sk, backlog);
+
+       lock_sock(sk);
+
+       if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
+                       || sk->sk_state != LLCP_BOUND) {
+               ret = -EBADFD;
+               goto error;
+       }
+
+       sk->sk_max_ack_backlog = backlog;
+       sk->sk_ack_backlog = 0;
+
+       pr_debug("Socket listening\n");
+       sk->sk_state = LLCP_LISTEN;
+
+error:
+       release_sock(sk);
+
+       return ret;
+}
+
+void nfc_llcp_accept_unlink(struct sock *sk)
+{
+       struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+
+       pr_debug("state %d\n", sk->sk_state);
+
+       list_del_init(&llcp_sock->accept_queue);
+       sk_acceptq_removed(llcp_sock->parent);
+       llcp_sock->parent = NULL;
+
+       sock_put(sk);
+}
+
+void nfc_llcp_accept_enqueue(struct sock *parent, struct sock *sk)
+{
+       struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+       struct nfc_llcp_sock *llcp_sock_parent = nfc_llcp_sock(parent);
+
+       /* The reference taken here is released in nfc_llcp_accept_unlink() */
+       sock_hold(sk);
+
+       list_add_tail(&llcp_sock->accept_queue,
+                       &llcp_sock_parent->accept_queue);
+       llcp_sock->parent = parent;
+       sk_acceptq_added(parent);
+}
+
+struct sock *nfc_llcp_accept_dequeue(struct sock *parent,
+                                       struct socket *newsock)
+{
+       struct nfc_llcp_sock *lsk, *n, *llcp_parent;
+       struct sock *sk;
+
+       llcp_parent = nfc_llcp_sock(parent);
+
+       list_for_each_entry_safe(lsk, n, &llcp_parent->accept_queue,
+                                                       accept_queue) {
+               sk = &lsk->sk;
+               lock_sock(sk);
+
+               if (sk->sk_state == LLCP_CLOSED) {
+                       release_sock(sk);
+                       nfc_llcp_accept_unlink(sk);
+                       continue;
+               }
+
+               if (sk->sk_state == LLCP_CONNECTED || !newsock) {
+                       nfc_llcp_accept_unlink(sk);
+                       if (newsock)
+                               sock_graft(sk, newsock);
+
+                       release_sock(sk);
+
+                       pr_debug("Returning sk state %d\n", sk->sk_state);
+
+                       return sk;
+               }
+
+               release_sock(sk);
+       }
+
+       return NULL;
+}
+
+static int llcp_sock_accept(struct socket *sock, struct socket *newsock,
+                                                               int flags)
+{
+       DECLARE_WAITQUEUE(wait, current);
+       struct sock *sk = sock->sk, *new_sk;
+       long timeo;
+       int ret = 0;
+
+       pr_debug("parent %p\n", sk);
+
+       lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+
+       if (sk->sk_state != LLCP_LISTEN) {
+               ret = -EBADFD;
+               goto error;
+       }
+
+       timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+
+       /* Wait for an incoming connection. */
+       add_wait_queue_exclusive(sk_sleep(sk), &wait);
+       while (!(new_sk = nfc_llcp_accept_dequeue(sk, newsock))) {
+               set_current_state(TASK_INTERRUPTIBLE);
+
+               if (!timeo) {
+                       ret = -EAGAIN;
+                       break;
+               }
+
+               if (signal_pending(current)) {
+                       ret = sock_intr_errno(timeo);
+                       break;
+               }
+
+               release_sock(sk);
+               timeo = schedule_timeout(timeo);
+               lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+       }
+       __set_current_state(TASK_RUNNING);
+       remove_wait_queue(sk_sleep(sk), &wait);
+
+       if (ret)
+               goto error;
+
+       newsock->state = SS_CONNECTED;
+
+       pr_debug("new socket %p\n", new_sk);
+
+error:
+       release_sock(sk);
+
+       return ret;
+}
+
+static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr,
+                            int *len, int peer)
+{
+       struct sockaddr_nfc_llcp *llcp_addr = (struct sockaddr_nfc_llcp *) addr;
+       struct sock *sk = sock->sk;
+       struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+
+       pr_debug("%p\n", sk);
+
+       addr->sa_family = AF_NFC;
+       *len = sizeof(struct sockaddr_nfc_llcp);
+
+       llcp_addr->dev_idx = llcp_sock->dev->idx;
+       llcp_addr->dsap = llcp_sock->dsap;
+       llcp_addr->ssap = llcp_sock->ssap;
+       llcp_addr->service_name_len = llcp_sock->service_name_len;
+       memcpy(llcp_addr->service_name, llcp_sock->service_name,
+                                       llcp_addr->service_name_len);
+
+       return 0;
+}
+
+static inline unsigned int llcp_accept_poll(struct sock *parent)
+{
+       struct nfc_llcp_sock *llcp_sock, *n, *parent_sock;
+       struct sock *sk;
+
+       parent_sock = nfc_llcp_sock(parent);
+
+       list_for_each_entry_safe(llcp_sock, n, &parent_sock->accept_queue,
+                                                               accept_queue) {
+               sk = &llcp_sock->sk;
+
+               if (sk->sk_state == LLCP_CONNECTED)
+                       return POLLIN | POLLRDNORM;
+       }
+
+       return 0;
+}
+
+static unsigned int llcp_sock_poll(struct file *file, struct socket *sock,
+                                                       poll_table *wait)
+{
+       struct sock *sk = sock->sk;
+       unsigned int mask = 0;
+
+       pr_debug("%p\n", sk);
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+
+       if (sk->sk_state == LLCP_LISTEN)
+               return llcp_accept_poll(sk);
+
+       if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+               mask |= POLLERR;
+
+       if (!skb_queue_empty(&sk->sk_receive_queue))
+               mask |= POLLIN;
+
+       if (sk->sk_state == LLCP_CLOSED)
+               mask |= POLLHUP;
+
+       return mask;
+}
+
+static int llcp_sock_release(struct socket *sock)
+{
+       struct sock *sk = sock->sk;
+       struct nfc_llcp_local *local;
+       struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+
+       if (!sk)
+               return 0;
+
+       pr_debug("%p\n", sk);
+
+       local = llcp_sock->local;
+       if (local == NULL)
+               return -ENODEV;
+
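+       /*
+        * Unlink the socket: either clear its slot in the local SAP array,
+        * or remove it from the list of sockets sharing its parent's SAP.
+        */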
+       mutex_lock(&local->socket_lock);
+
+       if (llcp_sock == local->sockets[llcp_sock->ssap]) {
+               local->sockets[llcp_sock->ssap] = NULL;
+       } else {
+               struct nfc_llcp_sock *parent, *s, *n;
+
+               parent = local->sockets[llcp_sock->ssap];
+
+               list_for_each_entry_safe(s, n, &parent->list, list)
+                       if (llcp_sock == s) {
+                               list_del(&s->list);
+                               break;
+                       }
+
+       }
+
+       mutex_unlock(&local->socket_lock);
+
+       lock_sock(sk);
+
+       /* Send a DISC */
+       if (sk->sk_state == LLCP_CONNECTED)
+               nfc_llcp_disconnect(llcp_sock);
+
+       if (sk->sk_state == LLCP_LISTEN) {
+               struct nfc_llcp_sock *lsk, *n;
+               struct sock *accept_sk;
+
+               list_for_each_entry_safe(lsk, n, &llcp_sock->accept_queue,
+                                                               accept_queue) {
+                       accept_sk = &lsk->sk;
+                       lock_sock(accept_sk);
+
+                       nfc_llcp_disconnect(lsk);
+                       nfc_llcp_accept_unlink(accept_sk);
+
+                       release_sock(accept_sk);
+
+                       sock_set_flag(accept_sk, SOCK_DEAD);
+                       sock_orphan(accept_sk);
+                       sock_put(accept_sk);
+               }
+       }
+
+       /* Freeing the SAP */
+       if ((sk->sk_state == LLCP_CONNECTED
+                       && llcp_sock->ssap > LLCP_LOCAL_SAP_OFFSET) ||
+           sk->sk_state == LLCP_BOUND ||
+           sk->sk_state == LLCP_LISTEN)
+               nfc_llcp_put_ssap(llcp_sock->local, llcp_sock->ssap);
+
+       sock_set_flag(sk, SOCK_DEAD);
+
+       release_sock(sk);
+
+       sock_orphan(sk);
+       sock_put(sk);
+
+       return 0;
+}
+
+static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
+                                                       int len, int flags)
+{
+       struct sock *sk = sock->sk;
+       struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+       struct sockaddr_nfc_llcp *addr = (struct sockaddr_nfc_llcp *)_addr;
+       struct nfc_dev *dev;
+       struct nfc_llcp_local *local;
+       int ret = 0;
+
+       pr_debug("sock %p sk %p flags 0x%x\n", sock, sk, flags);
+
+       if (!addr || len < sizeof(struct sockaddr_nfc) ||
+                       addr->sa_family != AF_NFC) {
+               pr_err("Invalid socket\n");
+               return -EINVAL;
+       }
+
+       if (addr->service_name_len == 0 && addr->dsap == 0) {
+               pr_err("Missing service name or dsap\n");
+               return -EINVAL;
+       }
+
+       pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n", addr->dev_idx,
+                                       addr->target_idx, addr->nfc_protocol);
+
+       lock_sock(sk);
+
+       if (sk->sk_state == LLCP_CONNECTED) {
+               ret = -EISCONN;
+               goto error;
+       }
+
+       dev = nfc_get_device(addr->dev_idx);
+       if (dev == NULL) {
+               ret = -ENODEV;
+               goto error;
+       }
+
+       local = nfc_llcp_find_local(dev);
+       if (local == NULL) {
+               ret = -ENODEV;
+               goto put_dev;
+       }
+
+       device_lock(&dev->dev);
+       if (dev->dep_link_up == false) {
+               ret = -ENOLINK;
+               device_unlock(&dev->dev);
+               goto put_dev;
+       }
+       device_unlock(&dev->dev);
+
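+       /* As the initiator, only connect to the currently activated target. */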
+       if (local->rf_mode == NFC_RF_INITIATOR &&
+                       addr->target_idx != local->target_idx) {
+               ret = -ENOLINK;
+               goto put_dev;
+       }
+
+       llcp_sock->dev = dev;
+       llcp_sock->local = local;
+       llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
+       if (llcp_sock->ssap == LLCP_SAP_MAX) {
+               ret = -ENOMEM;
+               goto put_dev;
+       }
+       if (addr->service_name_len == 0)
+               llcp_sock->dsap = addr->dsap;
+       else
+               llcp_sock->dsap = LLCP_SAP_SDP;
+       llcp_sock->nfc_protocol = addr->nfc_protocol;
+       llcp_sock->service_name_len = min_t(unsigned int,
+                       addr->service_name_len, NFC_LLCP_MAX_SERVICE_NAME);
+       llcp_sock->service_name = kmemdup(addr->service_name,
+                                llcp_sock->service_name_len, GFP_KERNEL);
+
+       local->sockets[llcp_sock->ssap] = llcp_sock;
+
+       ret = nfc_llcp_send_connect(llcp_sock);
+       if (ret)
+               goto put_dev;
+
+       sk->sk_state = LLCP_CONNECTED;
+
+       release_sock(sk);
+       return 0;
+
+put_dev:
+       nfc_put_device(dev);
+
+error:
+       release_sock(sk);
+       return ret;
+}
+
+static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+                            struct msghdr *msg, size_t len, int flags)
+{
+       int noblock = flags & MSG_DONTWAIT;
+       struct sock *sk = sock->sk;
+       unsigned int copied, rlen;
+       struct sk_buff *skb, *cskb;
+       int err = 0;
+
+       pr_debug("%p %zu\n", sk, len);
+
+       lock_sock(sk);
+
+       if (sk->sk_state == LLCP_CLOSED &&
+                       skb_queue_empty(&sk->sk_receive_queue)) {
+               release_sock(sk);
+               return 0;
+       }
+
+       release_sock(sk);
+
+       if (flags & (MSG_OOB))
+               return -EOPNOTSUPP;
+
+       skb = skb_recv_datagram(sk, flags, noblock, &err);
+       if (!skb) {
+               pr_err("Recv datagram failed state %d %d %d\n",
+                               sk->sk_state, err, sock_error(sk));
+
+               if (sk->sk_shutdown & RCV_SHUTDOWN)
+                       return 0;
+
+               return err;
+       }
+
+       rlen   = skb->len;              /* real length of skb */
+       copied = min_t(unsigned int, rlen, len);
+
+       cskb = skb;
+       if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
+               if (!(flags & MSG_PEEK))
+                       skb_queue_head(&sk->sk_receive_queue, skb);
+               return -EFAULT;
+       }
+
+       /* Mark read part of skb as used */
+       if (!(flags & MSG_PEEK)) {
+
+               /* SOCK_STREAM: re-queue skb if it contains unreceived data */
+               if (sk->sk_type == SOCK_STREAM) {
+                       skb_pull(skb, copied);
+                       if (skb->len) {
+                               skb_queue_head(&sk->sk_receive_queue, skb);
+                               goto done;
+                       }
+               }
+
+               kfree_skb(skb);
+       }
+
+       /* XXX Queue backlogged skbs */
+
+done:
+       /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
+       if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
+               copied = rlen;
+
+       return copied;
+}
+
+static const struct proto_ops llcp_sock_ops = {
+       .family         = PF_NFC,
+       .owner          = THIS_MODULE,
+       .bind           = llcp_sock_bind,
+       .connect        = llcp_sock_connect,
+       .release        = llcp_sock_release,
+       .socketpair     = sock_no_socketpair,
+       .accept         = llcp_sock_accept,
+       .getname        = llcp_sock_getname,
+       .poll           = llcp_sock_poll,
+       .ioctl          = sock_no_ioctl,
+       .listen         = llcp_sock_listen,
+       .shutdown       = sock_no_shutdown,
+       .setsockopt     = sock_no_setsockopt,
+       .getsockopt     = sock_no_getsockopt,
+       .sendmsg        = sock_no_sendmsg,
+       .recvmsg        = llcp_sock_recvmsg,
+       .mmap           = sock_no_mmap,
+};
+
+static void llcp_sock_destruct(struct sock *sk)
+{
+       struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+
+       pr_debug("%p\n", sk);
+
+       if (sk->sk_state == LLCP_CONNECTED)
+               nfc_put_device(llcp_sock->dev);
+
+       skb_queue_purge(&sk->sk_receive_queue);
+
+       nfc_llcp_sock_free(llcp_sock);
+
+       if (!sock_flag(sk, SOCK_DEAD)) {
+               pr_err("Freeing alive NFC LLCP socket %p\n", sk);
+               return;
+       }
+}
+
+struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp)
+{
+       struct sock *sk;
+       struct nfc_llcp_sock *llcp_sock;
+
+       sk = sk_alloc(&init_net, PF_NFC, gfp, &llcp_sock_proto);
+       if (!sk)
+               return NULL;
+
+       llcp_sock = nfc_llcp_sock(sk);
+
+       sock_init_data(sock, sk);
+       sk->sk_state = LLCP_CLOSED;
+       sk->sk_protocol = NFC_SOCKPROTO_LLCP;
+       sk->sk_type = type;
+       sk->sk_destruct = llcp_sock_destruct;
+
+       llcp_sock->ssap = 0;
+       llcp_sock->dsap = LLCP_SAP_SDP;
+       llcp_sock->send_n = llcp_sock->send_ack_n = 0;
+       llcp_sock->recv_n = llcp_sock->recv_ack_n = 0;
+       llcp_sock->remote_ready = 1;
+       skb_queue_head_init(&llcp_sock->tx_queue);
+       skb_queue_head_init(&llcp_sock->tx_pending_queue);
+       skb_queue_head_init(&llcp_sock->tx_backlog_queue);
+       INIT_LIST_HEAD(&llcp_sock->list);
+       INIT_LIST_HEAD(&llcp_sock->accept_queue);
+
+       if (sock != NULL)
+               sock->state = SS_UNCONNECTED;
+
+       return sk;
+}
+
+void nfc_llcp_sock_free(struct nfc_llcp_sock *sock)
+{
+       kfree(sock->service_name);
+
+       skb_queue_purge(&sock->tx_queue);
+       skb_queue_purge(&sock->tx_pending_queue);
+       skb_queue_purge(&sock->tx_backlog_queue);
+
+       list_del_init(&sock->accept_queue);
+
+       sock->parent = NULL;
+}
+
+static int llcp_sock_create(struct net *net, struct socket *sock,
+                               const struct nfc_protocol *nfc_proto)
+{
+       struct sock *sk;
+
+       pr_debug("%p\n", sock);
+
+       if (sock->type != SOCK_STREAM && sock->type != SOCK_DGRAM)
+               return -ESOCKTNOSUPPORT;
+
+       sock->ops = &llcp_sock_ops;
+
+       sk = nfc_llcp_sock_alloc(sock, sock->type, GFP_ATOMIC);
+       if (sk == NULL)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static const struct nfc_protocol llcp_nfc_proto = {
+       .id       = NFC_SOCKPROTO_LLCP,
+       .proto    = &llcp_sock_proto,
+       .owner    = THIS_MODULE,
+       .create   = llcp_sock_create
+};
+
+int __init nfc_llcp_sock_init(void)
+{
+       return nfc_proto_register(&llcp_nfc_proto);
+}
+
+void nfc_llcp_sock_exit(void)
+{
+       nfc_proto_unregister(&llcp_nfc_proto);
+}
index fe5ca89..2deb4ae 100644 (file)
@@ -25,6 +25,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
 #include <linux/types.h>
 #include <linux/workqueue.h>
 #include <linux/completion.h>
@@ -69,7 +71,7 @@ static int __nci_request(struct nci_dev *ndev,
        __u32 timeout)
 {
        int rc = 0;
-       unsigned long completion_rc;
+       long completion_rc;
 
        ndev->req_status = NCI_REQ_PEND;
 
@@ -79,7 +81,7 @@ static int __nci_request(struct nci_dev *ndev,
                                                        &ndev->req_completion,
                                                        timeout);
 
-       nfc_dbg("wait_for_completion return %ld", completion_rc);
+       pr_debug("wait_for_completion return %ld\n", completion_rc);
 
        if (completion_rc > 0) {
                switch (ndev->req_status) {
@@ -96,8 +98,8 @@ static int __nci_request(struct nci_dev *ndev,
                        break;
                }
        } else {
-               nfc_err("wait_for_completion_interruptible_timeout failed %ld",
-                       completion_rc);
+               pr_err("wait_for_completion_interruptible_timeout failed %ld\n",
+                      completion_rc);
 
                rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc));
        }
@@ -323,8 +325,6 @@ static void nci_cmd_timer(unsigned long arg)
 {
        struct nci_dev *ndev = (void *) arg;
 
-       nfc_dbg("entry");
-
        atomic_set(&ndev->cmd_cnt, 1);
        queue_work(ndev->cmd_wq, &ndev->cmd_work);
 }
@@ -333,8 +333,6 @@ static int nci_dev_up(struct nfc_dev *nfc_dev)
 {
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
 
-       nfc_dbg("entry");
-
        return nci_open_device(ndev);
 }
 
@@ -342,8 +340,6 @@ static int nci_dev_down(struct nfc_dev *nfc_dev)
 {
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
 
-       nfc_dbg("entry");
-
        return nci_close_device(ndev);
 }
 
@@ -352,20 +348,18 @@ static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
        int rc;
 
-       nfc_dbg("entry");
-
        if (test_bit(NCI_DISCOVERY, &ndev->flags)) {
-               nfc_err("unable to start poll, since poll is already active");
+               pr_err("unable to start poll, since poll is already active\n");
                return -EBUSY;
        }
 
        if (ndev->target_active_prot) {
-               nfc_err("there is an active target");
+               pr_err("there is an active target\n");
                return -EBUSY;
        }
 
        if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
-               nfc_dbg("target is active, implicitly deactivate...");
+               pr_debug("target is active, implicitly deactivate...\n");
 
                rc = nci_request(ndev, nci_rf_deactivate_req, 0,
                        msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
@@ -386,10 +380,8 @@ static void nci_stop_poll(struct nfc_dev *nfc_dev)
 {
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
 
-       nfc_dbg("entry");
-
        if (!test_bit(NCI_DISCOVERY, &ndev->flags)) {
-               nfc_err("unable to stop poll, since poll is not active");
+               pr_err("unable to stop poll, since poll is not active\n");
                return;
        }
 
@@ -402,21 +394,21 @@ static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
 {
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
 
-       nfc_dbg("entry, target_idx %d, protocol 0x%x", target_idx, protocol);
+       pr_debug("target_idx %d, protocol 0x%x\n", target_idx, protocol);
 
        if (!test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
-               nfc_err("there is no available target to activate");
+               pr_err("there is no available target to activate\n");
                return -EINVAL;
        }
 
        if (ndev->target_active_prot) {
-               nfc_err("there is already an active target");
+               pr_err("there is already an active target\n");
                return -EBUSY;
        }
 
        if (!(ndev->target_available_prots & (1 << protocol))) {
-               nfc_err("target does not support the requested protocol 0x%x",
-                       protocol);
+               pr_err("target does not support the requested protocol 0x%x\n",
+                      protocol);
                return -EINVAL;
        }
 
@@ -430,10 +422,10 @@ static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
 {
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
 
-       nfc_dbg("entry, target_idx %d", target_idx);
+       pr_debug("target_idx %d\n", target_idx);
 
        if (!ndev->target_active_prot) {
-               nfc_err("unable to deactivate target, no active target");
+               pr_err("unable to deactivate target, no active target\n");
                return;
        }
 
@@ -453,10 +445,10 @@ static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx,
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
        int rc;
 
-       nfc_dbg("entry, target_idx %d, len %d", target_idx, skb->len);
+       pr_debug("target_idx %d, len %d\n", target_idx, skb->len);
 
        if (!ndev->target_active_prot) {
-               nfc_err("unable to exchange data, no active target");
+               pr_err("unable to exchange data, no active target\n");
                return -EINVAL;
        }
 
@@ -499,7 +491,7 @@ struct nci_dev *nci_allocate_device(struct nci_ops *ops,
 {
        struct nci_dev *ndev;
 
-       nfc_dbg("entry, supported_protocols 0x%x", supported_protocols);
+       pr_debug("supported_protocols 0x%x\n", supported_protocols);
 
        if (!ops->open || !ops->close || !ops->send)
                return NULL;
@@ -539,8 +531,6 @@ EXPORT_SYMBOL(nci_allocate_device);
  */
 void nci_free_device(struct nci_dev *ndev)
 {
-       nfc_dbg("entry");
-
        nfc_free_device(ndev->nfc_dev);
        kfree(ndev);
 }
@@ -557,8 +547,6 @@ int nci_register_device(struct nci_dev *ndev)
        struct device *dev = &ndev->nfc_dev->dev;
        char name[32];
 
-       nfc_dbg("entry");
-
        rc = nfc_register_device(ndev->nfc_dev);
        if (rc)
                goto exit;
@@ -621,8 +609,6 @@ EXPORT_SYMBOL(nci_register_device);
  */
 void nci_unregister_device(struct nci_dev *ndev)
 {
-       nfc_dbg("entry");
-
        nci_close_device(ndev);
 
        destroy_workqueue(ndev->cmd_wq);
@@ -642,7 +628,7 @@ int nci_recv_frame(struct sk_buff *skb)
 {
        struct nci_dev *ndev = (struct nci_dev *) skb->dev;
 
-       nfc_dbg("entry, len %d", skb->len);
+       pr_debug("len %d\n", skb->len);
 
        if (!ndev || (!test_bit(NCI_UP, &ndev->flags)
                && !test_bit(NCI_INIT, &ndev->flags))) {
@@ -662,7 +648,7 @@ static int nci_send_frame(struct sk_buff *skb)
 {
        struct nci_dev *ndev = (struct nci_dev *) skb->dev;
 
-       nfc_dbg("entry, len %d", skb->len);
+       pr_debug("len %d\n", skb->len);
 
        if (!ndev) {
                kfree_skb(skb);
@@ -681,11 +667,11 @@ int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
        struct nci_ctrl_hdr *hdr;
        struct sk_buff *skb;
 
-       nfc_dbg("entry, opcode 0x%x, plen %d", opcode, plen);
+       pr_debug("opcode 0x%x, plen %d\n", opcode, plen);
 
        skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
        if (!skb) {
-               nfc_err("no memory for command");
+               pr_err("no memory for command\n");
                return -ENOMEM;
        }
 
@@ -715,7 +701,7 @@ static void nci_tx_work(struct work_struct *work)
        struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
        struct sk_buff *skb;
 
-       nfc_dbg("entry, credits_cnt %d", atomic_read(&ndev->credits_cnt));
+       pr_debug("credits_cnt %d\n", atomic_read(&ndev->credits_cnt));
 
        /* Send queued tx data */
        while (atomic_read(&ndev->credits_cnt)) {
@@ -728,10 +714,10 @@ static void nci_tx_work(struct work_struct *work)
                                NCI_DATA_FLOW_CONTROL_NOT_USED)
                        atomic_dec(&ndev->credits_cnt);
 
-               nfc_dbg("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d",
-                               nci_pbf(skb->data),
-                               nci_conn_id(skb->data),
-                               nci_plen(skb->data));
+               pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n",
+                        nci_pbf(skb->data),
+                        nci_conn_id(skb->data),
+                        nci_plen(skb->data));
 
                nci_send_frame(skb);
        }
@@ -760,7 +746,7 @@ static void nci_rx_work(struct work_struct *work)
                        break;
 
                default:
-                       nfc_err("unknown MT 0x%x", nci_mt(skb->data));
+                       pr_err("unknown MT 0x%x\n", nci_mt(skb->data));
                        kfree_skb(skb);
                        break;
                }
@@ -774,7 +760,7 @@ static void nci_cmd_work(struct work_struct *work)
        struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
        struct sk_buff *skb;
 
-       nfc_dbg("entry, cmd_cnt %d", atomic_read(&ndev->cmd_cnt));
+       pr_debug("cmd_cnt %d\n", atomic_read(&ndev->cmd_cnt));
 
        /* Send queued command */
        if (atomic_read(&ndev->cmd_cnt)) {
@@ -784,11 +770,11 @@ static void nci_cmd_work(struct work_struct *work)
 
                atomic_dec(&ndev->cmd_cnt);
 
-               nfc_dbg("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
-                               nci_pbf(skb->data),
-                               nci_opcode_gid(nci_opcode(skb->data)),
-                               nci_opcode_oid(nci_opcode(skb->data)),
-                               nci_plen(skb->data));
+               pr_debug("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
+                        nci_pbf(skb->data),
+                        nci_opcode_gid(nci_opcode(skb->data)),
+                        nci_opcode_oid(nci_opcode(skb->data)),
+                        nci_plen(skb->data));
 
                nci_send_frame(skb);
 
index 511fb96..e5756b3 100644 (file)
@@ -21,6 +21,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
 #include <linux/types.h>
 #include <linux/interrupt.h>
 #include <linux/wait.h>
@@ -40,7 +42,7 @@ void nci_data_exchange_complete(struct nci_dev *ndev,
        data_exchange_cb_t cb = ndev->data_exchange_cb;
        void *cb_context = ndev->data_exchange_cb_context;
 
-       nfc_dbg("entry, len %d, err %d", ((skb) ? (skb->len) : (0)), err);
+       pr_debug("len %d, err %d\n", skb ? skb->len : 0, err);
 
        if (cb) {
                ndev->data_exchange_cb = NULL;
@@ -49,7 +51,7 @@ void nci_data_exchange_complete(struct nci_dev *ndev,
                /* forward skb to nfc core */
                cb(cb_context, skb, err);
        } else if (skb) {
-               nfc_err("no rx callback, dropping rx data...");
+               pr_err("no rx callback, dropping rx data...\n");
 
                /* no waiting callback, free skb */
                kfree_skb(skb);
@@ -90,7 +92,7 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev,
        int frag_len;
        int rc = 0;
 
-       nfc_dbg("entry, conn_id 0x%x, total_len %d", conn_id, total_len);
+       pr_debug("conn_id 0x%x, total_len %d\n", conn_id, total_len);
 
        __skb_queue_head_init(&frags_q);
 
@@ -119,8 +121,8 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev,
                data += frag_len;
                total_len -= frag_len;
 
-               nfc_dbg("frag_len %d, remaining total_len %d",
-                       frag_len, total_len);
+               pr_debug("frag_len %d, remaining total_len %d\n",
+                        frag_len, total_len);
        }
 
        /* queue all fragments atomically */
@@ -149,7 +151,7 @@ int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb)
 {
        int rc = 0;
 
-       nfc_dbg("entry, conn_id 0x%x, plen %d", conn_id, skb->len);
+       pr_debug("conn_id 0x%x, plen %d\n", conn_id, skb->len);
 
        /* check if the packet need to be fragmented */
        if (skb->len <= ndev->max_data_pkt_payload_size) {
@@ -161,7 +163,7 @@ int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb)
                /* fragment packet and queue the fragments */
                rc = nci_queue_tx_data_frags(ndev, conn_id, skb);
                if (rc) {
-                       nfc_err("failed to fragment tx data packet");
+                       pr_err("failed to fragment tx data packet\n");
                        goto free_exit;
                }
        }
@@ -191,7 +193,7 @@ static void nci_add_rx_data_frag(struct nci_dev *ndev,
 
                /* first, make enough room for the already accumulated data */
                if (skb_cow_head(skb, reassembly_len)) {
-                       nfc_err("error adding room for accumulated rx data");
+                       pr_err("error adding room for accumulated rx data\n");
 
                        kfree_skb(skb);
                        skb = 0;
@@ -228,19 +230,19 @@ void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb)
 {
        __u8 pbf = nci_pbf(skb->data);
 
-       nfc_dbg("entry, len %d", skb->len);
+       pr_debug("len %d\n", skb->len);
 
-       nfc_dbg("NCI RX: MT=data, PBF=%d, conn_id=%d, plen=%d",
-                       nci_pbf(skb->data),
-                       nci_conn_id(skb->data),
-                       nci_plen(skb->data));
+       pr_debug("NCI RX: MT=data, PBF=%d, conn_id=%d, plen=%d\n",
+                nci_pbf(skb->data),
+                nci_conn_id(skb->data),
+                nci_plen(skb->data));
 
        /* strip the nci data header */
        skb_pull(skb, NCI_DATA_HDR_SIZE);
 
        if (ndev->target_active_prot == NFC_PROTO_MIFARE) {
                /* frame I/F => remove the status byte */
-               nfc_dbg("NFC_PROTO_MIFARE => remove the status byte");
+               pr_debug("NFC_PROTO_MIFARE => remove the status byte\n");
                skb_trim(skb, (skb->len - 1));
        }
 
index c1bf541..003846b 100644 (file)
@@ -25,6 +25,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
 #include <linux/types.h>
 #include <linux/interrupt.h>
 #include <linux/bitops.h>
@@ -43,16 +45,16 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
        struct nci_core_conn_credit_ntf *ntf = (void *) skb->data;
        int i;
 
-       nfc_dbg("entry, num_entries %d", ntf->num_entries);
+       pr_debug("num_entries %d\n", ntf->num_entries);
 
        if (ntf->num_entries > NCI_MAX_NUM_CONN)
                ntf->num_entries = NCI_MAX_NUM_CONN;
 
        /* update the credits */
        for (i = 0; i < ntf->num_entries; i++) {
-               nfc_dbg("entry[%d]: conn_id %d, credits %d", i,
-                       ntf->conn_entries[i].conn_id,
-                       ntf->conn_entries[i].credits);
+               pr_debug("entry[%d]: conn_id %d, credits %d\n",
+                        i, ntf->conn_entries[i].conn_id,
+                        ntf->conn_entries[i].credits);
 
                if (ntf->conn_entries[i].conn_id == NCI_STATIC_RF_CONN_ID) {
                        /* found static rf connection */
@@ -78,9 +80,8 @@ static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
 
        nfca_poll->nfcid1_len = *data++;
 
-       nfc_dbg("sens_res 0x%x, nfcid1_len %d",
-               nfca_poll->sens_res,
-               nfca_poll->nfcid1_len);
+       pr_debug("sens_res 0x%x, nfcid1_len %d\n",
+                nfca_poll->sens_res, nfca_poll->nfcid1_len);
 
        memcpy(nfca_poll->nfcid1, data, nfca_poll->nfcid1_len);
        data += nfca_poll->nfcid1_len;
@@ -90,9 +91,9 @@ static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
        if (nfca_poll->sel_res_len != 0)
                nfca_poll->sel_res = *data++;
 
-       nfc_dbg("sel_res_len %d, sel_res 0x%x",
-               nfca_poll->sel_res_len,
-               nfca_poll->sel_res);
+       pr_debug("sel_res_len %d, sel_res 0x%x\n",
+                nfca_poll->sel_res_len,
+                nfca_poll->sel_res);
 
        return data;
 }
@@ -114,8 +115,8 @@ static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
                break;
 
        default:
-               nfc_err("unsupported activation_rf_tech_and_mode 0x%x",
-                       ntf->activation_rf_tech_and_mode);
+               pr_err("unsupported activation_rf_tech_and_mode 0x%x\n",
+                      ntf->activation_rf_tech_and_mode);
                return -EPROTO;
        }
 
@@ -138,12 +139,12 @@ static void nci_target_found(struct nci_dev *ndev,
        nfc_tgt.sel_res = ntf->rf_tech_specific_params.nfca_poll.sel_res;
 
        if (!(nfc_tgt.supported_protocols & ndev->poll_prots)) {
-               nfc_dbg("the target found does not have the desired protocol");
+               pr_debug("the target found does not have the desired protocol\n");
                return;
        }
 
-       nfc_dbg("new target found,  supported_protocols 0x%x",
-               nfc_tgt.supported_protocols);
+       pr_debug("new target found,  supported_protocols 0x%x\n",
+                nfc_tgt.supported_protocols);
 
        ndev->target_available_prots = nfc_tgt.supported_protocols;
 
@@ -166,13 +167,13 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
        ntf.activation_rf_tech_and_mode = *data++;
        ntf.rf_tech_specific_params_len = *data++;
 
-       nfc_dbg("rf_discovery_id %d", ntf.rf_discovery_id);
-       nfc_dbg("rf_interface_type 0x%x", ntf.rf_interface_type);
-       nfc_dbg("rf_protocol 0x%x", ntf.rf_protocol);
-       nfc_dbg("activation_rf_tech_and_mode 0x%x",
-               ntf.activation_rf_tech_and_mode);
-       nfc_dbg("rf_tech_specific_params_len %d",
-               ntf.rf_tech_specific_params_len);
+       pr_debug("rf_discovery_id %d\n", ntf.rf_discovery_id);
+       pr_debug("rf_interface_type 0x%x\n", ntf.rf_interface_type);
+       pr_debug("rf_protocol 0x%x\n", ntf.rf_protocol);
+       pr_debug("activation_rf_tech_and_mode 0x%x\n",
+                ntf.activation_rf_tech_and_mode);
+       pr_debug("rf_tech_specific_params_len %d\n",
+                ntf.rf_tech_specific_params_len);
 
        if (ntf.rf_tech_specific_params_len > 0) {
                switch (ntf.activation_rf_tech_and_mode) {
@@ -182,8 +183,8 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
                        break;
 
                default:
-                       nfc_err("unsupported activation_rf_tech_and_mode 0x%x",
-                               ntf.activation_rf_tech_and_mode);
+                       pr_err("unsupported activation_rf_tech_and_mode 0x%x\n",
+                              ntf.activation_rf_tech_and_mode);
                        return;
                }
        }
@@ -193,14 +194,14 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
        ntf.data_exch_rx_bit_rate = *data++;
        ntf.activation_params_len = *data++;
 
-       nfc_dbg("data_exch_rf_tech_and_mode 0x%x",
-               ntf.data_exch_rf_tech_and_mode);
-       nfc_dbg("data_exch_tx_bit_rate 0x%x",
-               ntf.data_exch_tx_bit_rate);
-       nfc_dbg("data_exch_rx_bit_rate 0x%x",
-               ntf.data_exch_rx_bit_rate);
-       nfc_dbg("activation_params_len %d",
-               ntf.activation_params_len);
+       pr_debug("data_exch_rf_tech_and_mode 0x%x\n",
+                ntf.data_exch_rf_tech_and_mode);
+       pr_debug("data_exch_tx_bit_rate 0x%x\n",
+                ntf.data_exch_tx_bit_rate);
+       pr_debug("data_exch_rx_bit_rate 0x%x\n",
+                ntf.data_exch_rx_bit_rate);
+       pr_debug("activation_params_len %d\n",
+                ntf.activation_params_len);
 
        if (ntf.activation_params_len > 0) {
                switch (ntf.rf_interface_type) {
@@ -214,8 +215,8 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
                        break;
 
                default:
-                       nfc_err("unsupported rf_interface_type 0x%x",
-                               ntf.rf_interface_type);
+                       pr_err("unsupported rf_interface_type 0x%x\n",
+                              ntf.rf_interface_type);
                        return;
                }
        }
@@ -229,7 +230,7 @@ static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
 {
        struct nci_rf_deactivate_ntf *ntf = (void *) skb->data;
 
-       nfc_dbg("entry, type 0x%x, reason 0x%x", ntf->type, ntf->reason);
+       pr_debug("type 0x%x, reason 0x%x\n", ntf->type, ntf->reason);
 
        clear_bit(NCI_POLL_ACTIVE, &ndev->flags);
        ndev->target_active_prot = 0;
@@ -255,11 +256,11 @@ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
 {
        __u16 ntf_opcode = nci_opcode(skb->data);
 
-       nfc_dbg("NCI RX: MT=ntf, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
-                       nci_pbf(skb->data),
-                       nci_opcode_gid(ntf_opcode),
-                       nci_opcode_oid(ntf_opcode),
-                       nci_plen(skb->data));
+       pr_debug("NCI RX: MT=ntf, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
+                nci_pbf(skb->data),
+                nci_opcode_gid(ntf_opcode),
+                nci_opcode_oid(ntf_opcode),
+                nci_plen(skb->data));
 
        /* strip the nci control header */
        skb_pull(skb, NCI_CTRL_HDR_SIZE);
@@ -278,7 +279,7 @@ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
                break;
 
        default:
-               nfc_err("unknown ntf opcode 0x%x", ntf_opcode);
+               pr_err("unknown ntf opcode 0x%x\n", ntf_opcode);
                break;
        }
 
index 0591f5a..3f444c8 100644 (file)
@@ -25,6 +25,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
 #include <linux/types.h>
 #include <linux/interrupt.h>
 #include <linux/bitops.h>
@@ -40,12 +42,12 @@ static void nci_core_reset_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
 {
        struct nci_core_reset_rsp *rsp = (void *) skb->data;
 
-       nfc_dbg("entry, status 0x%x", rsp->status);
+       pr_debug("status 0x%x\n", rsp->status);
 
        if (rsp->status == NCI_STATUS_OK) {
                ndev->nci_ver = rsp->nci_ver;
-               nfc_dbg("nci_ver 0x%x, config_status 0x%x",
-                       rsp->nci_ver, rsp->config_status);
+               pr_debug("nci_ver 0x%x, config_status 0x%x\n",
+                        rsp->nci_ver, rsp->config_status);
        }
 
        nci_req_complete(ndev, rsp->status);
@@ -56,7 +58,7 @@ static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
        struct nci_core_init_rsp_1 *rsp_1 = (void *) skb->data;
        struct nci_core_init_rsp_2 *rsp_2;
 
-       nfc_dbg("entry, status 0x%x", rsp_1->status);
+       pr_debug("status 0x%x\n", rsp_1->status);
 
        if (rsp_1->status != NCI_STATUS_OK)
                goto exit;
@@ -95,34 +97,34 @@ static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
 
        atomic_set(&ndev->credits_cnt, ndev->initial_num_credits);
 
-       nfc_dbg("nfcc_features 0x%x",
-               ndev->nfcc_features);
-       nfc_dbg("num_supported_rf_interfaces %d",
-               ndev->num_supported_rf_interfaces);
-       nfc_dbg("supported_rf_interfaces[0] 0x%x",
-               ndev->supported_rf_interfaces[0]);
-       nfc_dbg("supported_rf_interfaces[1] 0x%x",
-               ndev->supported_rf_interfaces[1]);
-       nfc_dbg("supported_rf_interfaces[2] 0x%x",
-               ndev->supported_rf_interfaces[2]);
-       nfc_dbg("supported_rf_interfaces[3] 0x%x",
-               ndev->supported_rf_interfaces[3]);
-       nfc_dbg("max_logical_connections %d",
-               ndev->max_logical_connections);
-       nfc_dbg("max_routing_table_size %d",
-               ndev->max_routing_table_size);
-       nfc_dbg("max_ctrl_pkt_payload_len %d",
-               ndev->max_ctrl_pkt_payload_len);
-       nfc_dbg("max_size_for_large_params %d",
-               ndev->max_size_for_large_params);
-       nfc_dbg("max_data_pkt_payload_size %d",
-               ndev->max_data_pkt_payload_size);
-       nfc_dbg("initial_num_credits %d",
-               ndev->initial_num_credits);
-       nfc_dbg("manufact_id 0x%x",
-               ndev->manufact_id);
-       nfc_dbg("manufact_specific_info 0x%x",
-               ndev->manufact_specific_info);
+       pr_debug("nfcc_features 0x%x\n",
+                ndev->nfcc_features);
+       pr_debug("num_supported_rf_interfaces %d\n",
+                ndev->num_supported_rf_interfaces);
+       pr_debug("supported_rf_interfaces[0] 0x%x\n",
+                ndev->supported_rf_interfaces[0]);
+       pr_debug("supported_rf_interfaces[1] 0x%x\n",
+                ndev->supported_rf_interfaces[1]);
+       pr_debug("supported_rf_interfaces[2] 0x%x\n",
+                ndev->supported_rf_interfaces[2]);
+       pr_debug("supported_rf_interfaces[3] 0x%x\n",
+                ndev->supported_rf_interfaces[3]);
+       pr_debug("max_logical_connections %d\n",
+                ndev->max_logical_connections);
+       pr_debug("max_routing_table_size %d\n",
+                ndev->max_routing_table_size);
+       pr_debug("max_ctrl_pkt_payload_len %d\n",
+                ndev->max_ctrl_pkt_payload_len);
+       pr_debug("max_size_for_large_params %d\n",
+                ndev->max_size_for_large_params);
+       pr_debug("max_data_pkt_payload_size %d\n",
+                ndev->max_data_pkt_payload_size);
+       pr_debug("initial_num_credits %d\n",
+                ndev->initial_num_credits);
+       pr_debug("manufact_id 0x%x\n",
+                ndev->manufact_id);
+       pr_debug("manufact_specific_info 0x%x\n",
+                ndev->manufact_specific_info);
 
 exit:
        nci_req_complete(ndev, rsp_1->status);
@@ -133,7 +135,7 @@ static void nci_rf_disc_map_rsp_packet(struct nci_dev *ndev,
 {
        __u8 status = skb->data[0];
 
-       nfc_dbg("entry, status 0x%x", status);
+       pr_debug("status 0x%x\n", status);
 
        nci_req_complete(ndev, status);
 }
@@ -142,7 +144,7 @@ static void nci_rf_disc_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
 {
        __u8 status = skb->data[0];
 
-       nfc_dbg("entry, status 0x%x", status);
+       pr_debug("status 0x%x\n", status);
 
        if (status == NCI_STATUS_OK)
                set_bit(NCI_DISCOVERY, &ndev->flags);
@@ -155,7 +157,7 @@ static void nci_rf_deactivate_rsp_packet(struct nci_dev *ndev,
 {
        __u8 status = skb->data[0];
 
-       nfc_dbg("entry, status 0x%x", status);
+       pr_debug("status 0x%x\n", status);
 
        clear_bit(NCI_DISCOVERY, &ndev->flags);
 
@@ -169,11 +171,11 @@ void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
        /* we got a rsp, stop the cmd timer */
        del_timer(&ndev->cmd_timer);
 
-       nfc_dbg("NCI RX: MT=rsp, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
-                       nci_pbf(skb->data),
-                       nci_opcode_gid(rsp_opcode),
-                       nci_opcode_oid(rsp_opcode),
-                       nci_plen(skb->data));
+       pr_debug("NCI RX: MT=rsp, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
+                nci_pbf(skb->data),
+                nci_opcode_gid(rsp_opcode),
+                nci_opcode_oid(rsp_opcode),
+                nci_plen(skb->data));
 
        /* strip the nci control header */
        skb_pull(skb, NCI_CTRL_HDR_SIZE);
@@ -200,7 +202,7 @@ void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
                break;
 
        default:
-               nfc_err("unknown rsp opcode 0x%x", rsp_opcode);
+               pr_err("unknown rsp opcode 0x%x\n", rsp_opcode);
                break;
        }
 
index 03f8818..43a1c47 100644 (file)
@@ -21,6 +21,8 @@
  * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
 #include <net/genetlink.h>
 #include <linux/nfc.h>
 #include <linux/slab.h>
@@ -44,6 +46,8 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
        [NFC_ATTR_DEVICE_NAME] = { .type = NLA_STRING,
                                .len = NFC_DEVICE_NAME_MAXSIZE },
        [NFC_ATTR_PROTOCOLS] = { .type = NLA_U32 },
+       [NFC_ATTR_COMM_MODE] = { .type = NLA_U8 },
+       [NFC_ATTR_RF_MODE] = { .type = NLA_U8 },
 };
 
 static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
@@ -51,8 +55,6 @@ static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
 {
        void *hdr;
 
-       nfc_dbg("entry");
-
        hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
                                &nfc_genl_family, flags, NFC_CMD_GET_TARGET);
        if (!hdr)
@@ -105,8 +107,6 @@ static int nfc_genl_dump_targets(struct sk_buff *skb,
        struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
        int rc;
 
-       nfc_dbg("entry");
-
        if (!dev) {
                dev = __get_device_from_cb(cb);
                if (IS_ERR(dev))
@@ -139,8 +139,6 @@ static int nfc_genl_dump_targets_done(struct netlink_callback *cb)
 {
        struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
 
-       nfc_dbg("entry");
-
        if (dev)
                nfc_put_device(dev);
 
@@ -152,8 +150,6 @@ int nfc_genl_targets_found(struct nfc_dev *dev)
        struct sk_buff *msg;
        void *hdr;
 
-       nfc_dbg("entry");
-
        dev->genl_data.poll_req_pid = 0;
 
        msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
@@ -183,8 +179,6 @@ int nfc_genl_device_added(struct nfc_dev *dev)
        struct sk_buff *msg;
        void *hdr;
 
-       nfc_dbg("entry");
-
        msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;
@@ -216,8 +210,6 @@ int nfc_genl_device_removed(struct nfc_dev *dev)
        struct sk_buff *msg;
        void *hdr;
 
-       nfc_dbg("entry");
-
        msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;
@@ -249,8 +241,6 @@ static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev,
 {
        void *hdr;
 
-       nfc_dbg("entry");
-
        hdr = genlmsg_put(msg, pid, seq, &nfc_genl_family, flags,
                                                        NFC_CMD_GET_DEVICE);
        if (!hdr)
@@ -277,8 +267,6 @@ static int nfc_genl_dump_devices(struct sk_buff *skb,
        struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
        bool first_call = false;
 
-       nfc_dbg("entry");
-
        if (!iter) {
                first_call = true;
                iter = kmalloc(sizeof(struct class_dev_iter), GFP_KERNEL);
@@ -319,14 +307,81 @@ static int nfc_genl_dump_devices_done(struct netlink_callback *cb)
 {
        struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
 
-       nfc_dbg("entry");
-
        nfc_device_iter_exit(iter);
        kfree(iter);
 
        return 0;
 }
 
+int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx,
+                                               u8 comm_mode, u8 rf_mode)
+{
+       struct sk_buff *msg;
+       void *hdr;
+
+       pr_debug("DEP link is up\n");
+
+       msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+       if (!msg)
+               return -ENOMEM;
+
+       hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
+                               NFC_CMD_DEP_LINK_UP);
+       if (!hdr)
+               goto free_msg;
+
+       NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
+       if (rf_mode == NFC_RF_INITIATOR)
+               NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target_idx);
+       NLA_PUT_U8(msg, NFC_ATTR_COMM_MODE, comm_mode);
+       NLA_PUT_U8(msg, NFC_ATTR_RF_MODE, rf_mode);
+
+       genlmsg_end(msg, hdr);
+
+       dev->dep_link_up = true;
+
+       genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_ATOMIC);
+
+       return 0;
+
+nla_put_failure:
+       genlmsg_cancel(msg, hdr);
+free_msg:
+       nlmsg_free(msg);
+       return -EMSGSIZE;
+}
+
+int nfc_genl_dep_link_down_event(struct nfc_dev *dev)
+{
+       struct sk_buff *msg;
+       void *hdr;
+
+       pr_debug("DEP link is down\n");
+
+       msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+       if (!msg)
+               return -ENOMEM;
+
+       hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
+                               NFC_CMD_DEP_LINK_DOWN);
+       if (!hdr)
+               goto free_msg;
+
+       NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
+
+       genlmsg_end(msg, hdr);
+
+       genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_ATOMIC);
+
+       return 0;
+
+nla_put_failure:
+       genlmsg_cancel(msg, hdr);
+free_msg:
+       nlmsg_free(msg);
+       return -EMSGSIZE;
+}
+
 static int nfc_genl_get_device(struct sk_buff *skb, struct genl_info *info)
 {
        struct sk_buff *msg;
@@ -334,8 +389,6 @@ static int nfc_genl_get_device(struct sk_buff *skb, struct genl_info *info)
        u32 idx;
        int rc = -ENOBUFS;
 
-       nfc_dbg("entry");
-
        if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
                return -EINVAL;
 
@@ -373,8 +426,6 @@ static int nfc_genl_dev_up(struct sk_buff *skb, struct genl_info *info)
        int rc;
        u32 idx;
 
-       nfc_dbg("entry");
-
        if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
                return -EINVAL;
 
@@ -396,8 +447,6 @@ static int nfc_genl_dev_down(struct sk_buff *skb, struct genl_info *info)
        int rc;
        u32 idx;
 
-       nfc_dbg("entry");
-
        if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
                return -EINVAL;
 
@@ -420,7 +469,7 @@ static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info)
        u32 idx;
        u32 protocols;
 
-       nfc_dbg("entry");
+       pr_debug("Poll start\n");
 
        if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
                !info->attrs[NFC_ATTR_PROTOCOLS])
@@ -451,8 +500,6 @@ static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info)
        int rc;
        u32 idx;
 
-       nfc_dbg("entry");
-
        if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
                return -EINVAL;
 
@@ -478,6 +525,67 @@ out:
        return rc;
 }
 
+static int nfc_genl_dep_link_up(struct sk_buff *skb, struct genl_info *info)
+{
+       struct nfc_dev *dev;
+       int rc, tgt_idx;
+       u32 idx;
+       u8 comm, rf;
+
+       pr_debug("DEP link up\n");
+
+       if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
+                       !info->attrs[NFC_ATTR_COMM_MODE] ||
+                       !info->attrs[NFC_ATTR_RF_MODE])
+               return -EINVAL;
+
+       idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+       if (!info->attrs[NFC_ATTR_TARGET_INDEX])
+               tgt_idx = NFC_TARGET_IDX_ANY;
+       else
+               tgt_idx = nla_get_u32(info->attrs[NFC_ATTR_TARGET_INDEX]);
+
+       comm = nla_get_u8(info->attrs[NFC_ATTR_COMM_MODE]);
+       rf = nla_get_u8(info->attrs[NFC_ATTR_RF_MODE]);
+
+       if (comm != NFC_COMM_ACTIVE && comm != NFC_COMM_PASSIVE)
+               return -EINVAL;
+
+       if (rf != NFC_RF_INITIATOR && rf != NFC_RF_TARGET)
+               return -EINVAL;
+
+       dev = nfc_get_device(idx);
+       if (!dev)
+               return -ENODEV;
+
+       rc = nfc_dep_link_up(dev, tgt_idx, comm, rf);
+
+       nfc_put_device(dev);
+
+       return rc;
+}
+
+static int nfc_genl_dep_link_down(struct sk_buff *skb, struct genl_info *info)
+{
+       struct nfc_dev *dev;
+       int rc;
+       u32 idx;
+
+       if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
+               return -EINVAL;
+
+       idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+
+       dev = nfc_get_device(idx);
+       if (!dev)
+               return -ENODEV;
+
+       rc = nfc_dep_link_down(dev);
+
+       nfc_put_device(dev);
+       return rc;
+}
+
 static struct genl_ops nfc_genl_ops[] = {
        {
                .cmd = NFC_CMD_GET_DEVICE,
@@ -506,6 +614,16 @@ static struct genl_ops nfc_genl_ops[] = {
                .doit = nfc_genl_stop_poll,
                .policy = nfc_genl_policy,
        },
+       {
+               .cmd = NFC_CMD_DEP_LINK_UP,
+               .doit = nfc_genl_dep_link_up,
+               .policy = nfc_genl_policy,
+       },
+       {
+               .cmd = NFC_CMD_DEP_LINK_DOWN,
+               .doit = nfc_genl_dep_link_down,
+               .policy = nfc_genl_policy,
+       },
        {
                .cmd = NFC_CMD_GET_TARGET,
                .dumpit = nfc_genl_dump_targets,
@@ -524,18 +642,16 @@ static int nfc_genl_rcv_nl_event(struct notifier_block *this,
        if (event != NETLINK_URELEASE || n->protocol != NETLINK_GENERIC)
                goto out;
 
-       nfc_dbg("NETLINK_URELEASE event from id %d", n->pid);
+       pr_debug("NETLINK_URELEASE event from id %d\n", n->pid);
 
        nfc_device_iter_init(&iter);
        dev = nfc_device_iter_next(&iter);
 
        while (dev) {
-               mutex_lock(&dev->genl_data.genl_data_mutex);
                if (dev->genl_data.poll_req_pid == n->pid) {
                        nfc_stop_poll(dev);
                        dev->genl_data.poll_req_pid = 0;
                }
-               mutex_unlock(&dev->genl_data.genl_data_mutex);
                dev = nfc_device_iter_next(&iter);
        }
 
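
Both new commands are driven from user space over the "nfc" generic netlink family. A minimal, hypothetical libnl-3 sketch of a DEP link-up request, using only the command and attributes added above (the helper name, genl version and error handling are illustrative, not part of this patch):

    #include <stdint.h>
    #include <netlink/genl/genl.h>
    #include <netlink/genl/ctrl.h>
    #include <linux/nfc.h>

    /* Illustrative only: bring the DEP link up on device 'dev_idx' as initiator. */
    static int nfc_dep_link_up_request(uint32_t dev_idx, uint32_t target_idx)
    {
            struct nl_sock *sk = nl_socket_alloc();
            struct nl_msg *msg;
            int family, err;

            genl_connect(sk);
            family = genl_ctrl_resolve(sk, "nfc");      /* NFC_GENL_NAME */

            msg = nlmsg_alloc();
            genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                        NFC_CMD_DEP_LINK_UP, 1);
            nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev_idx);
            nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target_idx);
            nla_put_u8(msg, NFC_ATTR_COMM_MODE, NFC_COMM_ACTIVE);
            nla_put_u8(msg, NFC_ATTR_RF_MODE, NFC_RF_INITIATOR);

            err = nl_send_auto(sk, msg);
            nlmsg_free(msg);
            nl_socket_free(sk);
            return err < 0 ? err : 0;
    }

Once nfc_dep_link_up() succeeds, the kernel broadcasts the new link state back to listeners through nfc_genl_dep_link_up_event() above.
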
index d86583f..2c2c401 100644 (file)
 #include <net/nfc/nfc.h>
 #include <net/sock.h>
 
-__printf(2, 3)
-int nfc_printk(const char *level, const char *fmt, ...);
-
-#define nfc_info(fmt, arg...) nfc_printk(KERN_INFO, fmt, ##arg)
-#define nfc_err(fmt, arg...) nfc_printk(KERN_ERR, fmt, ##arg)
-#define nfc_dbg(fmt, arg...) pr_debug(fmt "\n", ##arg)
-
 struct nfc_protocol {
        int id;
        struct proto *proto;
@@ -53,6 +46,60 @@ struct nfc_rawsock {
 #define to_rawsock_sk(_tx_work) \
        ((struct sock *) container_of(_tx_work, struct nfc_rawsock, tx_work))
 
+#ifdef CONFIG_NFC_LLCP
+
+void nfc_llcp_mac_is_down(struct nfc_dev *dev);
+void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
+                       u8 comm_mode, u8 rf_mode);
+int nfc_llcp_register_device(struct nfc_dev *dev);
+void nfc_llcp_unregister_device(struct nfc_dev *dev);
+int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len);
+u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, u8 *general_bytes_len);
+int __init nfc_llcp_init(void);
+void nfc_llcp_exit(void);
+
+#else
+
+static inline void nfc_llcp_mac_is_down(struct nfc_dev *dev)
+{
+}
+
+static inline void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
+                                      u8 comm_mode, u8 rf_mode)
+{
+}
+
+static inline int nfc_llcp_register_device(struct nfc_dev *dev)
+{
+       return 0;
+}
+
+static inline void nfc_llcp_unregister_device(struct nfc_dev *dev)
+{
+}
+
+static inline int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len)
+{
+       return 0;
+}
+
+static inline u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, u8 *gb_len)
+{
+       *gb_len = 0;
+       return NULL;
+}
+
+static inline int nfc_llcp_init(void)
+{
+       return 0;
+}
+
+static inline void nfc_llcp_exit(void)
+{
+}
+
+#endif
+
 int __init rawsock_init(void);
 void rawsock_exit(void);
 
@@ -75,6 +122,10 @@ int nfc_genl_targets_found(struct nfc_dev *dev);
 int nfc_genl_device_added(struct nfc_dev *dev);
 int nfc_genl_device_removed(struct nfc_dev *dev);
 
+int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx,
+                              u8 comm_mode, u8 rf_mode);
+int nfc_genl_dep_link_down_event(struct nfc_dev *dev);
+
 struct nfc_dev *nfc_get_device(unsigned idx);
 
 static inline void nfc_put_device(struct nfc_dev *dev)
@@ -109,6 +160,11 @@ int nfc_start_poll(struct nfc_dev *dev, u32 protocols);
 
 int nfc_stop_poll(struct nfc_dev *dev);
 
+int nfc_dep_link_up(struct nfc_dev *dev, int target_idx,
+                               u8 comm_mode, u8 rf_mode);
+
+int nfc_dep_link_down(struct nfc_dev *dev);
+
 int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol);
 
 int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx);
index ee7b2b3..2e2f8c6 100644 (file)
@@ -21,6 +21,8 @@
  * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
 #include <net/tcp_states.h>
 #include <linux/nfc.h>
 #include <linux/export.h>
@@ -29,7 +31,7 @@
 
 static void rawsock_write_queue_purge(struct sock *sk)
 {
-       nfc_dbg("sk=%p", sk);
+       pr_debug("sk=%p\n", sk);
 
        spin_lock_bh(&sk->sk_write_queue.lock);
        __skb_queue_purge(&sk->sk_write_queue);
@@ -39,7 +41,7 @@ static void rawsock_write_queue_purge(struct sock *sk)
 
 static void rawsock_report_error(struct sock *sk, int err)
 {
-       nfc_dbg("sk=%p err=%d", sk, err);
+       pr_debug("sk=%p err=%d\n", sk, err);
 
        sk->sk_shutdown = SHUTDOWN_MASK;
        sk->sk_err = -err;
@@ -52,7 +54,7 @@ static int rawsock_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
 
-       nfc_dbg("sock=%p", sock);
+       pr_debug("sock=%p\n", sock);
 
        sock_orphan(sk);
        sock_put(sk);
@@ -68,14 +70,14 @@ static int rawsock_connect(struct socket *sock, struct sockaddr *_addr,
        struct nfc_dev *dev;
        int rc = 0;
 
-       nfc_dbg("sock=%p sk=%p flags=%d", sock, sk, flags);
+       pr_debug("sock=%p sk=%p flags=%d\n", sock, sk, flags);
 
        if (!addr || len < sizeof(struct sockaddr_nfc) ||
                addr->sa_family != AF_NFC)
                return -EINVAL;
 
-       nfc_dbg("addr dev_idx=%u target_idx=%u protocol=%u", addr->dev_idx,
-                                       addr->target_idx, addr->nfc_protocol);
+       pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n",
+                addr->dev_idx, addr->target_idx, addr->nfc_protocol);
 
        lock_sock(sk);
 
@@ -136,7 +138,7 @@ static void rawsock_data_exchange_complete(void *context, struct sk_buff *skb,
 
        BUG_ON(in_irq());
 
-       nfc_dbg("sk=%p err=%d", sk, err);
+       pr_debug("sk=%p err=%d\n", sk, err);
 
        if (err)
                goto error;
@@ -172,7 +174,7 @@ static void rawsock_tx_work(struct work_struct *work)
        struct sk_buff *skb;
        int rc;
 
-       nfc_dbg("sk=%p target_idx=%u", sk, target_idx);
+       pr_debug("sk=%p target_idx=%u\n", sk, target_idx);
 
        if (sk->sk_shutdown & SEND_SHUTDOWN) {
                rawsock_write_queue_purge(sk);
@@ -198,7 +200,7 @@ static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
        struct sk_buff *skb;
        int rc;
 
-       nfc_dbg("sock=%p sk=%p len=%zu", sock, sk, len);
+       pr_debug("sock=%p sk=%p len=%zu\n", sock, sk, len);
 
        if (msg->msg_namelen)
                return -EOPNOTSUPP;
@@ -206,13 +208,10 @@ static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
        if (sock->state != SS_CONNECTED)
                return -ENOTCONN;
 
-       skb = sock_alloc_send_skb(sk, len + dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE,
-                                       msg->msg_flags & MSG_DONTWAIT, &rc);
-       if (!skb)
+       skb = nfc_alloc_send_skb(dev, sk, msg->msg_flags, len, &rc);
+       if (skb == NULL)
                return rc;
 
-       skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
-
        rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
        if (rc < 0) {
                kfree_skb(skb);
@@ -239,7 +238,7 @@ static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock,
        int copied;
        int rc;
 
-       nfc_dbg("sock=%p sk=%p len=%zu flags=%d", sock, sk, len, flags);
+       pr_debug("sock=%p sk=%p len=%zu flags=%d\n", sock, sk, len, flags);
 
        skb = skb_recv_datagram(sk, flags, noblock, &rc);
        if (!skb)
@@ -283,7 +282,7 @@ static const struct proto_ops rawsock_ops = {
 
 static void rawsock_destruct(struct sock *sk)
 {
-       nfc_dbg("sk=%p", sk);
+       pr_debug("sk=%p\n", sk);
 
        if (sk->sk_state == TCP_ESTABLISHED) {
                nfc_deactivate_target(nfc_rawsock(sk)->dev,
@@ -294,7 +293,7 @@ static void rawsock_destruct(struct sock *sk)
        skb_queue_purge(&sk->sk_receive_queue);
 
        if (!sock_flag(sk, SOCK_DEAD)) {
-               nfc_err("Freeing alive NFC raw socket %p", sk);
+               pr_err("Freeing alive NFC raw socket %p\n", sk);
                return;
        }
 }
@@ -304,14 +303,14 @@ static int rawsock_create(struct net *net, struct socket *sock,
 {
        struct sock *sk;
 
-       nfc_dbg("sock=%p", sock);
+       pr_debug("sock=%p\n", sock);
 
        if (sock->type != SOCK_SEQPACKET)
                return -ESOCKTNOSUPPORT;
 
        sock->ops = &rawsock_ops;
 
-       sk = sk_alloc(net, PF_NFC, GFP_KERNEL, nfc_proto->proto);
+       sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto);
        if (!sk)
                return -ENOMEM;
 
index 6cd7601..7f0ef37 100644 (file)
@@ -127,8 +127,10 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
        vport->ops = ops;
 
        vport->percpu_stats = alloc_percpu(struct vport_percpu_stats);
-       if (!vport->percpu_stats)
+       if (!vport->percpu_stats) {
+               kfree(vport);
                return ERR_PTR(-ENOMEM);
+       }
 
        spin_lock_init(&vport->stats_lock);
 
index 0da505c..e56ca75 100644 (file)
@@ -1631,8 +1631,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
        if (snaplen > res)
                snaplen = res;
 
-       if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-           (unsigned)sk->sk_rcvbuf)
+       if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
                goto drop_n_acct;
 
        if (skb_shared(skb)) {
@@ -1763,8 +1762,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
        if (po->tp_version <= TPACKET_V2) {
                if (macoff + snaplen > po->rx_ring.frame_size) {
                        if (po->copy_thresh &&
-                               atomic_read(&sk->sk_rmem_alloc) + skb->truesize
-                               < (unsigned)sk->sk_rcvbuf) {
+                           atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
                                if (skb_shared(skb)) {
                                        copy_skb = skb_clone(skb, GFP_ATOMIC);
                                } else {
index 128677d..ca355e7 100644 (file)
@@ -220,18 +220,7 @@ static struct platform_driver rfkill_gpio_driver = {
        },
 };
 
-static int __init rfkill_gpio_init(void)
-{
-       return platform_driver_register(&rfkill_gpio_driver);
-}
-
-static void __exit rfkill_gpio_exit(void)
-{
-       platform_driver_unregister(&rfkill_gpio_driver);
-}
-
-module_init(rfkill_gpio_init);
-module_exit(rfkill_gpio_exit);
+module_platform_driver(rfkill_gpio_driver);
 
 MODULE_DESCRIPTION("gpio rfkill");
 MODULE_AUTHOR("NVIDIA");
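
module_platform_driver() collapses the init/exit boilerplate that both rfkill drivers carried. Roughly, glossing over the exact macro text in include/linux/platform_device.h for this release, the single line above expands to:

    static int __init rfkill_gpio_driver_init(void)
    {
            return platform_driver_register(&rfkill_gpio_driver);
    }
    module_init(rfkill_gpio_driver_init);

    static void __exit rfkill_gpio_driver_exit(void)
    {
            platform_driver_unregister(&rfkill_gpio_driver);
    }
    module_exit(rfkill_gpio_driver_exit);

The rfkill_regulator conversion further down is the same transformation.
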
index 3ca7277..11da301 100644 (file)
@@ -36,12 +36,12 @@ static int rfkill_regulator_set_block(void *data, bool blocked)
        if (blocked) {
                if (rfkill_data->reg_enabled) {
                        regulator_disable(rfkill_data->vcc);
-                       rfkill_data->reg_enabled = 0;
+                       rfkill_data->reg_enabled = false;
                }
        } else {
                if (!rfkill_data->reg_enabled) {
                        regulator_enable(rfkill_data->vcc);
-                       rfkill_data->reg_enabled = 1;
+                       rfkill_data->reg_enabled = true;
                }
        }
 
@@ -96,7 +96,7 @@ static int __devinit rfkill_regulator_probe(struct platform_device *pdev)
 
        if (regulator_is_enabled(vcc)) {
                dev_dbg(&pdev->dev, "Regulator already enabled\n");
-               rfkill_data->reg_enabled = 1;
+               rfkill_data->reg_enabled = true;
        }
        rfkill_data->vcc = vcc;
        rfkill_data->rf_kill = rf_kill;
@@ -144,17 +144,7 @@ static struct platform_driver rfkill_regulator_driver = {
        },
 };
 
-static int __init rfkill_regulator_init(void)
-{
-       return platform_driver_register(&rfkill_regulator_driver);
-}
-module_init(rfkill_regulator_init);
-
-static void __exit rfkill_regulator_exit(void)
-{
-       platform_driver_unregister(&rfkill_regulator_driver);
-}
-module_exit(rfkill_regulator_exit);
+module_platform_driver(rfkill_regulator_driver);
 
 MODULE_AUTHOR("Guiming Zhuo <gmzhuo@gmail.com>");
 MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
index f99cfce..c3126e8 100644 (file)
@@ -195,7 +195,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
                sp = rxrpc_skb(txb);
 
                if (sp->need_resend) {
-                       sp->need_resend = 0;
+                       sp->need_resend = false;
 
                        /* each Tx packet has a new serial number */
                        sp->hdr.serial =
@@ -216,7 +216,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
                }
 
                if (time_after_eq(jiffies + 1, sp->resend_at)) {
-                       sp->need_resend = 1;
+                       sp->need_resend = true;
                        resend |= 1;
                } else if (resend & 2) {
                        if (time_before(sp->resend_at, resend_at))
@@ -265,7 +265,7 @@ static void rxrpc_resend_timer(struct rxrpc_call *call)
                if (sp->need_resend) {
                        ;
                } else if (time_after_eq(jiffies + 1, sp->resend_at)) {
-                       sp->need_resend = 1;
+                       sp->need_resend = true;
                        resend |= 1;
                } else if (resend & 2) {
                        if (time_before(sp->resend_at, resend_at))
@@ -314,11 +314,11 @@ static int rxrpc_process_soft_ACKs(struct rxrpc_call *call,
 
                switch (sacks[loop]) {
                case RXRPC_ACK_TYPE_ACK:
-                       sp->need_resend = 0;
+                       sp->need_resend = false;
                        *p_txb |= 1;
                        break;
                case RXRPC_ACK_TYPE_NACK:
-                       sp->need_resend = 1;
+                       sp->need_resend = true;
                        *p_txb &= ~1;
                        resend = 1;
                        break;
@@ -344,13 +344,13 @@ static int rxrpc_process_soft_ACKs(struct rxrpc_call *call,
 
                if (*p_txb & 1) {
                        /* packet must have been discarded */
-                       sp->need_resend = 1;
+                       sp->need_resend = true;
                        *p_txb &= ~1;
                        resend |= 1;
                } else if (sp->need_resend) {
                        ;
                } else if (time_after_eq(jiffies + 1, sp->resend_at)) {
-                       sp->need_resend = 1;
+                       sp->need_resend = true;
                        resend |= 1;
                } else if (resend & 2) {
                        if (time_before(sp->resend_at, resend_at))
index 338d793..16ae887 100644 (file)
@@ -486,7 +486,7 @@ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
        _proto("Tx DATA %%%u { #%u }",
               ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));
 
-       sp->need_resend = 0;
+       sp->need_resend = false;
        sp->resend_at = jiffies + rxrpc_resend_timeout * HZ;
        if (!test_and_set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags)) {
                _debug("run timer");
@@ -508,7 +508,7 @@ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
 
        if (ret < 0) {
                _debug("need instant resend %d", ret);
-               sp->need_resend = 1;
+               sp->need_resend = true;
                rxrpc_instant_resend(call);
        }
 
index 51ff194..1d8bd0d 100644 (file)
@@ -285,7 +285,7 @@ static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
        int r;
 
        list_for_each_entry(f, &head->filters, list) {
-               u32 keys[f->nkeys];
+               u32 keys[FLOW_KEY_MAX + 1];
                struct flow_keys flow_keys;
 
                if (!tcf_em_tree_match(skb, &f->ematches, NULL))
index 205d369..bef00ac 100644 (file)
@@ -394,6 +394,7 @@ static void choke_reset(struct Qdisc *sch)
 static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
        [TCA_CHOKE_PARMS]       = { .len = sizeof(struct tc_red_qopt) },
        [TCA_CHOKE_STAB]        = { .len = RED_STAB_SIZE },
+       [TCA_CHOKE_MAX_P]       = { .type = NLA_U32 },
 };
 
 
@@ -415,6 +416,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
        int err;
        struct sk_buff **old = NULL;
        unsigned int mask;
+       u32 max_P;
 
        if (opt == NULL)
                return -EINVAL;
@@ -427,6 +429,8 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
            tb[TCA_CHOKE_STAB] == NULL)
                return -EINVAL;
 
+       max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;
+
        ctl = nla_data(tb[TCA_CHOKE_PARMS]);
 
        if (ctl->limit > CHOKE_MAX_QUEUE)
@@ -476,7 +480,8 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
 
        red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
                      ctl->Plog, ctl->Scell_log,
-                     nla_data(tb[TCA_CHOKE_STAB]));
+                     nla_data(tb[TCA_CHOKE_STAB]),
+                     max_P);
 
        if (q->head == q->tail)
                red_end_of_idle_period(&q->parms);
@@ -510,6 +515,7 @@ static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
                goto nla_put_failure;
 
        NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt);
+       NLA_PUT_U32(skb, TCA_CHOKE_MAX_P, q->parms.max_P);
        return nla_nest_end(skb, opts);
 
 nla_put_failure:
index b9493a0..53204de 100644 (file)
@@ -34,7 +34,7 @@ struct gred_sched;
 
 struct gred_sched_data {
        u32             limit;          /* HARD maximal queue length    */
-       u32             DP;             /* the drop pramaters */
+       u32             DP;             /* the drop parameters */
        u32             bytesin;        /* bytes seen on virtualQ so far*/
        u32             packetsin;      /* packets seen on virtualQ so far*/
        u32             backlog;        /* bytes on the virtualQ */
@@ -379,18 +379,20 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
 }
 
 static inline int gred_change_vq(struct Qdisc *sch, int dp,
-                                struct tc_gred_qopt *ctl, int prio, u8 *stab)
+                                struct tc_gred_qopt *ctl, int prio,
+                                u8 *stab, u32 max_P,
+                                struct gred_sched_data **prealloc)
 {
        struct gred_sched *table = qdisc_priv(sch);
-       struct gred_sched_data *q;
+       struct gred_sched_data *q = table->tab[dp];
 
-       if (table->tab[dp] == NULL) {
-               table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL);
-               if (table->tab[dp] == NULL)
+       if (!q) {
+               table->tab[dp] = q = *prealloc;
+               *prealloc = NULL;
+               if (!q)
                        return -ENOMEM;
        }
 
-       q = table->tab[dp];
        q->DP = dp;
        q->prio = prio;
        q->limit = ctl->limit;
@@ -400,7 +402,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
 
        red_set_parms(&q->parms,
                      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
-                     ctl->Scell_log, stab);
+                     ctl->Scell_log, stab, max_P);
 
        return 0;
 }
@@ -409,6 +411,7 @@ static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
        [TCA_GRED_PARMS]        = { .len = sizeof(struct tc_gred_qopt) },
        [TCA_GRED_STAB]         = { .len = 256 },
        [TCA_GRED_DPS]          = { .len = sizeof(struct tc_gred_sopt) },
+       [TCA_GRED_MAX_P]        = { .type = NLA_U32 },
 };
 
 static int gred_change(struct Qdisc *sch, struct nlattr *opt)
@@ -418,6 +421,8 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt)
        struct nlattr *tb[TCA_GRED_MAX + 1];
        int err, prio = GRED_DEF_PRIO;
        u8 *stab;
+       u32 max_P;
+       struct gred_sched_data *prealloc;
 
        if (opt == NULL)
                return -EINVAL;
@@ -433,6 +438,8 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt)
            tb[TCA_GRED_STAB] == NULL)
                return -EINVAL;
 
+       max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;
+
        err = -EINVAL;
        ctl = nla_data(tb[TCA_GRED_PARMS]);
        stab = nla_data(tb[TCA_GRED_STAB]);
@@ -455,9 +462,10 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt)
                        prio = ctl->prio;
        }
 
+       prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
        sch_tree_lock(sch);
 
-       err = gred_change_vq(sch, ctl->DP, ctl, prio, stab);
+       err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc);
        if (err < 0)
                goto errout_locked;
 
@@ -471,6 +479,7 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt)
 
 errout_locked:
        sch_tree_unlock(sch);
+       kfree(prealloc);
 errout:
        return err;
 }
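
The prealloc two-step above exists because gred_change_vq() now runs entirely under sch_tree_lock(), a BH-disabling spinlock under which the old in-place kzalloc(GFP_KERNEL) could sleep. The candidate queue structure is therefore allocated before the lock is taken, handed in by reference, and kfree()d afterwards whether or not gred_change_vq() consumed it (kfree(NULL) is a no-op, so no extra ownership tracking is needed).
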
@@ -498,6 +507,7 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
        struct gred_sched *table = qdisc_priv(sch);
        struct nlattr *parms, *opts = NULL;
        int i;
+       u32 max_p[MAX_DPs];
        struct tc_gred_sopt sopt = {
                .DPs    = table->DPs,
                .def_DP = table->def,
@@ -509,6 +519,14 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
        if (opts == NULL)
                goto nla_put_failure;
        NLA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
+
+       for (i = 0; i < MAX_DPs; i++) {
+               struct gred_sched_data *q = table->tab[i];
+
+               max_p[i] = q ? q->parms.max_P : 0;
+       }
+       NLA_PUT(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p);
+
        parms = nla_nest_start(skb, TCA_GRED_PARMS);
        if (parms == NULL)
                goto nla_put_failure;
index 6488e64..9bdca2e 100644 (file)
@@ -1368,6 +1368,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
        struct tc_hfsc_stats xstats;
 
        cl->qstats.qlen = cl->qdisc->q.qlen;
+       cl->qstats.backlog = cl->qdisc->qstats.backlog;
        xstats.level   = cl->level;
        xstats.period  = cl->cl_vtperiod;
        xstats.work    = cl->cl_total;
@@ -1561,6 +1562,15 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
        struct hfsc_sched *q = qdisc_priv(sch);
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_hfsc_qopt qopt;
+       struct hfsc_class *cl;
+       struct hlist_node *n;
+       unsigned int i;
+
+       sch->qstats.backlog = 0;
+       for (i = 0; i < q->clhash.hashsize; i++) {
+               hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
+                       sch->qstats.backlog += cl->qdisc->qstats.backlog;
+       }
 
        qopt.defcls = q->defcls;
        NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
index f88256c..28de430 100644 (file)
@@ -107,7 +107,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
        if (!netif_is_multiqueue(dev))
                return -EOPNOTSUPP;
 
-       if (nla_len(opt) < sizeof(*qopt))
+       if (!opt || nla_len(opt) < sizeof(*qopt))
                return -EINVAL;
 
        qopt = nla_data(opt);
index 3bfd733..ffcaa59 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/skbuff.h>
 #include <linux/vmalloc.h>
 #include <linux/rtnetlink.h>
+#include <linux/reciprocal_div.h>
 
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
@@ -80,6 +81,10 @@ struct netem_sched_data {
        u32 reorder;
        u32 corrupt;
        u32 rate;
+       s32 packet_overhead;
+       u32 cell_size;
+       u32 cell_size_reciprocal;
+       s32 cell_overhead;
 
        struct crndstate {
                u32 last;
@@ -299,11 +304,23 @@ static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
        return  x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
 }
 
-static psched_time_t packet_len_2_sched_time(unsigned int len, u32 rate)
+static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
 {
-       u64 ticks = (u64)len * NSEC_PER_SEC;
+       u64 ticks;
 
-       do_div(ticks, rate);
+       len += q->packet_overhead;
+
+       if (q->cell_size) {
+               u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);
+
+               if (len > cells * q->cell_size) /* extra cell needed for remainder */
+                       cells++;
+               len = cells * (q->cell_size + q->cell_overhead);
+       }
+
+       ticks = (u64)len * NSEC_PER_SEC;
+
+       do_div(ticks, q->rate);
        return PSCHED_NS2TICKS(ticks);
 }
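
The new cell parameters model per-cell link-layer framing on top of the configured rate. A small illustrative restatement of the padding math, using plain division instead of reciprocal_divide() (the values in the comment are examples, not defaults):

    /* e.g. len = 100, packet_overhead = 0, cell_size = 48, cell_overhead = 5
     * (ATM-like framing): 100 bytes -> 3 cells -> 3 * (48 + 5) = 159 bytes
     * billed against q->rate before the transmit delay is computed. */
    static unsigned int netem_padded_len(unsigned int len, int packet_overhead,
                                         unsigned int cell_size, int cell_overhead)
    {
            unsigned int cells;

            len += packet_overhead;
            if (!cell_size)
                    return len;
            cells = (len + cell_size - 1) / cell_size;  /* round up to whole cells */
            return cells * (cell_size + cell_overhead);
    }
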
 
@@ -384,7 +401,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                if (q->rate) {
                        struct sk_buff_head *list = &q->qdisc->q;
 
-                       delay += packet_len_2_sched_time(skb->len, q->rate);
+                       delay += packet_len_2_sched_time(skb->len, q);
 
                        if (!skb_queue_empty(list)) {
                                /*
@@ -568,6 +585,11 @@ static void get_rate(struct Qdisc *sch, const struct nlattr *attr)
        const struct tc_netem_rate *r = nla_data(attr);
 
        q->rate = r->rate;
+       q->packet_overhead = r->packet_overhead;
+       q->cell_size = r->cell_size;
+       if (q->cell_size)
+               q->cell_size_reciprocal = reciprocal_value(q->cell_size);
+       q->cell_overhead = r->cell_overhead;
 }
 
 static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
@@ -583,7 +605,7 @@ static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
                case NETEM_LOSS_GI: {
                        const struct tc_netem_gimodel *gi = nla_data(la);
 
-                       if (nla_len(la) != sizeof(struct tc_netem_gimodel)) {
+                       if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
                                pr_info("netem: incorrect gi model size\n");
                                return -EINVAL;
                        }
@@ -602,8 +624,8 @@ static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
                case NETEM_LOSS_GE: {
                        const struct tc_netem_gemodel *ge = nla_data(la);
 
-                       if (nla_len(la) != sizeof(struct tc_netem_gemodel)) {
-                               pr_info("netem: incorrect gi model size\n");
+                       if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
+                               pr_info("netem: incorrect ge model size\n");
                                return -EINVAL;
                        }
 
@@ -909,6 +931,9 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
        NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);
 
        rate.rate = q->rate;
+       rate.packet_overhead = q->packet_overhead;
+       rate.cell_size = q->cell_size;
+       rate.cell_overhead = q->cell_overhead;
        NLA_PUT(skb, TCA_NETEM_RATE, sizeof(rate), &rate);
 
        if (dump_loss_model(q, skb) != 0)
index d617161..ce2256a 100644 (file)
@@ -39,6 +39,7 @@
 struct red_sched_data {
        u32                     limit;          /* HARD maximal queue length */
        unsigned char           flags;
+       struct timer_list       adapt_timer;
        struct red_parms        parms;
        struct red_stats        stats;
        struct Qdisc            *qdisc;
@@ -161,12 +162,15 @@ static void red_reset(struct Qdisc *sch)
 static void red_destroy(struct Qdisc *sch)
 {
        struct red_sched_data *q = qdisc_priv(sch);
+
+       del_timer_sync(&q->adapt_timer);
        qdisc_destroy(q->qdisc);
 }
 
 static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
        [TCA_RED_PARMS] = { .len = sizeof(struct tc_red_qopt) },
        [TCA_RED_STAB]  = { .len = RED_STAB_SIZE },
+       [TCA_RED_MAX_P] = { .type = NLA_U32 },
 };
 
 static int red_change(struct Qdisc *sch, struct nlattr *opt)
@@ -176,6 +180,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
        struct tc_red_qopt *ctl;
        struct Qdisc *child = NULL;
        int err;
+       u32 max_P;
 
        if (opt == NULL)
                return -EINVAL;
@@ -188,6 +193,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
            tb[TCA_RED_STAB] == NULL)
                return -EINVAL;
 
+       max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
+
        ctl = nla_data(tb[TCA_RED_PARMS]);
 
        if (ctl->limit > 0) {
@@ -206,8 +213,13 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
        }
 
        red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
-                                ctl->Plog, ctl->Scell_log,
-                                nla_data(tb[TCA_RED_STAB]));
+                     ctl->Plog, ctl->Scell_log,
+                     nla_data(tb[TCA_RED_STAB]),
+                     max_P);
+
+       del_timer(&q->adapt_timer);
+       if (ctl->flags & TC_RED_ADAPTATIVE)
+               mod_timer(&q->adapt_timer, jiffies + HZ/2);
 
        if (!q->qdisc->q.qlen)
                red_start_of_idle_period(&q->parms);
@@ -216,11 +228,24 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
        return 0;
 }
 
+static inline void red_adaptative_timer(unsigned long arg)
+{
+       struct Qdisc *sch = (struct Qdisc *)arg;
+       struct red_sched_data *q = qdisc_priv(sch);
+       spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+
+       spin_lock(root_lock);
+       red_adaptative_algo(&q->parms);
+       mod_timer(&q->adapt_timer, jiffies + HZ/2);
+       spin_unlock(root_lock);
+}
+
 static int red_init(struct Qdisc *sch, struct nlattr *opt)
 {
        struct red_sched_data *q = qdisc_priv(sch);
 
        q->qdisc = &noop_qdisc;
+       setup_timer(&q->adapt_timer, red_adaptative_timer, (unsigned long)sch);
        return red_change(sch, opt);
 }
 
@@ -243,6 +268,7 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
        if (opts == NULL)
                goto nla_put_failure;
        NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
+       NLA_PUT_U32(skb, TCA_RED_MAX_P, q->parms.max_P);
        return nla_nest_end(skb, opts);
 
 nla_put_failure:
index 30cda70..d329a8a 100644 (file)
@@ -136,16 +136,30 @@ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index
        return &q->dep[val - SFQ_SLOTS];
 }
 
+/*
+ * In order to be able to quickly rehash our queue when timer changes
+ * q->perturbation, we store flow_keys in skb->cb[]
+ */
+struct sfq_skb_cb {
+       struct flow_keys        keys;
+};
+
+static inline struct sfq_skb_cb *sfq_skb_cb(const struct sk_buff *skb)
+{
+       BUILD_BUG_ON(sizeof(skb->cb) <
+               sizeof(struct qdisc_skb_cb) + sizeof(struct sfq_skb_cb));
+       return (struct sfq_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
 static unsigned int sfq_hash(const struct sfq_sched_data *q,
                             const struct sk_buff *skb)
 {
-       struct flow_keys keys;
+       const struct flow_keys *keys = &sfq_skb_cb(skb)->keys;
        unsigned int hash;
 
-       skb_flow_dissect(skb, &keys);
-       hash = jhash_3words((__force u32)keys.dst,
-                           (__force u32)keys.src ^ keys.ip_proto,
-                           (__force u32)keys.ports, q->perturbation);
+       hash = jhash_3words((__force u32)keys->dst,
+                           (__force u32)keys->src ^ keys->ip_proto,
+                           (__force u32)keys->ports, q->perturbation);
        return hash & (q->divisor - 1);
 }
 
@@ -161,8 +175,10 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
            TC_H_MIN(skb->priority) <= q->divisor)
                return TC_H_MIN(skb->priority);
 
-       if (!q->filter_list)
+       if (!q->filter_list) {
+               skb_flow_dissect(skb, &sfq_skb_cb(skb)->keys);
                return sfq_hash(q, skb) + 1;
+       }
 
        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        result = tc_classify(skb, q->filter_list, &res);
@@ -423,12 +439,71 @@ sfq_reset(struct Qdisc *sch)
                kfree_skb(skb);
 }
 
+/*
+ * When q->perturbation is changed, we rehash all queued skbs
+ * to avoid OOO (Out Of Order) effects.
+ * We don't use sfq_dequeue()/sfq_enqueue() because we don't want to change
+ * counters.
+ */
+static void sfq_rehash(struct sfq_sched_data *q)
+{
+       struct sk_buff *skb;
+       int i;
+       struct sfq_slot *slot;
+       struct sk_buff_head list;
+
+       __skb_queue_head_init(&list);
+
+       for (i = 0; i < SFQ_SLOTS; i++) {
+               slot = &q->slots[i];
+               if (!slot->qlen)
+                       continue;
+               while (slot->qlen) {
+                       skb = slot_dequeue_head(slot);
+                       sfq_dec(q, i);
+                       __skb_queue_tail(&list, skb);
+               }
+               q->ht[slot->hash] = SFQ_EMPTY_SLOT;
+       }
+       q->tail = NULL;
+
+       while ((skb = __skb_dequeue(&list)) != NULL) {
+               unsigned int hash = sfq_hash(q, skb);
+               sfq_index x = q->ht[hash];
+
+               slot = &q->slots[x];
+               if (x == SFQ_EMPTY_SLOT) {
+                       x = q->dep[0].next; /* get a free slot */
+                       q->ht[hash] = x;
+                       slot = &q->slots[x];
+                       slot->hash = hash;
+               }
+               slot_queue_add(slot, skb);
+               sfq_inc(q, x);
+               if (slot->qlen == 1) {          /* The flow is new */
+                       if (q->tail == NULL) {  /* It is the first flow */
+                               slot->next = x;
+                       } else {
+                               slot->next = q->tail->next;
+                               q->tail->next = x;
+                       }
+                       q->tail = slot;
+                       slot->allot = q->scaled_quantum;
+               }
+       }
+}
+
 static void sfq_perturbation(unsigned long arg)
 {
        struct Qdisc *sch = (struct Qdisc *)arg;
        struct sfq_sched_data *q = qdisc_priv(sch);
+       spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
 
+       spin_lock(root_lock);
        q->perturbation = net_random();
+       if (!q->filter_list && q->tail)
+               sfq_rehash(q);
+       spin_unlock(root_lock);
 
        if (q->perturb_period)
                mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
index ed1336e..4532659 100644 (file)
@@ -277,7 +277,7 @@ static inline int teql_resolve(struct sk_buff *skb,
                return 0;
 
        rcu_read_lock();
-       mn = dst_get_neighbour(dst);
+       mn = dst_get_neighbour_noref(dst);
        res = mn ? __teql_resolve(skb, skb_res, dev, txq, mn) : 0;
        rcu_read_unlock();
 
index 152b5b3..acd2edb 100644 (file)
@@ -173,7 +173,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
        asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
        asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
        asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
-               (unsigned long)sp->autoclose * HZ;
+               min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ;
 
        /* Initializes the timers */
        for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
index b7692aa..80f71af 100644 (file)
@@ -105,7 +105,7 @@ static inline int sctp_rcv_checksum(struct sk_buff *skb)
 struct sctp_input_cb {
        union {
                struct inet_skb_parm    h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                struct inet6_skb_parm   h6;
 #endif
        } header;
index 08b3cea..817174e 100644 (file)
@@ -697,13 +697,7 @@ static void sctp_packet_append_data(struct sctp_packet *packet,
        /* Keep track of how many bytes are in flight to the receiver. */
        asoc->outqueue.outstanding_bytes += datasize;
 
-       /* Update our view of the receiver's rwnd. Include sk_buff overhead
-        * while updating peer.rwnd so that it reduces the chances of a
-        * receiver running out of receive buffer space even when receive
-        * window is still open. This can happen when a sender is sending
-        * sending small messages.
-        */
-       datasize += sizeof(struct sk_buff);
+       /* Update our view of the receiver's rwnd. */
        if (datasize < rwnd)
                rwnd -= datasize;
        else
index 14c2b06..cfeb1d4 100644 (file)
@@ -411,8 +411,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
                                        chunk->transport->flight_size -=
                                                        sctp_data_size(chunk);
                                q->outstanding_bytes -= sctp_data_size(chunk);
-                               q->asoc->peer.rwnd += (sctp_data_size(chunk) +
-                                                       sizeof(struct sk_buff));
+                               q->asoc->peer.rwnd += sctp_data_size(chunk);
                        }
                        continue;
                }
@@ -432,8 +431,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
                         * (Section 7.2.4)), add the data size of those
                         * chunks to the rwnd.
                         */
-                       q->asoc->peer.rwnd += (sctp_data_size(chunk) +
-                                               sizeof(struct sk_buff));
+                       q->asoc->peer.rwnd += sctp_data_size(chunk);
                        q->outstanding_bytes -= sctp_data_size(chunk);
                        if (chunk->transport)
                                transport->flight_size -= sctp_data_size(chunk);
index 61b9fca..5942d27 100644 (file)
@@ -637,7 +637,7 @@ void sctp_addr_wq_timeout_handler(unsigned long arg)
                    " for cmd %d at entry %p\n", &sctp_addr_waitq, &addrw->a, addrw->state,
                    addrw);
 
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                /* Now we send an ASCONF for each association */
                /* Note. we currently don't handle link local IPv6 addressees */
                if (addrw->a.sa.sa_family == AF_INET6) {
@@ -1285,6 +1285,9 @@ SCTP_STATIC __init int sctp_init(void)
        sctp_max_instreams              = SCTP_DEFAULT_INSTREAMS;
        sctp_max_outstreams             = SCTP_DEFAULT_OUTSTREAMS;
 
+       /* Initialize maximum autoclose timeout. */
+       sctp_max_autoclose              = INT_MAX / HZ;
+
        /* Initialize handle used for association ids. */
        idr_init(&sctp_assocs_id);
 
index d56c07a..408ebd0 100644 (file)
@@ -2200,8 +2200,6 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
                return -EINVAL;
        if (copy_from_user(&sp->autoclose, optval, optlen))
                return -EFAULT;
-       /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
-       sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ);
 
        return 0;
 }
@@ -6841,7 +6839,7 @@ struct proto sctp_prot = {
        .sockets_allocated = &sctp_sockets_allocated,
 };
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 
 struct proto sctpv6_prot = {
        .name           = "SCTPv6",
@@ -6872,4 +6870,4 @@ struct proto sctpv6_prot = {
        .memory_allocated = &sctp_memory_allocated,
        .sockets_allocated = &sctp_sockets_allocated,
 };
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif /* IS_ENABLED(CONFIG_IPV6) */
index 6b39529..60ffbd0 100644 (file)
@@ -53,6 +53,10 @@ static int sack_timer_min = 1;
 static int sack_timer_max = 500;
 static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
 static int rwnd_scale_max = 16;
+static unsigned long max_autoclose_min = 0;
+static unsigned long max_autoclose_max =
+       (MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
+       ? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ;
 
 extern long sysctl_sctp_mem[3];
 extern int sysctl_sctp_rmem[3];
@@ -258,6 +262,15 @@ static ctl_table sctp_table[] = {
                .extra1         = &one,
                .extra2         = &rwnd_scale_max,
        },
+       {
+               .procname       = "max_autoclose",
+               .data           = &sctp_max_autoclose,
+               .maxlen         = sizeof(unsigned long),
+               .mode           = 0644,
+               .proc_handler   = &proc_doulongvec_minmax,
+               .extra1         = &max_autoclose_min,
+               .extra2         = &max_autoclose_max,
+       },
 
        { /* sentinel */ }
 };
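
Together with the min_t() clamp added to sctp_association_init() above, the new entry appears as /proc/sys/net/sctp/max_autoclose (net.sctp.max_autoclose) and bounds the effective value of the SCTP_AUTOCLOSE socket option; the clamp is now applied when the association is created rather than in setsockopt(), whose old MAX_SCHEDULE_TIMEOUT check is removed above.
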
index 67a655e..ee77742 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 
 static size_t rpc_ntop6_noscopeid(const struct sockaddr *sap,
                                  char *buf, const int buflen)
@@ -91,7 +91,7 @@ static size_t rpc_ntop6(const struct sockaddr *sap,
        return len;
 }
 
-#else  /* !(defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)) */
+#else  /* !IS_ENABLED(CONFIG_IPV6) */
 
 static size_t rpc_ntop6_noscopeid(const struct sockaddr *sap,
                                  char *buf, const int buflen)
@@ -105,7 +105,7 @@ static size_t rpc_ntop6(const struct sockaddr *sap,
        return 0;
 }
 
-#endif /* !(defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)) */
+#endif /* !IS_ENABLED(CONFIG_IPV6) */
 
 static int rpc_ntop4(const struct sockaddr *sap,
                     char *buf, const size_t buflen)
@@ -155,7 +155,7 @@ static size_t rpc_pton4(const char *buf, const size_t buflen,
        return sizeof(struct sockaddr_in);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static int rpc_parse_scope_id(const char *buf, const size_t buflen,
                              const char *delim, struct sockaddr_in6 *sin6)
 {
index d12ffa5..00a1a2a 100644 (file)
@@ -590,6 +590,27 @@ void rpc_prepare_task(struct rpc_task *task)
        task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
 }
 
+static void
+rpc_init_task_statistics(struct rpc_task *task)
+{
+       /* Initialize retry counters */
+       task->tk_garb_retry = 2;
+       task->tk_cred_retry = 2;
+       task->tk_rebind_retry = 2;
+
+       /* starting timestamp */
+       task->tk_start = ktime_get();
+}
+
+static void
+rpc_reset_task_statistics(struct rpc_task *task)
+{
+       task->tk_timeouts = 0;
+       task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);
+
+       rpc_init_task_statistics(task);
+}
+
 /*
  * Helper that calls task->tk_ops->rpc_call_done if it exists
  */
@@ -602,6 +623,7 @@ void rpc_exit_task(struct rpc_task *task)
                        WARN_ON(RPC_ASSASSINATED(task));
                        /* Always release the RPC slot and buffer memory */
                        xprt_release(task);
+                       rpc_reset_task_statistics(task);
                }
        }
 }
@@ -804,11 +826,6 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
        task->tk_calldata = task_setup_data->callback_data;
        INIT_LIST_HEAD(&task->tk_task);
 
-       /* Initialize retry counters */
-       task->tk_garb_retry = 2;
-       task->tk_cred_retry = 2;
-       task->tk_rebind_retry = 2;
-
        task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
        task->tk_owner = current->tgid;
 
@@ -818,8 +835,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
        if (task->tk_ops->rpc_call_prepare != NULL)
                task->tk_action = rpc_prepare_task;
 
-       /* starting timestamp */
-       task->tk_start = ktime_get();
+       rpc_init_task_statistics(task);
 
        dprintk("RPC:       new task initialized, procpid %u\n",
                                task_pid_nr(current));
index 6e03888..9d01d46 100644 (file)
@@ -826,7 +826,7 @@ static int __svc_rpcb_register4(const u32 program, const u32 version,
        return error;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /*
  * Register an "inet6" protocol family netid with the local
  * rpcbind daemon via an rpcbind v4 SET request.
@@ -872,7 +872,7 @@ static int __svc_rpcb_register6(const u32 program, const u32 version,
 
        return error;
 }
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif /* IS_ENABLED(CONFIG_IPV6) */
 
 /*
  * Register a kernel RPC service via rpcbind version 4.
@@ -893,11 +893,11 @@ static int __svc_register(const char *progname,
                error = __svc_rpcb_register4(program, version,
                                                protocol, port);
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case PF_INET6:
                error = __svc_rpcb_register6(program, version,
                                                protocol, port);
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif
        }
 
        if (error < 0)
index 447cd0e..38649cf 100644 (file)
@@ -179,13 +179,13 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
                .sin_addr.s_addr        = htonl(INADDR_ANY),
                .sin_port               = htons(port),
        };
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct sockaddr_in6 sin6 = {
                .sin6_family            = AF_INET6,
                .sin6_addr              = IN6ADDR_ANY_INIT,
                .sin6_port              = htons(port),
        };
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif
        struct sockaddr *sap;
        size_t len;
 
@@ -194,12 +194,12 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
                sap = (struct sockaddr *)&sin;
                len = sizeof(sin);
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case PF_INET6:
                sap = (struct sockaddr *)&sin6;
                len = sizeof(sin6);
                break;
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif
        default:
                return ERR_PTR(-EAFNOSUPPORT);
        }
index fe258fc..01153ea 100644 (file)
@@ -220,7 +220,7 @@ static int ip_map_parse(struct cache_detail *cd,
                ipv6_addr_set_v4mapped(address.s4.sin_addr.s_addr,
                                &sin6.sin6_addr);
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                memcpy(&sin6, &address.s6, sizeof(sin6));
                break;
index f4385e4..c64c0ef 100644 (file)
@@ -995,13 +995,11 @@ out_init_req:
 
 static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
 {
-       if (xprt_dynamic_free_slot(xprt, req))
-               return;
-
-       memset(req, 0, sizeof(*req));   /* mark unused */
-
        spin_lock(&xprt->reserve_lock);
-       list_add(&req->rq_list, &xprt->free);
+       if (!xprt_dynamic_free_slot(xprt, req)) {
+               memset(req, 0, sizeof(*req));   /* mark unused */
+               list_add(&req->rq_list, &xprt->free);
+       }
        rpc_wake_up_next(&xprt->backlog);
        spin_unlock(&xprt->reserve_lock);
 }
index 8311689..9e37b78 100644 (file)
@@ -110,8 +110,7 @@ int tipc_ref_table_init(u32 requested_size, u32 start)
 
        /* allocate table & mark all entries as uninitialized */
 
-       table = __vmalloc(actual_size * sizeof(struct reference),
-                         GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+       table = vzalloc(actual_size * sizeof(struct reference));
        if (table == NULL)
                return -ENOMEM;
 
index 5a69733..c2128b1 100644 (file)
@@ -19,3 +19,10 @@ config UNIX
 
          Say Y unless you know what you are doing.
 
+config UNIX_DIAG
+       tristate "UNIX: socket monitoring interface"
+       depends on UNIX
+       default UNIX
+       ---help---
+         Support for the UNIX socket monitoring interface used by the ss tool.
+         If unsure, say Y.
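
The new interface is consumed over the sock_diag netlink channel. A hypothetical user-space request for a full AF_UNIX dump, assuming the NETLINK_SOCK_DIAG protocol and the unix_diag_req layout introduced by this series (reply parsing and error handling elided):

    #include <linux/netlink.h>
    #include <linux/sock_diag.h>
    #include <linux/unix_diag.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    static int unix_diag_dump_all(void)
    {
            int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
            struct {
                    struct nlmsghdr nlh;
                    struct unix_diag_req req;
            } msg;

            memset(&msg, 0, sizeof(msg));
            msg.nlh.nlmsg_len = sizeof(msg);
            msg.nlh.nlmsg_type = SOCK_DIAG_BY_FAMILY;
            msg.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
            msg.req.sdiag_family = AF_UNIX;
            msg.req.udiag_states = -1;      /* all socket states */
            msg.req.udiag_show = UDIAG_SHOW_NAME | UDIAG_SHOW_PEER | UDIAG_SHOW_RQLEN;

            if (send(fd, &msg, sizeof(msg), 0) < 0)
                    return -1;
            /* recv() loop parsing struct unix_diag_msg replies would follow here */
            close(fd);
            return 0;
    }

This is the dump path that the ss tool mentioned in the help text would use.
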
index b852a2b..b663c60 100644 (file)
@@ -6,3 +6,6 @@ obj-$(CONFIG_UNIX)      += unix.o
 
 unix-y                 := af_unix.o garbage.o
 unix-$(CONFIG_SYSCTL)  += sysctl_net_unix.o
+
+obj-$(CONFIG_UNIX_DIAG)        += unix_diag.o
+unix_diag-y            := diag.o
index b595a3d..e1b9358 100644 (file)
 #include <net/checksum.h>
 #include <linux/security.h>
 
-static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
-static DEFINE_SPINLOCK(unix_table_lock);
+struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
+EXPORT_SYMBOL_GPL(unix_socket_table);
+DEFINE_SPINLOCK(unix_table_lock);
+EXPORT_SYMBOL_GPL(unix_table_lock);
 static atomic_long_t unix_nr_socks;
 
 #define unix_sockets_unbound   (&unix_socket_table[UNIX_HASH_SIZE])
@@ -172,7 +174,7 @@ static inline int unix_recvq_full(struct sock const *sk)
        return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
 }
 
-static struct sock *unix_peer_get(struct sock *s)
+struct sock *unix_peer_get(struct sock *s)
 {
        struct sock *peer;
 
@@ -183,6 +185,7 @@ static struct sock *unix_peer_get(struct sock *s)
        unix_state_unlock(s);
        return peer;
 }
+EXPORT_SYMBOL_GPL(unix_peer_get);
 
 static inline void unix_release_addr(struct unix_address *addr)
 {
diff --git a/net/unix/diag.c b/net/unix/diag.c
new file mode 100644 (file)
index 0000000..91d5782
--- /dev/null
@@ -0,0 +1,314 @@
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/sock_diag.h>
+#include <linux/unix_diag.h>
+#include <linux/skbuff.h>
+#include <linux/module.h>
+#include <net/netlink.h>
+#include <net/af_unix.h>
+#include <net/tcp_states.h>
+
+#define UNIX_DIAG_PUT(skb, attrtype, attrlen) \
+       RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
+
+static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
+{
+       struct unix_address *addr = unix_sk(sk)->addr;
+       char *s;
+
+       if (addr) {
+               s = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short));
+               memcpy(s, addr->name->sun_path, addr->len - sizeof(short));
+       }
+
+       return 0;
+
+rtattr_failure:
+       return -EMSGSIZE;
+}
+
+static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
+{
+       struct dentry *dentry = unix_sk(sk)->dentry;
+       struct unix_diag_vfs *uv;
+
+       if (dentry) {
+               uv = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_VFS, sizeof(*uv));
+               uv->udiag_vfs_ino = dentry->d_inode->i_ino;
+               uv->udiag_vfs_dev = dentry->d_sb->s_dev;
+       }
+
+       return 0;
+
+rtattr_failure:
+       return -EMSGSIZE;
+}
+
+static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
+{
+       struct sock *peer;
+       int ino;
+
+       peer = unix_peer_get(sk);
+       if (peer) {
+               unix_state_lock(peer);
+               ino = sock_i_ino(peer);
+               unix_state_unlock(peer);
+               sock_put(peer);
+
+               RTA_PUT_U32(nlskb, UNIX_DIAG_PEER, ino);
+       }
+
+       return 0;
+rtattr_failure:
+       return -EMSGSIZE;
+}
+
+static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
+{
+       struct sk_buff *skb;
+       u32 *buf;
+       int i;
+
+       if (sk->sk_state == TCP_LISTEN) {
+               spin_lock(&sk->sk_receive_queue.lock);
+               buf = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_ICONS, sk->sk_receive_queue.qlen * sizeof(u32));
+               i = 0;
+               skb_queue_walk(&sk->sk_receive_queue, skb) {
+                       struct sock *req, *peer;
+
+                       req = skb->sk;
+                       /*
+                        * A socket's state lock nests outside its own queue
+                        * lock; here we hold the listener's queue lock, not
+                        * req's, so taking req's state lock is safe.
+                        */
+                       unix_state_lock_nested(req);
+                       peer = unix_sk(req)->peer;
+                       if (peer)
+                               buf[i++] = sock_i_ino(peer);
+                       unix_state_unlock(req);
+               }
+               spin_unlock(&sk->sk_receive_queue.lock);
+       }
+
+       return 0;
+
+rtattr_failure:
+       spin_unlock(&sk->sk_receive_queue.lock);
+       return -EMSGSIZE;
+}
+
+static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
+{
+       RTA_PUT_U32(nlskb, UNIX_DIAG_RQLEN, sk->sk_receive_queue.qlen);
+       return 0;
+
+rtattr_failure:
+       return -EMSGSIZE;
+}
+
+static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
+               u32 pid, u32 seq, u32 flags, int sk_ino)
+{
+       unsigned char *b = skb_tail_pointer(skb);
+       struct nlmsghdr *nlh;
+       struct unix_diag_msg *rep;
+
+       nlh = NLMSG_PUT(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep));
+       nlh->nlmsg_flags = flags;
+
+       rep = NLMSG_DATA(nlh);
+
+       rep->udiag_family = AF_UNIX;
+       rep->udiag_type = sk->sk_type;
+       rep->udiag_state = sk->sk_state;
+       rep->udiag_ino = sk_ino;
+       sock_diag_save_cookie(sk, rep->udiag_cookie);
+
+       if ((req->udiag_show & UDIAG_SHOW_NAME) &&
+                       sk_diag_dump_name(sk, skb))
+               goto nlmsg_failure;
+
+       if ((req->udiag_show & UDIAG_SHOW_VFS) &&
+                       sk_diag_dump_vfs(sk, skb))
+               goto nlmsg_failure;
+
+       if ((req->udiag_show & UDIAG_SHOW_PEER) &&
+                       sk_diag_dump_peer(sk, skb))
+               goto nlmsg_failure;
+
+       if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
+                       sk_diag_dump_icons(sk, skb))
+               goto nlmsg_failure;
+
+       if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
+                       sk_diag_show_rqlen(sk, skb))
+               goto nlmsg_failure;
+
+       nlh->nlmsg_len = skb_tail_pointer(skb) - b;
+       return skb->len;
+
+nlmsg_failure:
+       nlmsg_trim(skb, b);
+       return -EMSGSIZE;
+}
+
+static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
+               u32 pid, u32 seq, u32 flags)
+{
+       int sk_ino;
+
+       unix_state_lock(sk);
+       sk_ino = sock_i_ino(sk);
+       unix_state_unlock(sk);
+
+       if (!sk_ino)
+               return 0;
+
+       return sk_diag_fill(sk, skb, req, pid, seq, flags, sk_ino);
+}
+
+static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       struct unix_diag_req *req;
+       int num, s_num, slot, s_slot;
+
+       req = NLMSG_DATA(cb->nlh);
+
+       s_slot = cb->args[0];
+       num = s_num = cb->args[1];
+
+       spin_lock(&unix_table_lock);
+       for (slot = s_slot; slot <= UNIX_HASH_SIZE; s_num = 0, slot++) {
+               struct sock *sk;
+               struct hlist_node *node;
+
+               num = 0;
+               sk_for_each(sk, node, &unix_socket_table[slot]) {
+                       if (num < s_num)
+                               goto next;
+                       if (!(req->udiag_states & (1 << sk->sk_state)))
+                               goto next;
+                       if (sk_diag_dump(sk, skb, req,
+                                               NETLINK_CB(cb->skb).pid,
+                                               cb->nlh->nlmsg_seq,
+                                               NLM_F_MULTI) < 0)
+                               goto done;
+next:
+                       num++;
+               }
+       }
+done:
+       spin_unlock(&unix_table_lock);
+       cb->args[0] = slot;
+       cb->args[1] = num;
+
+       return skb->len;
+}
+
+static struct sock *unix_lookup_by_ino(int ino)
+{
+       int i;
+       struct sock *sk;
+
+       spin_lock(&unix_table_lock);
+       for (i = 0; i <= UNIX_HASH_SIZE; i++) {
+               struct hlist_node *node;
+
+               sk_for_each(sk, node, &unix_socket_table[i])
+                       if (ino == sock_i_ino(sk)) {
+                               sock_hold(sk);
+                               spin_unlock(&unix_table_lock);
+
+                               return sk;
+                       }
+       }
+
+       spin_unlock(&unix_table_lock);
+       return NULL;
+}
+
+static int unix_diag_get_exact(struct sk_buff *in_skb,
+                              const struct nlmsghdr *nlh,
+                              struct unix_diag_req *req)
+{
+       int err = -EINVAL;
+       struct sock *sk;
+       struct sk_buff *rep;
+       unsigned int extra_len;
+
+       if (req->udiag_ino == 0)
+               goto out_nosk;
+
+       sk = unix_lookup_by_ino(req->udiag_ino);
+       err = -ENOENT;
+       if (sk == NULL)
+               goto out_nosk;
+
+       err = sock_diag_check_cookie(sk, req->udiag_cookie);
+       if (err)
+               goto out;
+
+       extra_len = 256;
+again:
+       err = -ENOMEM;
+       rep = alloc_skb(NLMSG_SPACE((sizeof(struct unix_diag_msg) + extra_len)),
+                       GFP_KERNEL);
+       if (!rep)
+               goto out;
+
+       err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).pid,
+                          nlh->nlmsg_seq, 0, req->udiag_ino);
+       if (err < 0) {
+               kfree_skb(rep);
+               extra_len += 256;
+               if (extra_len >= PAGE_SIZE)
+                       goto out;
+
+               goto again;
+       }
+       err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
+                             MSG_DONTWAIT);
+       if (err > 0)
+               err = 0;
+out:
+       if (sk)
+               sock_put(sk);
+out_nosk:
+       return err;
+}
+
+static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
+{
+       int hdrlen = sizeof(struct unix_diag_req);
+
+       if (nlmsg_len(h) < hdrlen)
+               return -EINVAL;
+
+       if (h->nlmsg_flags & NLM_F_DUMP)
+               return netlink_dump_start(sock_diag_nlsk, skb, h,
+                                         unix_diag_dump, NULL, 0);
+       else
+               return unix_diag_get_exact(skb, h, (struct unix_diag_req *)NLMSG_DATA(h));
+}
+
+static struct sock_diag_handler unix_diag_handler = {
+       .family = AF_UNIX,
+       .dump = unix_diag_handler_dump,
+};
+
+static int __init unix_diag_init(void)
+{
+       return sock_diag_register(&unix_diag_handler);
+}
+
+static void __exit unix_diag_exit(void)
+{
+       sock_diag_unregister(&unix_diag_handler);
+}
+
+module_init(unix_diag_init);
+module_exit(unix_diag_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);
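
The new module above answers SOCK_DIAG_BY_FAMILY requests for AF_UNIX on the sock_diag netlink channel. Purely as a hedged illustration (not part of the patch), a userspace dump request could look like the sketch below; it assumes the UAPI definitions this series adds in <linux/unix_diag.h> (struct unix_diag_req, the UDIAG_SHOW_* bits) and NETLINK_SOCK_DIAG from <linux/netlink.h>, and it omits reply parsing:

#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct unix_diag_req req;
	} msg;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);

	if (fd < 0)
		return 1;

	memset(&msg, 0, sizeof(msg));
	msg.nlh.nlmsg_len = sizeof(msg);
	msg.nlh.nlmsg_type = SOCK_DIAG_BY_FAMILY;
	msg.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	msg.req.sdiag_family = AF_UNIX;
	msg.req.udiag_states = ~0U;	/* match sockets in any state */
	msg.req.udiag_show = UDIAG_SHOW_NAME | UDIAG_SHOW_PEER | UDIAG_SHOW_RQLEN;

	send(fd, &msg, sizeof(msg), 0);
	/* recv() would now return NLM_F_MULTI parts, each carrying a
	 * struct unix_diag_msg followed by the requested attributes. */
	close(fd);
	return 0;
}
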
index 1f1ef70..2e4444f 100644
@@ -121,15 +121,16 @@ config CFG80211_WEXT
 
 config WIRELESS_EXT_SYSFS
        bool "Wireless extensions sysfs files"
-       default y
        depends on WEXT_CORE && SYSFS
        help
          This option enables the deprecated wireless statistics
          files in /sys/class/net/*/wireless/. The same information
          is available via the ioctls as well.
 
-         Say Y if you have programs using it, like old versions of
-         hal.
+         Say N. If you know you have ancient tools requiring it,
+         like very old versions of hal (prior to the 0.5.12 release),
+         say Y and update the tools as soon as possible, as this
+         option will be removed soon.
 
 config LIB80211
        tristate "Common routines for IEEE802.11 drivers"
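
The WIRELESS_EXT_SYSFS help text above notes that the same statistics are available through the wireless-extensions ioctls. As a hedged illustration of that ioctl path only (the interface name wlan0 is an assumption), a userspace read might look like:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/wireless.h>

int main(void)
{
	struct iw_statistics stats;
	struct iwreq wrq;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&wrq, 0, sizeof(wrq));
	memset(&stats, 0, sizeof(stats));
	strncpy(wrq.ifr_name, "wlan0", IFNAMSIZ);	/* assumed interface */
	wrq.u.data.pointer = &stats;
	wrq.u.data.length = sizeof(stats);
	wrq.u.data.flags = 1;				/* clear "updated" flags */

	if (ioctl(fd, SIOCGIWSTATS, &wrq) == 0)
		printf("link quality: %u\n", stats.qual.qual);

	close(fd);
	return 0;
}
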
index 17cd0c0..2fcfe09 100644
@@ -6,6 +6,7 @@
  * Copyright 2009      Johannes Berg <johannes@sipsolutions.net>
  */
 
+#include <linux/export.h>
 #include <net/cfg80211.h>
 #include "core.h"
 
@@ -44,9 +45,9 @@ rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
        return chan;
 }
 
-static bool can_beacon_sec_chan(struct wiphy *wiphy,
-                               struct ieee80211_channel *chan,
-                               enum nl80211_channel_type channel_type)
+int cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
+                                 struct ieee80211_channel *chan,
+                                 enum nl80211_channel_type channel_type)
 {
        struct ieee80211_channel *sec_chan;
        int diff;
@@ -75,6 +76,7 @@ static bool can_beacon_sec_chan(struct wiphy *wiphy,
 
        return true;
 }
+EXPORT_SYMBOL(cfg80211_can_beacon_sec_chan);
 
 int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
                      struct wireless_dev *wdev, int freq,
@@ -109,8 +111,8 @@ int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
                switch (channel_type) {
                case NL80211_CHAN_HT40PLUS:
                case NL80211_CHAN_HT40MINUS:
-                       if (!can_beacon_sec_chan(&rdev->wiphy, chan,
-                                                channel_type)) {
+                       if (!cfg80211_can_beacon_sec_chan(&rdev->wiphy, chan,
+                                                         channel_type)) {
                                printk(KERN_DEBUG
                                       "cfg80211: Secondary channel not "
                                       "allowed to initiate communication\n");
index fb08c28..43ad9c8 100644
@@ -249,12 +249,11 @@ struct cfg80211_event {
                        u16 status;
                } cr;
                struct {
-                       struct ieee80211_channel *channel;
-                       u8 bssid[ETH_ALEN];
                        const u8 *req_ie;
                        const u8 *resp_ie;
                        size_t req_ie_len;
                        size_t resp_ie_len;
+                       struct cfg80211_bss *bss;
                } rm;
                struct {
                        const u8 *ie;
@@ -403,8 +402,7 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
                        struct net_device *dev, u16 reason,
                        bool wextev);
 void __cfg80211_roamed(struct wireless_dev *wdev,
-                      struct ieee80211_channel *channel,
-                      const u8 *bssid,
+                      struct cfg80211_bss *bss,
                       const u8 *req_ie, size_t req_ie_len,
                       const u8 *resp_ie, size_t resp_ie_len);
 int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
index b7b7868..8c550df 100644
@@ -20,6 +20,7 @@
  * interface
  */
 #define MESH_PREQ_MIN_INT      10
+#define MESH_PERR_MIN_INT      100
 #define MESH_DIAM_TRAVERSAL_TIME 50
 
 /*
@@ -47,6 +48,7 @@ const struct mesh_config default_mesh_config = {
        .dot11MeshMaxPeerLinks = MESH_MAX_ESTAB_PLINKS,
        .dot11MeshHWMPactivePathTimeout = MESH_PATH_TIMEOUT,
        .dot11MeshHWMPpreqMinInterval = MESH_PREQ_MIN_INT,
+       .dot11MeshHWMPperrMinInterval = MESH_PERR_MIN_INT,
        .dot11MeshHWMPnetDiameterTraversalTime = MESH_DIAM_TRAVERSAL_TIME,
        .dot11MeshHWMPmaxPREQretries = MESH_MAX_PREQ_RETRIES,
        .path_refresh_time = MESH_PATH_REFRESH_TIME,
index eee9ccc..b07c4fc 100644
@@ -47,22 +47,21 @@ static struct genl_family nl80211_fam = {
 };
 
 /* internal helper: get rdev and dev */
-static int get_rdev_dev_by_info_ifindex(struct genl_info *info,
-                                      struct cfg80211_registered_device **rdev,
-                                      struct net_device **dev)
+static int get_rdev_dev_by_ifindex(struct net *netns, struct nlattr **attrs,
+                                  struct cfg80211_registered_device **rdev,
+                                  struct net_device **dev)
 {
-       struct nlattr **attrs = info->attrs;
        int ifindex;
 
        if (!attrs[NL80211_ATTR_IFINDEX])
                return -EINVAL;
 
        ifindex = nla_get_u32(attrs[NL80211_ATTR_IFINDEX]);
-       *dev = dev_get_by_index(genl_info_net(info), ifindex);
+       *dev = dev_get_by_index(netns, ifindex);
        if (!*dev)
                return -ENODEV;
 
-       *rdev = cfg80211_get_dev_from_ifindex(genl_info_net(info), ifindex);
+       *rdev = cfg80211_get_dev_from_ifindex(netns, ifindex);
        if (IS_ERR(*rdev)) {
                dev_put(*dev);
                return PTR_ERR(*rdev);
@@ -204,6 +203,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
        [NL80211_ATTR_HT_CAPABILITY_MASK] = {
                .len = NL80211_HT_CAPABILITY_LEN
        },
+       [NL80211_ATTR_NOACK_MAP] = { .type = NLA_U16 },
 };
 
 /* policy for the key attributes */
@@ -904,6 +904,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
        if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
                CMD(sched_scan_start, START_SCHED_SCAN);
        CMD(probe_client, PROBE_CLIENT);
+       CMD(set_noack_map, SET_NOACK_MAP);
        if (dev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) {
                i++;
                NLA_PUT_U32(msg, i, NL80211_CMD_REGISTER_BEACONS);
@@ -1759,6 +1760,23 @@ static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info)
        return rdev->ops->del_virtual_intf(&rdev->wiphy, dev);
 }
 
+static int nl80211_set_noack_map(struct sk_buff *skb, struct genl_info *info)
+{
+       struct cfg80211_registered_device *rdev = info->user_ptr[0];
+       struct net_device *dev = info->user_ptr[1];
+       u16 noack_map;
+
+       if (!info->attrs[NL80211_ATTR_NOACK_MAP])
+               return -EINVAL;
+
+       if (!rdev->ops->set_noack_map)
+               return -EOPNOTSUPP;
+
+       noack_map = nla_get_u16(info->attrs[NL80211_ATTR_NOACK_MAP]);
+
+       return rdev->ops->set_noack_map(&rdev->wiphy, dev, noack_map);
+}
+
 struct get_key_cookie {
        struct sk_buff *msg;
        int error;
@@ -2228,6 +2246,7 @@ static const struct nla_policy sta_flags_policy[NL80211_STA_FLAG_MAX + 1] = {
        [NL80211_STA_FLAG_WME] = { .type = NLA_FLAG },
        [NL80211_STA_FLAG_MFP] = { .type = NLA_FLAG },
        [NL80211_STA_FLAG_AUTHENTICATED] = { .type = NLA_FLAG },
+       [NL80211_STA_FLAG_TDLS_PEER] = { .type = NLA_FLAG },
 };
 
 static int parse_station_flags(struct genl_info *info,
@@ -2560,6 +2579,9 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
                params.ht_capa =
                        nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
 
+       if (!rdev->ops->change_station)
+               return -EOPNOTSUPP;
+
        if (parse_station_flags(info, &params))
                return -EINVAL;
 
@@ -2571,73 +2593,84 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
                params.plink_state =
                    nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_STATE]);
 
-       params.vlan = get_vlan(info, rdev);
-       if (IS_ERR(params.vlan))
-               return PTR_ERR(params.vlan);
-
-       /* validate settings */
-       err = 0;
-
        switch (dev->ieee80211_ptr->iftype) {
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_AP_VLAN:
        case NL80211_IFTYPE_P2P_GO:
                /* disallow mesh-specific things */
                if (params.plink_action)
-                       err = -EINVAL;
+                       return -EINVAL;
+
+               /* TDLS can't be set, ... */
+               if (params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
+                       return -EINVAL;
+               /*
+                * ... but don't bother the driver with it. This works around
+                * a hostapd/wpa_supplicant issue -- it always includes the
+                * TDLS_PEER flag in the mask even for AP mode.
+                */
+               params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
+
+               /* accept only the listed bits */
+               if (params.sta_flags_mask &
+                               ~(BIT(NL80211_STA_FLAG_AUTHORIZED) |
+                                 BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
+                                 BIT(NL80211_STA_FLAG_WME) |
+                                 BIT(NL80211_STA_FLAG_MFP)))
+                       return -EINVAL;
+
+               /* must be last in here for error handling */
+               params.vlan = get_vlan(info, rdev);
+               if (IS_ERR(params.vlan))
+                       return PTR_ERR(params.vlan);
                break;
        case NL80211_IFTYPE_P2P_CLIENT:
        case NL80211_IFTYPE_STATION:
                /* disallow things sta doesn't support */
                if (params.plink_action)
-                       err = -EINVAL;
-               if (params.vlan)
-                       err = -EINVAL;
-               if (params.supported_rates &&
-                   !(params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
-                       err = -EINVAL;
+                       return -EINVAL;
                if (params.ht_capa)
-                       err = -EINVAL;
+                       return -EINVAL;
                if (params.listen_interval >= 0)
-                       err = -EINVAL;
-               if (params.sta_flags_mask &
-                               ~(BIT(NL80211_STA_FLAG_AUTHORIZED) |
-                                 BIT(NL80211_STA_FLAG_TDLS_PEER)))
-                       err = -EINVAL;
-               /* can't change the TDLS bit */
-               if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
-                   (params.sta_flags_mask & BIT(NL80211_STA_FLAG_TDLS_PEER)))
-                       err = -EINVAL;
+                       return -EINVAL;
+               /*
+                * Don't allow userspace to change the TDLS_PEER flag;
+                * silently drop it from the mask instead, since we
+                * have no state here to tell whether the request is
+                * actually trying to change it.
+                */
+               params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
+
+               /* reject any changes other than AUTHORIZED */
+               if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED))
+                       return -EINVAL;
                break;
        case NL80211_IFTYPE_MESH_POINT:
                /* disallow things mesh doesn't support */
                if (params.vlan)
-                       err = -EINVAL;
+                       return -EINVAL;
                if (params.ht_capa)
-                       err = -EINVAL;
+                       return -EINVAL;
                if (params.listen_interval >= 0)
-                       err = -EINVAL;
+                       return -EINVAL;
+               /*
+                * No special handling for TDLS here -- the userspace
+                * mesh code doesn't have this bug.
+                */
                if (params.sta_flags_mask &
                                ~(BIT(NL80211_STA_FLAG_AUTHENTICATED) |
                                  BIT(NL80211_STA_FLAG_MFP) |
                                  BIT(NL80211_STA_FLAG_AUTHORIZED)))
-                       err = -EINVAL;
+                       return -EINVAL;
                break;
        default:
-               err = -EINVAL;
+               return -EOPNOTSUPP;
        }
 
-       if (err)
-               goto out;
-
-       if (!rdev->ops->change_station) {
-               err = -EOPNOTSUPP;
-               goto out;
-       }
+       /* be aware of params.vlan when changing code here */
 
        err = rdev->ops->change_station(&rdev->wiphy, dev, mac_addr, &params);
 
- out:
        if (params.vlan)
                dev_put(params.vlan);
 
@@ -2692,70 +2725,81 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
                params.plink_action =
                    nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]);
 
+       if (!rdev->ops->add_station)
+               return -EOPNOTSUPP;
+
        if (parse_station_flags(info, &params))
                return -EINVAL;
 
-       /* parse WME attributes if sta is WME capable */
-       if ((rdev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) &&
-           (params.sta_flags_set & BIT(NL80211_STA_FLAG_WME)) &&
-           info->attrs[NL80211_ATTR_STA_WME]) {
-               struct nlattr *tb[NL80211_STA_WME_MAX + 1];
-               struct nlattr *nla;
+       switch (dev->ieee80211_ptr->iftype) {
+       case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_AP_VLAN:
+       case NL80211_IFTYPE_P2P_GO:
+               /* parse WME attributes if sta is WME capable */
+               if ((rdev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) &&
+                   (params.sta_flags_set & BIT(NL80211_STA_FLAG_WME)) &&
+                   info->attrs[NL80211_ATTR_STA_WME]) {
+                       struct nlattr *tb[NL80211_STA_WME_MAX + 1];
+                       struct nlattr *nla;
+
+                       nla = info->attrs[NL80211_ATTR_STA_WME];
+                       err = nla_parse_nested(tb, NL80211_STA_WME_MAX, nla,
+                                              nl80211_sta_wme_policy);
+                       if (err)
+                               return err;
 
-               nla = info->attrs[NL80211_ATTR_STA_WME];
-               err = nla_parse_nested(tb, NL80211_STA_WME_MAX, nla,
-                                      nl80211_sta_wme_policy);
-               if (err)
-                       return err;
+                       if (tb[NL80211_STA_WME_UAPSD_QUEUES])
+                               params.uapsd_queues =
+                                    nla_get_u8(tb[NL80211_STA_WME_UAPSD_QUEUES]);
+                       if (params.uapsd_queues &
+                                       ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
+                               return -EINVAL;
 
-               if (tb[NL80211_STA_WME_UAPSD_QUEUES])
-                       params.uapsd_queues =
-                            nla_get_u8(tb[NL80211_STA_WME_UAPSD_QUEUES]);
-               if (params.uapsd_queues & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
-                       return -EINVAL;
+                       if (tb[NL80211_STA_WME_MAX_SP])
+                               params.max_sp =
+                                    nla_get_u8(tb[NL80211_STA_WME_MAX_SP]);
 
-               if (tb[NL80211_STA_WME_MAX_SP])
-                       params.max_sp =
-                            nla_get_u8(tb[NL80211_STA_WME_MAX_SP]);
+                       if (params.max_sp &
+                                       ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK)
+                               return -EINVAL;
 
-               if (params.max_sp & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK)
+                       params.sta_modify_mask |= STATION_PARAM_APPLY_UAPSD;
+               }
+               /* TDLS peers cannot be added */
+               if (params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
                        return -EINVAL;
+               /* but don't bother the driver with it */
+               params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
 
-               params.sta_modify_mask |= STATION_PARAM_APPLY_UAPSD;
+               /* must be last in here for error handling */
+               params.vlan = get_vlan(info, rdev);
+               if (IS_ERR(params.vlan))
+                       return PTR_ERR(params.vlan);
+               break;
+       case NL80211_IFTYPE_MESH_POINT:
+               /* TDLS peers cannot be added */
+               if (params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
+                       return -EINVAL;
+               break;
+       case NL80211_IFTYPE_STATION:
+               /* Only TDLS peers can be added */
+               if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
+                       return -EINVAL;
+               /* Can only add if TDLS ... */
+               if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS))
+                       return -EOPNOTSUPP;
+               /* ... with external setup is supported */
+               if (!(rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP))
+                       return -EOPNOTSUPP;
+               break;
+       default:
+               return -EOPNOTSUPP;
        }
 
-       if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
-           dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
-           dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT &&
-           dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO &&
-           dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION)
-               return -EINVAL;
-
-       /*
-        * Only managed stations can add TDLS peers, and only when the
-        * wiphy supports external TDLS setup.
-        */
-       if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION &&
-           !((params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
-             (rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
-             (rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP)))
-               return -EINVAL;
-
-       params.vlan = get_vlan(info, rdev);
-       if (IS_ERR(params.vlan))
-               return PTR_ERR(params.vlan);
-
-       /* validate settings */
-       err = 0;
-
-       if (!rdev->ops->add_station) {
-               err = -EOPNOTSUPP;
-               goto out;
-       }
+       /* be aware of params.vlan when changing code here */
 
        err = rdev->ops->add_station(&rdev->wiphy, dev, mac_addr, &params);
 
- out:
        if (params.vlan)
                dev_put(params.vlan);
        return err;
@@ -3176,6 +3220,8 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
                        cur_params.dot11MeshHWMPactivePathTimeout);
        NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
                        cur_params.dot11MeshHWMPpreqMinInterval);
+       NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
+                       cur_params.dot11MeshHWMPperrMinInterval);
        NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
                        cur_params.dot11MeshHWMPnetDiameterTraversalTime);
        NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_ROOTMODE,
@@ -3210,6 +3256,7 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A
        [NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT] = { .type = NLA_U16 },
        [NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT] = { .type = NLA_U32 },
        [NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL] = { .type = NLA_U16 },
+       [NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL] = { .type = NLA_U16 },
        [NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME] = { .type = NLA_U16 },
        [NL80211_MESHCONF_HWMP_ROOTMODE] = { .type = NLA_U8 },
        [NL80211_MESHCONF_HWMP_RANN_INTERVAL] = { .type = NLA_U16 },
@@ -3284,6 +3331,9 @@ do {\
        FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPpreqMinInterval,
                        mask, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
                        nla_get_u16);
+       FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPperrMinInterval,
+                       mask, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
+                       nla_get_u16);
        FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
                        dot11MeshHWMPnetDiameterTraversalTime,
                        mask, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
@@ -4657,13 +4707,41 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
                ibss.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
        }
 
-       ibss.channel = ieee80211_get_channel(wiphy,
-               nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
+       if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
+               enum nl80211_channel_type channel_type;
+
+               channel_type = nla_get_u32(
+                               info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
+               if (channel_type != NL80211_CHAN_NO_HT &&
+                   channel_type != NL80211_CHAN_HT20 &&
+                   channel_type != NL80211_CHAN_HT40MINUS &&
+                   channel_type != NL80211_CHAN_HT40PLUS)
+                       return -EINVAL;
+
+               if (channel_type != NL80211_CHAN_NO_HT &&
+                   !(wiphy->features & NL80211_FEATURE_HT_IBSS))
+                       return -EINVAL;
+
+               ibss.channel_type = channel_type;
+       } else {
+               ibss.channel_type = NL80211_CHAN_NO_HT;
+       }
+
+       ibss.channel = rdev_freq_to_chan(rdev,
+               nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]),
+               ibss.channel_type);
        if (!ibss.channel ||
            ibss.channel->flags & IEEE80211_CHAN_NO_IBSS ||
            ibss.channel->flags & IEEE80211_CHAN_DISABLED)
                return -EINVAL;
 
+       /* Both channels should be able to initiate communication */
+       if ((ibss.channel_type == NL80211_CHAN_HT40PLUS ||
+            ibss.channel_type == NL80211_CHAN_HT40MINUS) &&
+           !cfg80211_can_beacon_sec_chan(&rdev->wiphy, ibss.channel,
+                                         ibss.channel_type))
+               return -EINVAL;
+
        ibss.channel_fixed = !!info->attrs[NL80211_ATTR_FREQ_FIXED];
        ibss.privacy = !!info->attrs[NL80211_ATTR_PRIVACY];
 
@@ -4742,7 +4820,7 @@ static int nl80211_testmode_do(struct sk_buff *skb, struct genl_info *info)
 static int nl80211_testmode_dump(struct sk_buff *skb,
                                 struct netlink_callback *cb)
 {
-       struct cfg80211_registered_device *dev;
+       struct cfg80211_registered_device *rdev;
        int err;
        long phy_idx;
        void *data = NULL;
@@ -4760,9 +4838,21 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
                                  nl80211_policy);
                if (err)
                        return err;
-               if (!nl80211_fam.attrbuf[NL80211_ATTR_WIPHY])
-                       return -EINVAL;
-               phy_idx = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_WIPHY]);
+               if (nl80211_fam.attrbuf[NL80211_ATTR_WIPHY]) {
+                       phy_idx = nla_get_u32(
+                               nl80211_fam.attrbuf[NL80211_ATTR_WIPHY]);
+               } else {
+                       struct net_device *netdev;
+
+                       err = get_rdev_dev_by_ifindex(sock_net(skb->sk),
+                                                     nl80211_fam.attrbuf,
+                                                     &rdev, &netdev);
+                       if (err)
+                               return err;
+                       dev_put(netdev);
+                       phy_idx = rdev->wiphy_idx;
+                       cfg80211_unlock_rdev(rdev);
+               }
                if (nl80211_fam.attrbuf[NL80211_ATTR_TESTDATA])
                        cb->args[1] =
                                (long)nl80211_fam.attrbuf[NL80211_ATTR_TESTDATA];
@@ -4774,15 +4864,15 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
        }
 
        mutex_lock(&cfg80211_mutex);
-       dev = cfg80211_rdev_by_wiphy_idx(phy_idx);
-       if (!dev) {
+       rdev = cfg80211_rdev_by_wiphy_idx(phy_idx);
+       if (!rdev) {
                mutex_unlock(&cfg80211_mutex);
                return -ENOENT;
        }
-       cfg80211_lock_rdev(dev);
+       cfg80211_lock_rdev(rdev);
        mutex_unlock(&cfg80211_mutex);
 
-       if (!dev->ops->testmode_dump) {
+       if (!rdev->ops->testmode_dump) {
                err = -EOPNOTSUPP;
                goto out_err;
        }
@@ -4793,7 +4883,7 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
                                           NL80211_CMD_TESTMODE);
                struct nlattr *tmdata;
 
-               if (nla_put_u32(skb, NL80211_ATTR_WIPHY, dev->wiphy_idx) < 0) {
+               if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx) < 0) {
                        genlmsg_cancel(skb, hdr);
                        break;
                }
@@ -4803,8 +4893,8 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
                        genlmsg_cancel(skb, hdr);
                        break;
                }
-               err = dev->ops->testmode_dump(&dev->wiphy, skb, cb,
-                                             data, data_len);
+               err = rdev->ops->testmode_dump(&rdev->wiphy, skb, cb,
+                                              data, data_len);
                nla_nest_end(skb, tmdata);
 
                if (err == -ENOBUFS || err == -ENOENT) {
@@ -4822,7 +4912,7 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
        /* see above */
        cb->args[0] = phy_idx + 1;
  out_err:
-       cfg80211_unlock_rdev(dev);
+       cfg80211_unlock_rdev(rdev);
        return err;
 }
 
@@ -5648,6 +5738,11 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
        setup.mesh_id = nla_data(info->attrs[NL80211_ATTR_MESH_ID]);
        setup.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
 
+       if (info->attrs[NL80211_ATTR_MCAST_RATE] &&
+           !nl80211_parse_mcast_rate(rdev, setup.mcast_rate,
+                           nla_get_u32(info->attrs[NL80211_ATTR_MCAST_RATE])))
+                       return -EINVAL;
+
        if (info->attrs[NL80211_ATTR_MESH_SETUP]) {
                /* parse additional setup parameters if given */
                err = nl80211_parse_mesh_setup(info, &setup);
@@ -6052,7 +6147,8 @@ static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb,
                }
                info->user_ptr[0] = rdev;
        } else if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV) {
-               err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
+               err = get_rdev_dev_by_ifindex(genl_info_net(info), info->attrs,
+                                             &rdev, &dev);
                if (err) {
                        if (rtnl)
                                rtnl_unlock();
@@ -6604,6 +6700,15 @@ static struct genl_ops nl80211_ops[] = {
                .internal_flags = NL80211_FLAG_NEED_WIPHY |
                                  NL80211_FLAG_NEED_RTNL,
        },
+       {
+               .cmd = NL80211_CMD_SET_NOACK_MAP,
+               .doit = nl80211_set_noack_map,
+               .policy = nl80211_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = NL80211_FLAG_NEED_NETDEV |
+                                 NL80211_FLAG_NEED_RTNL,
+       },
+
 };
 
 static struct genl_multicast_group nl80211_mlme_mcgrp = {
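
The pieces above (the NL80211_ATTR_NOACK_MAP policy entry, the SET_NOACK_MAP advertisement and the nl80211_set_noack_map() handler) route the new command to a set_noack_map driver operation. As a hedged sketch only, with every example_* name invented for illustration, a driver-side implementation might be wired up like this:

#include <net/cfg80211.h>

/* hypothetical per-device state kept in wiphy_priv() */
struct example_priv {
	u16 noack_map;
};

static int example_set_noack_map(struct wiphy *wiphy, struct net_device *dev,
				 u16 noack_map)
{
	struct example_priv *priv = wiphy_priv(wiphy);

	/* bit N set means frames of TID N use the no-ack policy */
	priv->noack_map = noack_map;
	/* a real driver would push this setting to the device here */
	return 0;
}

static const struct cfg80211_ops example_cfg_ops = {
	/* ... the driver's other callbacks ... */
	.set_noack_map = example_set_noack_map,
};
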
index 76b35df..c45c8b7 100644
 #define REG_DBG_PRINT(args...)
 #endif
 
+static struct regulatory_request core_request_world = {
+       .initiator = NL80211_REGDOM_SET_BY_CORE,
+       .alpha2[0] = '0',
+       .alpha2[1] = '0',
+       .intersect = false,
+       .processed = true,
+       .country_ie_env = ENVIRON_ANY,
+};
+
 /* Receipt of information from last regulatory request */
-static struct regulatory_request *last_request;
+static struct regulatory_request *last_request = &core_request_world;
 
 /* To trigger userspace events */
 static struct platform_device *reg_pdev;
@@ -150,7 +159,7 @@ static char user_alpha2[2];
 module_param(ieee80211_regdom, charp, 0444);
 MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
 
-static void reset_regdomains(void)
+static void reset_regdomains(bool full_reset)
 {
        /* avoid freeing static information or freeing something twice */
        if (cfg80211_regdomain == cfg80211_world_regdom)
@@ -165,6 +174,13 @@ static void reset_regdomains(void)
 
        cfg80211_world_regdom = &world_regdom;
        cfg80211_regdomain = NULL;
+
+       if (!full_reset)
+               return;
+
+       if (last_request != &core_request_world)
+               kfree(last_request);
+       last_request = &core_request_world;
 }
 
 /*
@@ -175,7 +191,7 @@ static void update_world_regdomain(const struct ieee80211_regdomain *rd)
 {
        BUG_ON(!last_request);
 
-       reset_regdomains();
+       reset_regdomains(false);
 
        cfg80211_world_regdom = rd;
        cfg80211_regdomain = rd;
@@ -857,10 +873,22 @@ static void handle_channel(struct wiphy *wiphy,
        chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
        chan->max_antenna_gain = min(chan->orig_mag,
                (int) MBI_TO_DBI(power_rule->max_antenna_gain));
-       if (chan->orig_mpwr)
-               chan->max_power = min(chan->orig_mpwr,
-                       (int) MBM_TO_DBM(power_rule->max_eirp));
-       else
+       if (chan->orig_mpwr) {
+               /*
+                * Devices that have their own custom regulatory domain
+                * but also use WIPHY_FLAG_STRICT_REGULATORY will follow the
+                * passed country IE power settings.
+                */
+               if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
+                   wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY &&
+                   wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) {
+                       chan->max_power =
+                               MBM_TO_DBM(power_rule->max_eirp);
+               } else {
+                       chan->max_power = min(chan->orig_mpwr,
+                               (int) MBM_TO_DBM(power_rule->max_eirp));
+               }
+       } else
                chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
 }
 
@@ -1147,9 +1175,21 @@ void regulatory_update(struct wiphy *wiphy,
 static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator)
 {
        struct cfg80211_registered_device *rdev;
+       struct wiphy *wiphy;
 
-       list_for_each_entry(rdev, &cfg80211_rdev_list, list)
-               wiphy_update_regulatory(&rdev->wiphy, initiator);
+       list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+               wiphy = &rdev->wiphy;
+               wiphy_update_regulatory(wiphy, initiator);
+               /*
+                * Regulatory updates set by CORE are ignored for custom
+                * regulatory cards. Notify the driver anyway, as some
+                * drivers use this to restore their orig_* reg domain.
+                */
+               if (initiator == NL80211_REGDOM_SET_BY_CORE &&
+                   wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY &&
+                   wiphy->reg_notifier)
+                       wiphy->reg_notifier(wiphy, last_request);
+       }
 }
 
 static void handle_channel_custom(struct wiphy *wiphy,
@@ -1409,7 +1449,8 @@ static int __regulatory_hint(struct wiphy *wiphy,
        }
 
 new_request:
-       kfree(last_request);
+       if (last_request != &core_request_world)
+               kfree(last_request);
 
        last_request = pending_request;
        last_request->intersect = intersect;
@@ -1579,9 +1620,6 @@ static int regulatory_hint_core(const char *alpha2)
 {
        struct regulatory_request *request;
 
-       kfree(last_request);
-       last_request = NULL;
-
        request = kzalloc(sizeof(struct regulatory_request),
                          GFP_KERNEL);
        if (!request)
@@ -1754,6 +1792,26 @@ static void restore_alpha2(char *alpha2, bool reset_user)
                REG_DBG_PRINT("Restoring regulatory settings\n");
 }
 
+static void restore_custom_reg_settings(struct wiphy *wiphy)
+{
+       struct ieee80211_supported_band *sband;
+       enum ieee80211_band band;
+       struct ieee80211_channel *chan;
+       int i;
+
+       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+               sband = wiphy->bands[band];
+               if (!sband)
+                       continue;
+               for (i = 0; i < sband->n_channels; i++) {
+                       chan = &sband->channels[i];
+                       chan->flags = chan->orig_flags;
+                       chan->max_antenna_gain = chan->orig_mag;
+                       chan->max_power = chan->orig_mpwr;
+               }
+       }
+}
+
 /*
  * Restoring regulatory settings involves ignoring any
  * possibly stale country IE information and user regulatory
@@ -1775,11 +1833,12 @@ static void restore_regulatory_settings(bool reset_user)
        struct reg_beacon *reg_beacon, *btmp;
        struct regulatory_request *reg_request, *tmp;
        LIST_HEAD(tmp_reg_req_list);
+       struct cfg80211_registered_device *rdev;
 
        mutex_lock(&cfg80211_mutex);
        mutex_lock(&reg_mutex);
 
-       reset_regdomains();
+       reset_regdomains(true);
        restore_alpha2(alpha2, reset_user);
 
        /*
@@ -1823,6 +1882,11 @@ static void restore_regulatory_settings(bool reset_user)
        /* First restore to the basic regulatory settings */
        cfg80211_regdomain = cfg80211_world_regdom;
 
+       list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+               if (rdev->wiphy.flags & WIPHY_FLAG_CUSTOM_REGULATORY)
+                       restore_custom_reg_settings(&rdev->wiphy);
+       }
+
        mutex_unlock(&reg_mutex);
        mutex_unlock(&cfg80211_mutex);
 
@@ -2076,8 +2140,10 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
        }
 
        request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
-       if (!request_wiphy) {
-               reg_set_request_processed();
+       if (!request_wiphy &&
+           (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER ||
+            last_request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)) {
+               schedule_delayed_work(&reg_timeout, 0);
                return -ENODEV;
        }
 
@@ -2085,7 +2151,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
                int r;
 
                if (last_request->initiator != NL80211_REGDOM_SET_BY_DRIVER) {
-                       reset_regdomains();
+                       reset_regdomains(false);
                        cfg80211_regdomain = rd;
                        return 0;
                }
@@ -2106,7 +2172,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
                if (r)
                        return r;
 
-               reset_regdomains();
+               reset_regdomains(false);
                cfg80211_regdomain = rd;
                return 0;
        }
@@ -2131,7 +2197,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
 
                rd = NULL;
 
-               reset_regdomains();
+               reset_regdomains(false);
                cfg80211_regdomain = intersected_rd;
 
                return 0;
@@ -2151,7 +2217,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
        kfree(rd);
        rd = NULL;
 
-       reset_regdomains();
+       reset_regdomains(false);
        cfg80211_regdomain = intersected_rd;
 
        return 0;
@@ -2304,11 +2370,8 @@ void /* __init_or_exit */ regulatory_exit(void)
        mutex_lock(&cfg80211_mutex);
        mutex_lock(&reg_mutex);
 
-       reset_regdomains();
-
-       kfree(last_request);
+       reset_regdomains(true);
 
-       last_request = NULL;
        dev_set_uevent_suppress(&reg_pdev->dev, true);
 
        platform_device_unregister(reg_pdev);
index f0c900c..7b9ecae 100644
@@ -553,45 +553,35 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
 EXPORT_SYMBOL(cfg80211_connect_result);
 
 void __cfg80211_roamed(struct wireless_dev *wdev,
-                      struct ieee80211_channel *channel,
-                      const u8 *bssid,
+                      struct cfg80211_bss *bss,
                       const u8 *req_ie, size_t req_ie_len,
                       const u8 *resp_ie, size_t resp_ie_len)
 {
-       struct cfg80211_bss *bss;
 #ifdef CONFIG_CFG80211_WEXT
        union iwreq_data wrqu;
 #endif
-
        ASSERT_WDEV_LOCK(wdev);
 
        if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION &&
                    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
-               return;
+               goto out;
 
        if (wdev->sme_state != CFG80211_SME_CONNECTED)
-               return;
+               goto out;
 
        /* internal error -- how did we get to CONNECTED w/o BSS? */
        if (WARN_ON(!wdev->current_bss)) {
-               return;
+               goto out;
        }
 
        cfg80211_unhold_bss(wdev->current_bss);
        cfg80211_put_bss(&wdev->current_bss->pub);
        wdev->current_bss = NULL;
 
-       bss = cfg80211_get_bss(wdev->wiphy, channel, bssid,
-                              wdev->ssid, wdev->ssid_len,
-                              WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
-
-       if (WARN_ON(!bss))
-               return;
-
        cfg80211_hold_bss(bss_from_pub(bss));
        wdev->current_bss = bss_from_pub(bss);
 
-       nl80211_send_roamed(wiphy_to_dev(wdev->wiphy), wdev->netdev, bssid,
+       nl80211_send_roamed(wiphy_to_dev(wdev->wiphy), wdev->netdev, bss->bssid,
                            req_ie, req_ie_len, resp_ie, resp_ie_len,
                            GFP_KERNEL);
 
@@ -612,11 +602,15 @@ void __cfg80211_roamed(struct wireless_dev *wdev,
 
        memset(&wrqu, 0, sizeof(wrqu));
        wrqu.ap_addr.sa_family = ARPHRD_ETHER;
-       memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN);
-       memcpy(wdev->wext.prev_bssid, bssid, ETH_ALEN);
+       memcpy(wrqu.ap_addr.sa_data, bss->bssid, ETH_ALEN);
+       memcpy(wdev->wext.prev_bssid, bss->bssid, ETH_ALEN);
        wdev->wext.prev_bssid_valid = true;
        wireless_send_event(wdev->netdev, SIOCGIWAP, &wrqu, NULL);
 #endif
+
+       return;
+out:
+       cfg80211_put_bss(bss);
 }
 
 void cfg80211_roamed(struct net_device *dev,
@@ -624,6 +618,27 @@ void cfg80211_roamed(struct net_device *dev,
                     const u8 *bssid,
                     const u8 *req_ie, size_t req_ie_len,
                     const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp)
+{
+       struct wireless_dev *wdev = dev->ieee80211_ptr;
+       struct cfg80211_bss *bss;
+
+       CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED);
+
+       bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, wdev->ssid,
+                              wdev->ssid_len, WLAN_CAPABILITY_ESS,
+                              WLAN_CAPABILITY_ESS);
+       if (WARN_ON(!bss))
+               return;
+
+       cfg80211_roamed_bss(dev, bss, req_ie, req_ie_len, resp_ie,
+                           resp_ie_len, gfp);
+}
+EXPORT_SYMBOL(cfg80211_roamed);
+
+void cfg80211_roamed_bss(struct net_device *dev,
+                        struct cfg80211_bss *bss, const u8 *req_ie,
+                        size_t req_ie_len, const u8 *resp_ie,
+                        size_t resp_ie_len, gfp_t gfp)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -632,26 +647,30 @@ void cfg80211_roamed(struct net_device *dev,
 
        CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED);
 
+       if (WARN_ON(!bss))
+               return;
+
        ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp);
-       if (!ev)
+       if (!ev) {
+               cfg80211_put_bss(bss);
                return;
+       }
 
        ev->type = EVENT_ROAMED;
-       ev->rm.channel = channel;
-       memcpy(ev->rm.bssid, bssid, ETH_ALEN);
        ev->rm.req_ie = ((u8 *)ev) + sizeof(*ev);
        ev->rm.req_ie_len = req_ie_len;
        memcpy((void *)ev->rm.req_ie, req_ie, req_ie_len);
        ev->rm.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len;
        ev->rm.resp_ie_len = resp_ie_len;
        memcpy((void *)ev->rm.resp_ie, resp_ie, resp_ie_len);
+       ev->rm.bss = bss;
 
        spin_lock_irqsave(&wdev->event_lock, flags);
        list_add_tail(&ev->list, &wdev->event_list);
        spin_unlock_irqrestore(&wdev->event_lock, flags);
        queue_work(cfg80211_wq, &rdev->event_work);
 }
-EXPORT_SYMBOL(cfg80211_roamed);
+EXPORT_SYMBOL(cfg80211_roamed_bss);
 
 void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
                             size_t ie_len, u16 reason, bool from_ap)
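
With the split above, cfg80211_roamed() keeps its old channel/bssid signature and looks the BSS up itself, while the new cfg80211_roamed_bss() export lets a driver that already holds a cfg80211_bss reference report the roam directly; as the diff shows, that reference is consumed (attached to wdev->current_bss or put on error). A hedged, illustrative driver-side use, with example_report_roam() being an invented name:

#include <net/cfg80211.h>

static void example_report_roam(struct net_device *dev, struct wiphy *wiphy,
				struct ieee80211_channel *chan, const u8 *bssid,
				const u8 *req_ie, size_t req_ie_len,
				const u8 *resp_ie, size_t resp_ie_len)
{
	struct cfg80211_bss *bss;

	/* look up (and take a reference to) the BSS we roamed to */
	bss = cfg80211_get_bss(wiphy, chan, bssid, NULL, 0,
			       WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
	if (!bss)
		return;

	/* cfg80211_roamed_bss() takes over the bss reference */
	cfg80211_roamed_bss(dev, bss, req_ie, req_ie_len,
			    resp_ie, resp_ie_len, GFP_KERNEL);
}
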
index 9c601d5..e77df75 100644
@@ -740,9 +740,9 @@ static void cfg80211_process_wdev_events(struct wireless_dev *wdev)
                                NULL);
                        break;
                case EVENT_ROAMED:
-                       __cfg80211_roamed(wdev, ev->rm.channel, ev->rm.bssid,
-                                         ev->rm.req_ie, ev->rm.req_ie_len,
-                                         ev->rm.resp_ie, ev->rm.resp_ie_len);
+                       __cfg80211_roamed(wdev, ev->rm.bss, ev->rm.req_ie,
+                                         ev->rm.req_ie_len, ev->rm.resp_ie,
+                                         ev->rm.resp_ie_len);
                        break;
                case EVENT_DISCONNECTED:
                        __cfg80211_disconnected(wdev->netdev,
index 3e16c6a..a306bc6 100644
@@ -232,7 +232,7 @@ static int x25_device_event(struct notifier_block *this, unsigned long event,
                return NOTIFY_DONE;
 
        if (dev->type == ARPHRD_X25
-#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
+#if IS_ENABLED(CONFIG_LLC)
         || dev->type == ARPHRD_ETHER
 #endif
         ) {
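
This hunk, and the x25 and xfrm hunks that follow, all make the same mechanical substitution: the open-coded "built-in or module" test is replaced by IS_ENABLED() from <linux/kconfig.h>. A minimal illustration of the equivalence, with CONFIG_LLC standing in for any tristate option:

#include <linux/kconfig.h>

/* IS_ENABLED(CONFIG_FOO) is 1 when the option is =y or =m, 0 otherwise,
 * so a single test replaces the old two-macro check. */

#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)	/* old spelling */
#define LLC_AVAILABLE_OLD 1
#else
#define LLC_AVAILABLE_OLD 0
#endif

#if IS_ENABLED(CONFIG_LLC)				/* new spelling */
#define LLC_AVAILABLE_NEW 1
#else
#define LLC_AVAILABLE_NEW 0
#endif
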
index fa2b418..f0ce862 100644
@@ -161,7 +161,7 @@ void x25_establish_link(struct x25_neigh *nb)
                *ptr = X25_IFACE_CONNECT;
                break;
 
-#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
+#if IS_ENABLED(CONFIG_LLC)
        case ARPHRD_ETHER:
                return;
 #endif
@@ -180,7 +180,7 @@ void x25_terminate_link(struct x25_neigh *nb)
        struct sk_buff *skb;
        unsigned char *ptr;
 
-#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
+#if IS_ENABLED(CONFIG_LLC)
        if (nb->dev->type == ARPHRD_ETHER)
                return;
 #endif
@@ -213,7 +213,7 @@ void x25_send_frame(struct sk_buff *skb, struct x25_neigh *nb)
                *dptr = X25_IFACE_DATA;
                break;
 
-#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
+#if IS_ENABLED(CONFIG_LLC)
        case ARPHRD_ETHER:
                kfree_skb(skb);
                return;
index 97d77c5..cf63662 100644
@@ -134,7 +134,7 @@ struct net_device *x25_dev_get(char *devname)
 
        if (dev &&
            (!(dev->flags & IFF_UP) || (dev->type != ARPHRD_X25
-#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
+#if IS_ENABLED(CONFIG_LLC)
                                        && dev->type != ARPHRD_ETHER
 #endif
                                        ))){
index 4fce1ce..7661576 100644
@@ -1340,7 +1340,7 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
        case AF_INET:
                dst_ops = &net->xfrm.xfrm4_dst_ops;
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                dst_ops = &net->xfrm.xfrm6_dst_ops;
                break;
@@ -1499,7 +1499,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
                goto free_dst;
 
        /* Copy neighbour for reachability confirmation */
-       dst_set_neighbour(dst0, neigh_clone(dst_get_neighbour(dst)));
+       dst_set_neighbour(dst0, neigh_clone(dst_get_neighbour_noref(dst)));
 
        xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
        xfrm_init_pmtu(dst_prev);
@@ -2276,8 +2276,6 @@ static void __xfrm_garbage_collect(struct net *net)
 {
        struct dst_entry *head, *next;
 
-       flow_cache_flush();
-
        spin_lock_bh(&xfrm_policy_sk_bundle_lock);
        head = xfrm_policy_sk_bundles;
        xfrm_policy_sk_bundles = NULL;
@@ -2290,6 +2288,18 @@ static void __xfrm_garbage_collect(struct net *net)
        }
 }
 
+static void xfrm_garbage_collect(struct net *net)
+{
+       flow_cache_flush();
+       __xfrm_garbage_collect(net);
+}
+
+static void xfrm_garbage_collect_deferred(struct net *net)
+{
+       flow_cache_flush_deferred();
+       __xfrm_garbage_collect(net);
+}
+
 static void xfrm_init_pmtu(struct dst_entry *dst)
 {
        do {
@@ -2422,7 +2432,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
                if (likely(dst_ops->neigh_lookup == NULL))
                        dst_ops->neigh_lookup = xfrm_neigh_lookup;
                if (likely(afinfo->garbage_collect == NULL))
-                       afinfo->garbage_collect = __xfrm_garbage_collect;
+                       afinfo->garbage_collect = xfrm_garbage_collect_deferred;
                xfrm_policy_afinfo[afinfo->family] = afinfo;
        }
        write_unlock_bh(&xfrm_policy_afinfo_lock);
@@ -2435,7 +2445,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
                case AF_INET:
                        xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
                        break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                case AF_INET6:
                        xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
                        break;
@@ -2485,7 +2495,7 @@ static void __net_init xfrm_dst_ops_init(struct net *net)
        afinfo = xfrm_policy_afinfo[AF_INET];
        if (afinfo)
                net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        afinfo = xfrm_policy_afinfo[AF_INET6];
        if (afinfo)
                net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
@@ -2516,7 +2526,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void
 
        switch (event) {
        case NETDEV_DOWN:
-               __xfrm_garbage_collect(dev_net(dev));
+               xfrm_garbage_collect(dev_net(dev));
        }
        return NOTIFY_DONE;
 }
index d0a42df..e0d747a 100644
@@ -28,7 +28,7 @@
 #include <net/netlink.h>
 #include <net/ah.h>
 #include <asm/uaccess.h>
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <linux/in6.h>
 #endif
 
@@ -150,7 +150,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
                break;
 
        case AF_INET6:
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                break;
 #else
                err = -EAFNOSUPPORT;
@@ -201,7 +201,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
                        goto out;
                break;
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case IPPROTO_DSTOPTS:
        case IPPROTO_ROUTING:
                if (attrs[XFRMA_ALG_COMP]       ||
@@ -1160,7 +1160,7 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
                break;
 
        case AF_INET6:
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                break;
 #else
                return  -EAFNOSUPPORT;
@@ -1231,7 +1231,7 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
                switch (ut[i].family) {
                case AF_INET:
                        break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                case AF_INET6:
                        break;
 #endif
@@ -2604,7 +2604,7 @@ static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
                        return NULL;
                }
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                if (opt != IPV6_XFRM_POLICY) {
                        *dir = -EOPNOTSUPP;
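
The xfrm hunks above replace the open-coded test for "built in or modular" with the IS_ENABLED() helper from <linux/kconfig.h>, which evaluates to 1 for both CONFIG_IPV6=y and CONFIG_IPV6=m. A minimal before/after sketch (the IPv6-only body is a placeholder):

#include <linux/kconfig.h>

/* old style: spell out both the built-in and the module case */
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        /* IPv6-specific code */
#endif

/* new style: IS_ENABLED(CONFIG_IPV6) is 1 for =y and =m, 0 otherwise */
#if IS_ENABLED(CONFIG_IPV6)
        /* IPv6-specific code */
#endif
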
index 36cc0cc..b566eba 100644
@@ -57,23 +57,44 @@ static int prepend(char **buffer, int buflen, const char *str, int namelen)
 static int d_namespace_path(struct path *path, char *buf, int buflen,
                            char **name, int flags)
 {
-       struct path root, tmp;
        char *res;
-       int connected, error = 0;
+       int error = 0;
+       int connected = 1;
+
+       if (path->mnt->mnt_flags & MNT_INTERNAL) {
+               /* it's not mounted anywhere */
+               res = dentry_path(path->dentry, buf, buflen);
+               *name = res;
+               if (IS_ERR(res)) {
+                       *name = buf;
+                       return PTR_ERR(res);
+               }
+               if (path->dentry->d_sb->s_magic == PROC_SUPER_MAGIC &&
+                   strncmp(*name, "/sys/", 5) == 0) {
+                       /* TODO: convert over to using a per namespace
+                        * control instead of hard coded /proc
+                        */
+                       return prepend(name, *name - buf, "/proc", 5);
+               }
+               return 0;
+       }
 
-       /* Get the root we want to resolve too, released below */
+       /* resolve paths relative to chroot?*/
        if (flags & PATH_CHROOT_REL) {
-               /* resolve paths relative to chroot */
+               struct path root;
                get_fs_root(current->fs, &root);
-       } else {
-               /* resolve paths relative to namespace */
-               root.mnt = current->nsproxy->mnt_ns->root;
-               root.dentry = root.mnt->mnt_root;
-               path_get(&root);
+               res = __d_path(path, &root, buf, buflen);
+               if (res && !IS_ERR(res)) {
+                       /* everything's fine */
+                       *name = res;
+                       path_put(&root);
+                       goto ok;
+               }
+               path_put(&root);
+               connected = 0;
        }
 
-       tmp = root;
-       res = __d_path(path, &tmp, buf, buflen);
+       res = d_absolute_path(path, buf, buflen);
 
        *name = res;
        /* handle error conditions - and still allow a partial path to
@@ -84,7 +105,10 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
                *name = buf;
                goto out;
        }
+       if (!our_mnt(path->mnt))
+               connected = 0;
 
+ok:
        /* Handle two cases:
         * 1. A deleted dentry && profile is not allowing mediation of deleted
         * 2. On some filesystems, newly allocated dentries appear to the
@@ -97,10 +121,7 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
                        goto out;
        }
 
-       /* Determine if the path is connected to the expected root */
-       connected = tmp.dentry == root.dentry && tmp.mnt == root.mnt;
-
-       /* If the path is not connected,
+       /* If the path is not connected to the expected root,
         * check if it is a sysctl and handle specially else remove any
         * leading / that __d_path may have returned.
         * Unless
@@ -112,17 +133,9 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
         *     namespace root.
         */
        if (!connected) {
-               /* is the disconnect path a sysctl? */
-               if (tmp.dentry->d_sb->s_magic == PROC_SUPER_MAGIC &&
-                   strncmp(*name, "/sys/", 5) == 0) {
-                       /* TODO: convert over to using a per namespace
-                        * control instead of hard coded /proc
-                        */
-                       error = prepend(name, *name - buf, "/proc", 5);
-               } else if (!(flags & PATH_CONNECT_PATH) &&
+               if (!(flags & PATH_CONNECT_PATH) &&
                           !(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) &&
-                            (tmp.mnt == current->nsproxy->mnt_ns->root &&
-                             tmp.dentry == tmp.mnt->mnt_root))) {
+                            our_mnt(path->mnt))) {
                        /* disconnected path, don't return pathname starting
                         * with '/'
                         */
@@ -133,8 +146,6 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
        }
 
 out:
-       path_put(&root);
-
        return error;
 }
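
The rewrite above stops computing the mount-namespace root by hand: internal (unmounted) objects go through dentry_path(), chroot-relative lookups take a root reference only for the __d_path() call, and everything else uses the new d_absolute_path() helper, with our_mnt() deciding whether the result counts as connected. A hedged, simplified sketch of that ordering (resolve_name is a placeholder name; the real function also fills *name, tracks the connected flag and handles deleted dentries):

static char *resolve_name(struct path *path, char *buf, int buflen, int flags)
{
        char *res;

        if (path->mnt->mnt_flags & MNT_INTERNAL)
                return dentry_path(path->dentry, buf, buflen);  /* not mounted anywhere */

        if (flags & PATH_CHROOT_REL) {
                struct path root;

                get_fs_root(current->fs, &root);
                res = __d_path(path, &root, buf, buflen);       /* name relative to chroot */
                path_put(&root);
                if (res && !IS_ERR(res))
                        return res;                             /* reachable from the chroot */
        }

        /* fall back to a mount-namespace absolute name; our_mnt(path->mnt)
         * then tells the caller whether the path is connected to this
         * namespace at all.
         */
        return d_absolute_path(path, buf, buflen);
}
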
 
index 5dd5b14..8738def 100644
@@ -27,20 +27,35 @@ static int evmkey_len = MAX_KEY_SIZE;
 
 struct crypto_shash *hmac_tfm;
 
+static DEFINE_MUTEX(mutex);
+
 static struct shash_desc *init_desc(void)
 {
        int rc;
        struct shash_desc *desc;
 
        if (hmac_tfm == NULL) {
+               mutex_lock(&mutex);
+               if (hmac_tfm)
+                       goto out;
                hmac_tfm = crypto_alloc_shash(evm_hmac, 0, CRYPTO_ALG_ASYNC);
                if (IS_ERR(hmac_tfm)) {
                        pr_err("Can not allocate %s (reason: %ld)\n",
                               evm_hmac, PTR_ERR(hmac_tfm));
                        rc = PTR_ERR(hmac_tfm);
                        hmac_tfm = NULL;
+                       mutex_unlock(&mutex);
+                       return ERR_PTR(rc);
+               }
+               rc = crypto_shash_setkey(hmac_tfm, evmkey, evmkey_len);
+               if (rc) {
+                       crypto_free_shash(hmac_tfm);
+                       hmac_tfm = NULL;
+                       mutex_unlock(&mutex);
                        return ERR_PTR(rc);
                }
+out:
+               mutex_unlock(&mutex);
        }
 
        desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac_tfm),
@@ -51,11 +66,7 @@ static struct shash_desc *init_desc(void)
        desc->tfm = hmac_tfm;
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
-       rc = crypto_shash_setkey(hmac_tfm, evmkey, evmkey_len);
-       if (rc)
-               goto out;
        rc = crypto_shash_init(desc);
-out:
        if (rc) {
                kfree(desc);
                return ERR_PTR(rc);
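
The EVM change above moves crypto_shash_setkey() under the same one-time setup path as the tfm allocation and serialises it with a mutex, so two first callers racing into init_desc() cannot both allocate (or half-initialise) hmac_tfm. A condensed sketch of the resulting check/lock/re-check pattern, reusing the globals from the file above (error unwinding is trimmed; the real code returns ERR_PTR() on failure):

static DEFINE_MUTEX(mutex);
static struct crypto_shash *hmac_tfm;

static struct crypto_shash *get_hmac_tfm(void)          /* placeholder name */
{
        struct crypto_shash *tfm;

        if (hmac_tfm)                           /* fast path, already set up */
                return hmac_tfm;

        mutex_lock(&mutex);
        if (!hmac_tfm) {                        /* re-check under the lock */
                tfm = crypto_alloc_shash(evm_hmac, 0, CRYPTO_ALG_ASYNC);
                if (!IS_ERR(tfm) && !crypto_shash_setkey(tfm, evmkey, evmkey_len))
                        hmac_tfm = tfm;         /* publish only a fully keyed tfm */
                else if (!IS_ERR(tfm))
                        crypto_free_shash(tfm);
        }
        mutex_unlock(&mutex);
        return hmac_tfm;                        /* may still be NULL on failure */
}
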
index cca09bb..86305c2 100644
@@ -1090,7 +1090,7 @@ static inline u16 socket_type_to_security_class(int family, int type, int protoc
                        return SECCLASS_NETLINK_ROUTE_SOCKET;
                case NETLINK_FIREWALL:
                        return SECCLASS_NETLINK_FIREWALL_SOCKET;
-               case NETLINK_INET_DIAG:
+               case NETLINK_SOCK_DIAG:
                        return SECCLASS_NETLINK_TCPDIAG_SOCKET;
                case NETLINK_NFLOG:
                        return SECCLASS_NETLINK_NFLOG_SOCKET;
index 0b62bd1..7b9eb1f 100644
@@ -123,7 +123,9 @@ static void sel_netport_insert(struct sel_netport *port)
        if (sel_netport_hash[idx].size == SEL_NETPORT_HASH_BKT_LIMIT) {
                struct sel_netport *tail;
                tail = list_entry(
-                       rcu_dereference(sel_netport_hash[idx].list.prev),
+                       rcu_dereference_protected(
+                               sel_netport_hash[idx].list.prev,
+                               lockdep_is_held(&sel_netport_lock)),
                        struct sel_netport, list);
                list_del_rcu(&tail->list);
                kfree_rcu(tail, rcu);
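
rcu_dereference() is meant for RCU read-side critical sections; here the list is being rewritten while sel_netport_lock is held, so rcu_dereference_protected() with a lockdep expression documents that the access is safe because of the lock rather than because of rcu_read_lock(), and keeps lockdep from warning about a dereference outside a read section. The updater-side pattern, annotated (same structures as the hunk above; the lock is assumed to be held by the caller):

/* caller already holds sel_netport_lock, so no rcu_read_lock() is needed;
 * lockdep_is_held() lets lockdep verify exactly that assumption.
 */
tail = list_entry(rcu_dereference_protected(sel_netport_hash[idx].list.prev,
                                            lockdep_is_held(&sel_netport_lock)),
                  struct sel_netport, list);
list_del_rcu(&tail->list);      /* readers may still walk the old entry... */
kfree_rcu(tail, rcu);           /* ...so defer the free by one grace period */
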
index 738bbdf..d9f3ced 100644
@@ -101,9 +101,8 @@ static char *tomoyo_get_absolute_path(struct path *path, char * const buffer,
 {
        char *pos = ERR_PTR(-ENOMEM);
        if (buflen >= 256) {
-               struct path ns_root = { };
                /* go to whatever namespace root we are under */
-               pos = __d_path(path, &ns_root, buffer, buflen - 1);
+               pos = d_absolute_path(path, buffer, buflen - 1);
                if (!IS_ERR(pos) && *pos == '/' && pos[1]) {
                        struct inode *inode = path->dentry->d_inode;
                        if (inode && S_ISDIR(inode->i_mode)) {
@@ -294,8 +293,16 @@ char *tomoyo_realpath_from_path(struct path *path)
                        pos = tomoyo_get_local_path(path->dentry, buf,
                                                    buf_len - 1);
                /* Get absolute name for the rest. */
-               else
+               else {
                        pos = tomoyo_get_absolute_path(path, buf, buf_len - 1);
+                       /*
+                        * Fall back to local name if absolute name is not
+                        * available.
+                        */
+                       if (pos == ERR_PTR(-EINVAL))
+                               pos = tomoyo_get_local_path(path->dentry, buf,
+                                                           buf_len - 1);
+               }
 encode:
                if (IS_ERR(pos))
                        continue;
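
d_absolute_path() returns ERR_PTR(-EINVAL) when the object is not reachable from the namespace root (a detached mount, for example), which is why the hunk above retries with tomoyo_get_local_path() only for that specific error. A minimal sketch of the fallback, under that assumption (the surrounding loop that retries with a larger buffer is omitted):

pos = tomoyo_get_absolute_path(path, buf, buf_len - 1);
if (pos == ERR_PTR(-EINVAL))    /* not reachable from any namespace root */
        pos = tomoyo_get_local_path(path->dentry, buf, buf_len - 1);
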
index 6e5adde..73516f6 100644
@@ -899,6 +899,10 @@ static void atmel_ac97c_reset(struct atmel_ac97c *chip)
                /* AC97 v2.2 specifications says minimum 1 us. */
                udelay(2);
                gpio_set_value(chip->reset_pin, 1);
+       } else {
+               ac97c_writel(chip, MR, AC97C_MR_WRST | AC97C_MR_ENA);
+               udelay(2);
+               ac97c_writel(chip, MR, AC97C_MR_ENA);
        }
 }
 
index 096507d..c2f79e6 100644
@@ -2507,8 +2507,8 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
        SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
+       SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS 1101HA", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
-       SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
@@ -2971,7 +2971,8 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
        /* SCH */
        { PCI_DEVICE(0x8086, 0x811b),
          .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
-         AZX_DCAPS_BUFSIZE},
+         AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_LPIB }, /* Poulsbo */
+       /* ICH */
        { PCI_DEVICE(0x8086, 0x2668),
          .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC |
          AZX_DCAPS_BUFSIZE },  /* ICH6 */
index cbde019..1d07e8f 100644
@@ -297,6 +297,8 @@ static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
        imux = &spec->input_mux[mux_idx];
        if (!imux->num_items && mux_idx > 0)
                imux = &spec->input_mux[0];
+       if (!imux->num_items)
+               return 0;
 
        if (idx >= imux->num_items)
                idx = imux->num_items - 1;
@@ -2629,6 +2631,8 @@ static const char *alc_get_line_out_pfx(struct alc_spec *spec, int ch,
        case AUTO_PIN_SPEAKER_OUT:
                if (cfg->line_outs == 1)
                        return "Speaker";
+               if (cfg->line_outs == 2)
+                       return ch ? "Bass Speaker" : "Speaker";
                break;
        case AUTO_PIN_HP_OUT:
                /* for multi-io case, only the primary out */
@@ -2902,7 +2906,7 @@ static hda_nid_t alc_auto_look_for_dac(struct hda_codec *codec, hda_nid_t pin)
                if (!nid)
                        continue;
                if (found_in_nid_list(nid, spec->multiout.dac_nids,
-                                     spec->multiout.num_dacs))
+                                     ARRAY_SIZE(spec->private_dac_nids)))
                        continue;
                if (found_in_nid_list(nid, spec->multiout.hp_out_nid,
                                      ARRAY_SIZE(spec->multiout.hp_out_nid)))
@@ -2923,6 +2927,7 @@ static hda_nid_t get_dac_if_single(struct hda_codec *codec, hda_nid_t pin)
        return 0;
 }
 
+/* return 0 if no possible DAC is found, 1 if one or more found */
 static int alc_auto_fill_extra_dacs(struct hda_codec *codec, int num_outs,
                                    const hda_nid_t *pins, hda_nid_t *dacs)
 {
@@ -2940,7 +2945,7 @@ static int alc_auto_fill_extra_dacs(struct hda_codec *codec, int num_outs,
                if (!dacs[i])
                        dacs[i] = alc_auto_look_for_dac(codec, pins[i]);
        }
-       return 0;
+       return 1;
 }
 
 static int alc_auto_fill_multi_ios(struct hda_codec *codec,
@@ -2950,7 +2955,7 @@ static int alc_auto_fill_multi_ios(struct hda_codec *codec,
 static int alc_auto_fill_dac_nids(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
-       const struct auto_pin_cfg *cfg = &spec->autocfg;
+       struct auto_pin_cfg *cfg = &spec->autocfg;
        bool redone = false;
        int i;
 
@@ -2961,6 +2966,7 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
        spec->multiout.extra_out_nid[0] = 0;
        memset(spec->private_dac_nids, 0, sizeof(spec->private_dac_nids));
        spec->multiout.dac_nids = spec->private_dac_nids;
+       spec->multi_ios = 0;
 
        /* fill hard-wired DACs first */
        if (!redone) {
@@ -2994,10 +3000,12 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
        for (i = 0; i < cfg->line_outs; i++) {
                if (spec->private_dac_nids[i])
                        spec->multiout.num_dacs++;
-               else
+               else {
                        memmove(spec->private_dac_nids + i,
                                spec->private_dac_nids + i + 1,
                                sizeof(hda_nid_t) * (cfg->line_outs - i - 1));
+                       spec->private_dac_nids[cfg->line_outs - 1] = 0;
+               }
        }
 
        if (cfg->line_outs == 1 && cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
@@ -3019,9 +3027,28 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
        if (cfg->line_out_type != AUTO_PIN_HP_OUT)
                alc_auto_fill_extra_dacs(codec, cfg->hp_outs, cfg->hp_pins,
                                 spec->multiout.hp_out_nid);
-       if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT)
-               alc_auto_fill_extra_dacs(codec, cfg->speaker_outs, cfg->speaker_pins,
-                                spec->multiout.extra_out_nid);
+       if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
+               int err = alc_auto_fill_extra_dacs(codec, cfg->speaker_outs,
+                                       cfg->speaker_pins,
+                                       spec->multiout.extra_out_nid);
+               /* if no speaker volume is assigned, try again as the primary
+                * output
+                */
+               if (!err && cfg->speaker_outs > 0 &&
+                   cfg->line_out_type == AUTO_PIN_HP_OUT) {
+                       cfg->hp_outs = cfg->line_outs;
+                       memcpy(cfg->hp_pins, cfg->line_out_pins,
+                              sizeof(cfg->hp_pins));
+                       cfg->line_outs = cfg->speaker_outs;
+                       memcpy(cfg->line_out_pins, cfg->speaker_pins,
+                              sizeof(cfg->speaker_pins));
+                       cfg->speaker_outs = 0;
+                       memset(cfg->speaker_pins, 0, sizeof(cfg->speaker_pins));
+                       cfg->line_out_type = AUTO_PIN_SPEAKER_OUT;
+                       redone = false;
+                       goto again;
+               }
+       }
 
        return 0;
 }
@@ -3171,7 +3198,8 @@ static int alc_auto_create_multi_out_ctls(struct hda_codec *codec,
 }
 
 static int alc_auto_create_extra_out(struct hda_codec *codec, hda_nid_t pin,
-                                    hda_nid_t dac, const char *pfx)
+                                    hda_nid_t dac, const char *pfx,
+                                    int cidx)
 {
        struct alc_spec *spec = codec->spec;
        hda_nid_t sw, vol;
@@ -3187,15 +3215,15 @@ static int alc_auto_create_extra_out(struct hda_codec *codec, hda_nid_t pin,
                if (is_ctl_used(spec->sw_ctls, val))
                        return 0; /* already created */
                mark_ctl_usage(spec->sw_ctls, val);
-               return add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx, val);
+               return __add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx, cidx, val);
        }
 
        sw = alc_look_for_out_mute_nid(codec, pin, dac);
        vol = alc_look_for_out_vol_nid(codec, pin, dac);
-       err = alc_auto_add_stereo_vol(codec, pfx, 0, vol);
+       err = alc_auto_add_stereo_vol(codec, pfx, cidx, vol);
        if (err < 0)
                return err;
-       err = alc_auto_add_stereo_sw(codec, pfx, 0, sw);
+       err = alc_auto_add_stereo_sw(codec, pfx, cidx, sw);
        if (err < 0)
                return err;
        return 0;
@@ -3236,16 +3264,21 @@ static int alc_auto_create_extra_outs(struct hda_codec *codec, int num_pins,
                hda_nid_t dac = *dacs;
                if (!dac)
                        dac = spec->multiout.dac_nids[0];
-               return alc_auto_create_extra_out(codec, *pins, dac, pfx);
+               return alc_auto_create_extra_out(codec, *pins, dac, pfx, 0);
        }
 
        if (dacs[num_pins - 1]) {
                /* OK, we have a multi-output system with individual volumes */
                for (i = 0; i < num_pins; i++) {
-                       snprintf(name, sizeof(name), "%s %s",
-                                pfx, channel_name[i]);
-                       err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
-                                                       name);
+                       if (num_pins >= 3) {
+                               snprintf(name, sizeof(name), "%s %s",
+                                        pfx, channel_name[i]);
+                               err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
+                                                               name, 0);
+                       } else {
+                               err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
+                                                               pfx, i);
+                       }
                        if (err < 0)
                                return err;
                }
index f365865..616678f 100644
@@ -215,6 +215,7 @@ struct sigmatel_spec {
        unsigned int gpio_mute;
        unsigned int gpio_led;
        unsigned int gpio_led_polarity;
+       unsigned int vref_mute_led_nid; /* pin NID for mute-LED vref control */
        unsigned int vref_led;
 
        /* stream */
@@ -4318,12 +4319,10 @@ static void stac_store_hints(struct hda_codec *codec)
                spec->eapd_switch = val;
        get_int_hint(codec, "gpio_led_polarity", &spec->gpio_led_polarity);
        if (get_int_hint(codec, "gpio_led", &spec->gpio_led)) {
-               if (spec->gpio_led <= 8) {
-                       spec->gpio_mask |= spec->gpio_led;
-                       spec->gpio_dir |= spec->gpio_led;
-                       if (spec->gpio_led_polarity)
-                               spec->gpio_data |= spec->gpio_led;
-               }
+               spec->gpio_mask |= spec->gpio_led;
+               spec->gpio_dir |= spec->gpio_led;
+               if (spec->gpio_led_polarity)
+                       spec->gpio_data |= spec->gpio_led;
        }
 }
 
@@ -4441,7 +4440,9 @@ static int stac92xx_init(struct hda_codec *codec)
                int pinctl, def_conf;
 
                /* power on when no jack detection is available */
-               if (!spec->hp_detect) {
+               /* or when the VREF is used for controlling LED */
+               if (!spec->hp_detect ||
+                   spec->vref_mute_led_nid == nid) {
                        stac_toggle_power_map(codec, nid, 1);
                        continue;
                }
@@ -4913,8 +4914,14 @@ static int find_mute_led_gpio(struct hda_codec *codec, int default_polarity)
                        if (sscanf(dev->name, "HP_Mute_LED_%d_%x",
                                  &spec->gpio_led_polarity,
                                  &spec->gpio_led) == 2) {
-                               if (spec->gpio_led < 4)
+                               unsigned int max_gpio;
+                               max_gpio = snd_hda_param_read(codec, codec->afg,
+                                                             AC_PAR_GPIO_CAP);
+                               max_gpio &= AC_GPIO_IO_COUNT;
+                               if (spec->gpio_led < max_gpio)
                                        spec->gpio_led = 1 << spec->gpio_led;
+                               else
+                                       spec->vref_mute_led_nid = spec->gpio_led;
                                return 1;
                        }
                        if (sscanf(dev->name, "HP_Mute_LED_%d",
@@ -4922,6 +4929,12 @@ static int find_mute_led_gpio(struct hda_codec *codec, int default_polarity)
                                set_hp_led_gpio(codec);
                                return 1;
                        }
+                       /* BIOS bug: unfilled OEM string */
+                       if (strstr(dev->name, "HP_Mute_LED_P_G")) {
+                               set_hp_led_gpio(codec);
+                               spec->gpio_led_polarity = 1;
+                               return 1;
+                       }
                }
 
                /*
@@ -5043,29 +5056,12 @@ static int stac92xx_pre_resume(struct hda_codec *codec)
        struct sigmatel_spec *spec = codec->spec;
 
        /* sync mute LED */
-       if (spec->gpio_led) {
-               if (spec->gpio_led <= 8) {
-                       stac_gpio_set(codec, spec->gpio_mask,
-                                       spec->gpio_dir, spec->gpio_data);
-               } else {
-                       stac_vrefout_set(codec,
-                                       spec->gpio_led, spec->vref_led);
-               }
-       }
-       return 0;
-}
-
-static int stac92xx_post_suspend(struct hda_codec *codec)
-{
-       struct sigmatel_spec *spec = codec->spec;
-       if (spec->gpio_led > 8) {
-               /* with vref-out pin used for mute led control
-                * codec AFG is prevented from D3 state, but on
-                * system suspend it can (and should) be used
-                */
-               snd_hda_codec_read(codec, codec->afg, 0,
-                               AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
-       }
+       if (spec->vref_mute_led_nid)
+               stac_vrefout_set(codec, spec->vref_mute_led_nid,
+                                spec->vref_led);
+       else if (spec->gpio_led)
+               stac_gpio_set(codec, spec->gpio_mask,
+                             spec->gpio_dir, spec->gpio_data);
        return 0;
 }
 
@@ -5076,7 +5072,7 @@ static void stac92xx_set_power_state(struct hda_codec *codec, hda_nid_t fg,
        struct sigmatel_spec *spec = codec->spec;
 
        if (power_state == AC_PWRST_D3) {
-               if (spec->gpio_led > 8) {
+               if (spec->vref_mute_led_nid) {
                        /* with vref-out pin used for mute led control
                         * codec AFG is prevented from D3 state
                         */
@@ -5129,7 +5125,7 @@ static int stac92xx_update_led_status(struct hda_codec *codec)
                }
        }
        /*polarity defines *not* muted state level*/
-       if (spec->gpio_led <= 8) {
+       if (!spec->vref_mute_led_nid) {
                if (muted)
                        spec->gpio_data &= ~spec->gpio_led; /* orange */
                else
@@ -5147,7 +5143,8 @@ static int stac92xx_update_led_status(struct hda_codec *codec)
                muted_lvl = spec->gpio_led_polarity ?
                                AC_PINCTL_VREF_GRD : AC_PINCTL_VREF_HIZ;
                spec->vref_led = muted ? muted_lvl : notmtd_lvl;
-               stac_vrefout_set(codec, spec->gpio_led, spec->vref_led);
+               stac_vrefout_set(codec, spec->vref_mute_led_nid,
+                                spec->vref_led);
        }
        return 0;
 }
@@ -5661,15 +5658,13 @@ again:
 
 #ifdef CONFIG_SND_HDA_POWER_SAVE
        if (spec->gpio_led) {
-               if (spec->gpio_led <= 8) {
+               if (!spec->vref_mute_led_nid) {
                        spec->gpio_mask |= spec->gpio_led;
                        spec->gpio_dir |= spec->gpio_led;
                        spec->gpio_data |= spec->gpio_led;
                } else {
                        codec->patch_ops.set_power_state =
                                        stac92xx_set_power_state;
-                       codec->patch_ops.post_suspend =
-                                       stac92xx_post_suspend;
                }
                codec->patch_ops.pre_resume = stac92xx_pre_resume;
                codec->patch_ops.check_power_status =
@@ -5976,15 +5971,13 @@ again:
 
 #ifdef CONFIG_SND_HDA_POWER_SAVE
        if (spec->gpio_led) {
-               if (spec->gpio_led <= 8) {
+               if (!spec->vref_mute_led_nid) {
                        spec->gpio_mask |= spec->gpio_led;
                        spec->gpio_dir |= spec->gpio_led;
                        spec->gpio_data |= spec->gpio_led;
                } else {
                        codec->patch_ops.set_power_state =
                                        stac92xx_set_power_state;
-                       codec->patch_ops.post_suspend =
-                                       stac92xx_post_suspend;
                }
                codec->patch_ops.pre_resume = stac92xx_pre_resume;
                codec->patch_ops.check_power_status =
index a391e62..28dfafb 100644
@@ -41,6 +41,7 @@ MODULE_SUPPORTED_DEVICE("{{SiS,SiS7019 Audio Accelerator}}");
 static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */
 static char *id = SNDRV_DEFAULT_STR1;  /* ID for this card */
 static int enable = 1;
+static int codecs = 1;
 
 module_param(index, int, 0444);
 MODULE_PARM_DESC(index, "Index value for SiS7019 Audio Accelerator.");
@@ -48,6 +49,8 @@ module_param(id, charp, 0444);
 MODULE_PARM_DESC(id, "ID string for SiS7019 Audio Accelerator.");
 module_param(enable, bool, 0444);
 MODULE_PARM_DESC(enable, "Enable SiS7019 Audio Accelerator.");
+module_param(codecs, int, 0444);
+MODULE_PARM_DESC(codecs, "Set bit to indicate that codec number is expected to be present (default 1)");
 
 static DEFINE_PCI_DEVICE_TABLE(snd_sis7019_ids) = {
        { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x7019) },
@@ -140,6 +143,9 @@ struct sis7019 {
        dma_addr_t silence_dma_addr;
 };
 
+/* These values are also used by the module param 'codecs' to indicate
+ * which codecs should be present.
+ */
 #define SIS_PRIMARY_CODEC_PRESENT      0x0001
 #define SIS_SECONDARY_CODEC_PRESENT    0x0002
 #define SIS_TERTIARY_CODEC_PRESENT     0x0004
@@ -1078,6 +1084,7 @@ static int sis_chip_init(struct sis7019 *sis)
 {
        unsigned long io = sis->ioport;
        void __iomem *ioaddr = sis->ioaddr;
+       unsigned long timeout;
        u16 status;
        int count;
        int i;
@@ -1104,21 +1111,45 @@ static int sis_chip_init(struct sis7019 *sis)
        while ((inw(io + SIS_AC97_STATUS) & SIS_AC97_STATUS_BUSY) && --count)
                udelay(1);
 
+       /* Command complete, we can let go of the semaphore now.
+        */
+       outl(SIS_AC97_SEMA_RELEASE, io + SIS_AC97_SEMA);
+       if (!count)
+               return -EIO;
+
        /* Now that we've finished the reset, find out what's attached.
+        * There are some codec/board combinations that take an extremely
+        * long time to come up. 350+ ms has been observed in the field,
+        * so we'll give them up to 500ms.
         */
-       status = inl(io + SIS_AC97_STATUS);
-       if (status & SIS_AC97_STATUS_CODEC_READY)
-               sis->codecs_present |= SIS_PRIMARY_CODEC_PRESENT;
-       if (status & SIS_AC97_STATUS_CODEC2_READY)
-               sis->codecs_present |= SIS_SECONDARY_CODEC_PRESENT;
-       if (status & SIS_AC97_STATUS_CODEC3_READY)
-               sis->codecs_present |= SIS_TERTIARY_CODEC_PRESENT;
-
-       /* All done, let go of the semaphore, and check for errors
+       sis->codecs_present = 0;
+       timeout = msecs_to_jiffies(500) + jiffies;
+       while (time_before_eq(jiffies, timeout)) {
+               status = inl(io + SIS_AC97_STATUS);
+               if (status & SIS_AC97_STATUS_CODEC_READY)
+                       sis->codecs_present |= SIS_PRIMARY_CODEC_PRESENT;
+               if (status & SIS_AC97_STATUS_CODEC2_READY)
+                       sis->codecs_present |= SIS_SECONDARY_CODEC_PRESENT;
+               if (status & SIS_AC97_STATUS_CODEC3_READY)
+                       sis->codecs_present |= SIS_TERTIARY_CODEC_PRESENT;
+
+               if (sis->codecs_present == codecs)
+                       break;
+
+               msleep(1);
+       }
+
+       /* All done, check for errors.
         */
-       outl(SIS_AC97_SEMA_RELEASE, io + SIS_AC97_SEMA);
-       if (!sis->codecs_present || !count)
+       if (!sis->codecs_present) {
+               printk(KERN_ERR "sis7019: could not find any codecs\n");
                return -EIO;
+       }
+
+       if (sis->codecs_present != codecs) {
+               printk(KERN_WARNING "sis7019: missing codecs, found %0x, expected %0x\n",
+                      sis->codecs_present, codecs);
+       }
 
        /* Let the hardware know that the audio driver is alive,
         * and enable PCM slots on the AC-link for L/R playback (3 & 4) and
@@ -1390,6 +1421,17 @@ static int __devinit snd_sis7019_probe(struct pci_dev *pci,
        if (!enable)
                goto error_out;
 
+       /* The user can specify which codecs should be present so that we
+        * can wait for them to show up if they are slow to recover from
+        * the AC97 cold reset. We default to a single codec, the primary.
+        *
+        * We assume that SIS_PRIMARY_*_PRESENT matches bits 0-2.
+        */
+       codecs &= SIS_PRIMARY_CODEC_PRESENT | SIS_SECONDARY_CODEC_PRESENT |
+                 SIS_TERTIARY_CODEC_PRESENT;
+       if (!codecs)
+               codecs = SIS_PRIMARY_CODEC_PRESENT;
+
        rc = snd_card_create(index, id, THIS_MODULE, sizeof(*sis), &card);
        if (rc < 0)
                goto error_out;
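
The codec-detection change above replaces a single read of SIS_AC97_STATUS with a bounded polling loop, since some codec/board combinations need several hundred milliseconds after cold reset before they report ready. The general shape of such a jiffies-based wait, as a hedged fragment (codec_ready() is a placeholder for the status bits checked above):

unsigned long timeout = jiffies + msecs_to_jiffies(500);
unsigned int status;

for (;;) {
        status = inl(io + SIS_AC97_STATUS);
        if (codec_ready(status))        /* placeholder for the *_CODEC_READY tests */
                break;
        if (time_after(jiffies, timeout))
                break;                  /* give up after ~500 ms; caller reports -EIO */
        msleep(1);                      /* probe context may sleep, so don't busy-wait */
}
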
index bee3c94..d1fcc81 100644
@@ -1,6 +1,6 @@
 config SND_ATMEL_SOC
        tristate "SoC Audio for the Atmel System-on-Chip"
-       depends on ARCH_AT91 || AVR32
+       depends on ARCH_AT91
        help
          Say Y or M if you want to add support for codecs attached to
          the ATMEL SSC interface. You will also need
@@ -24,25 +24,6 @@ config SND_AT91_SOC_SAM9G20_WM8731
          Say Y if you want to add support for SoC audio on WM8731-based
          AT91sam9g20 evaluation board.
 
-config SND_AT32_SOC_PLAYPAQ
-        tristate "SoC Audio support for PlayPaq with WM8510"
-        depends on SND_ATMEL_SOC && BOARD_PLAYPAQ && AT91_PROGRAMMABLE_CLOCKS
-        select SND_ATMEL_SOC_SSC
-        select SND_SOC_WM8510
-        help
-          Say Y or M here if you want to add support for SoC audio
-          on the LRS PlayPaq.
-
-config SND_AT32_SOC_PLAYPAQ_SLAVE
-        bool "Run CODEC on PlayPaq in slave mode"
-        depends on SND_AT32_SOC_PLAYPAQ
-        default n
-        help
-          Say Y if you want to run with the AT32 SSC generating the BCLK
-          and FRAME signals on the PlayPaq.  Unless you want to play
-          with the AT32 as the SSC master, you probably want to say N here,
-          as this will give you better sound quality.
-
 config SND_AT91_SOC_AFEB9260
        tristate "SoC Audio support for AFEB9260 board"
        depends on ARCH_AT91 && MACH_AFEB9260 && SND_ATMEL_SOC
index e7ea56b..a5c0bf1 100644
@@ -8,9 +8,5 @@ obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel_ssc_dai.o
 # AT91 Machine Support
 snd-soc-sam9g20-wm8731-objs := sam9g20_wm8731.o
 
-# AT32 Machine Support
-snd-soc-playpaq-objs := playpaq_wm8510.o
-
 obj-$(CONFIG_SND_AT91_SOC_SAM9G20_WM8731) += snd-soc-sam9g20-wm8731.o
-obj-$(CONFIG_SND_AT32_SOC_PLAYPAQ) += snd-soc-playpaq.o
 obj-$(CONFIG_SND_AT91_SOC_AFEB9260) += snd-soc-afeb9260.o
diff --git a/sound/soc/atmel/playpaq_wm8510.c b/sound/soc/atmel/playpaq_wm8510.c
deleted file mode 100644
index 73ae99a..0000000
+++ /dev/null
@@ -1,473 +0,0 @@
-/* sound/soc/at32/playpaq_wm8510.c
- * ASoC machine driver for PlayPaq using WM8510 codec
- *
- * Copyright (C) 2008 Long Range Systems
- *    Geoffrey Wossum <gwossum@acm.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This code is largely inspired by sound/soc/at91/eti_b1_wm8731.c
- *
- * NOTE: If you don't have the AT32 enhanced portmux configured (which
- * isn't currently in the mainline or Atmel patched kernel), you will
- * need to set the MCLK pin (PA30) to peripheral A in your board initialization
- * code.  Something like:
- *     at32_select_periph(GPIO_PIN_PA(30), GPIO_PERIPH_A, 0);
- *
- */
-
-/* #define DEBUG */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/clk.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-
-#include <mach/at32ap700x.h>
-#include <mach/portmux.h>
-
-#include "../codecs/wm8510.h"
-#include "atmel-pcm.h"
-#include "atmel_ssc_dai.h"
-
-
-/*-------------------------------------------------------------------------*\
- * constants
-\*-------------------------------------------------------------------------*/
-#define MCLK_PIN               GPIO_PIN_PA(30)
-#define MCLK_PERIPH            GPIO_PERIPH_A
-
-
-/*-------------------------------------------------------------------------*\
- * data types
-\*-------------------------------------------------------------------------*/
-/* SSC clocking data */
-struct ssc_clock_data {
-       /* CMR div */
-       unsigned int cmr_div;
-
-       /* Frame period (as needed by xCMR.PERIOD) */
-       unsigned int period;
-
-       /* The SSC clock rate these settings where calculated for */
-       unsigned long ssc_rate;
-};
-
-
-/*-------------------------------------------------------------------------*\
- * module data
-\*-------------------------------------------------------------------------*/
-static struct clk *_gclk0;
-static struct clk *_pll0;
-
-#define CODEC_CLK (_gclk0)
-
-
-/*-------------------------------------------------------------------------*\
- * Sound SOC operations
-\*-------------------------------------------------------------------------*/
-#if defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
-static struct ssc_clock_data playpaq_wm8510_calc_ssc_clock(
-       struct snd_pcm_hw_params *params,
-       struct snd_soc_dai *cpu_dai)
-{
-       struct at32_ssc_info *ssc_p = snd_soc_dai_get_drvdata(cpu_dai);
-       struct ssc_device *ssc = ssc_p->ssc;
-       struct ssc_clock_data cd;
-       unsigned int rate, width_bits, channels;
-       unsigned int bitrate, ssc_div;
-       unsigned actual_rate;
-
-
-       /*
-        * Figure out required bitrate
-        */
-       rate = params_rate(params);
-       channels = params_channels(params);
-       width_bits = snd_pcm_format_physical_width(params_format(params));
-       bitrate = rate * width_bits * channels;
-
-
-       /*
-        * Figure out required SSC divider and period for required bitrate
-        */
-       cd.ssc_rate = clk_get_rate(ssc->clk);
-       ssc_div = cd.ssc_rate / bitrate;
-       cd.cmr_div = ssc_div / 2;
-       if (ssc_div & 1) {
-               /* round cmr_div up */
-               cd.cmr_div++;
-       }
-       cd.period = width_bits - 1;
-
-
-       /*
-        * Find actual rate, compare to requested rate
-        */
-       actual_rate = (cd.ssc_rate / (cd.cmr_div * 2)) / (2 * (cd.period + 1));
-       pr_debug("playpaq_wm8510: Request rate = %u, actual rate = %u\n",
-                rate, actual_rate);
-
-
-       return cd;
-}
-#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */
-
-
-
-static int playpaq_wm8510_hw_params(struct snd_pcm_substream *substream,
-                                   struct snd_pcm_hw_params *params)
-{
-       struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct snd_soc_dai *codec_dai = rtd->codec_dai;
-       struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-       struct at32_ssc_info *ssc_p = snd_soc_dai_get_drvdata(cpu_dai);
-       struct ssc_device *ssc = ssc_p->ssc;
-       unsigned int pll_out = 0, bclk = 0, mclk_div = 0;
-       int ret;
-
-
-       /* Due to difficulties with getting the correct clocks from the AT32's
-        * PLL0, we're going to let the CODEC be in charge of all the clocks
-        */
-#if !defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
-       const unsigned int fmt = (SND_SOC_DAIFMT_I2S |
-                                 SND_SOC_DAIFMT_NB_NF |
-                                 SND_SOC_DAIFMT_CBM_CFM);
-#else
-       struct ssc_clock_data cd;
-       const unsigned int fmt = (SND_SOC_DAIFMT_I2S |
-                                 SND_SOC_DAIFMT_NB_NF |
-                                 SND_SOC_DAIFMT_CBS_CFS);
-#endif
-
-       if (ssc == NULL) {
-               pr_warning("playpaq_wm8510_hw_params: ssc is NULL!\n");
-               return -EINVAL;
-       }
-
-
-       /*
-        * Figure out PLL and BCLK dividers for WM8510
-        */
-       switch (params_rate(params)) {
-       case 48000:
-               pll_out = 24576000;
-               mclk_div = WM8510_MCLKDIV_2;
-               bclk = WM8510_BCLKDIV_8;
-               break;
-
-       case 44100:
-               pll_out = 22579200;
-               mclk_div = WM8510_MCLKDIV_2;
-               bclk = WM8510_BCLKDIV_8;
-               break;
-
-       case 22050:
-               pll_out = 22579200;
-               mclk_div = WM8510_MCLKDIV_4;
-               bclk = WM8510_BCLKDIV_8;
-               break;
-
-       case 16000:
-               pll_out = 24576000;
-               mclk_div = WM8510_MCLKDIV_6;
-               bclk = WM8510_BCLKDIV_8;
-               break;
-
-       case 11025:
-               pll_out = 22579200;
-               mclk_div = WM8510_MCLKDIV_8;
-               bclk = WM8510_BCLKDIV_8;
-               break;
-
-       case 8000:
-               pll_out = 24576000;
-               mclk_div = WM8510_MCLKDIV_12;
-               bclk = WM8510_BCLKDIV_8;
-               break;
-
-       default:
-               pr_warning("playpaq_wm8510: Unsupported sample rate %d\n",
-                          params_rate(params));
-               return -EINVAL;
-       }
-
-
-       /*
-        * set CPU and CODEC DAI configuration
-        */
-       ret = snd_soc_dai_set_fmt(codec_dai, fmt);
-       if (ret < 0) {
-               pr_warning("playpaq_wm8510: "
-                          "Failed to set CODEC DAI format (%d)\n",
-                          ret);
-               return ret;
-       }
-       ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
-       if (ret < 0) {
-               pr_warning("playpaq_wm8510: "
-                          "Failed to set CPU DAI format (%d)\n",
-                          ret);
-               return ret;
-       }
-
-
-       /*
-        * Set CPU clock configuration
-        */
-#if defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
-       cd = playpaq_wm8510_calc_ssc_clock(params, cpu_dai);
-       pr_debug("playpaq_wm8510: cmr_div = %d, period = %d\n",
-                cd.cmr_div, cd.period);
-       ret = snd_soc_dai_set_clkdiv(cpu_dai, AT32_SSC_CMR_DIV, cd.cmr_div);
-       if (ret < 0) {
-               pr_warning("playpaq_wm8510: Failed to set CPU CMR_DIV (%d)\n",
-                          ret);
-               return ret;
-       }
-       ret = snd_soc_dai_set_clkdiv(cpu_dai, AT32_SSC_TCMR_PERIOD,
-                                         cd.period);
-       if (ret < 0) {
-               pr_warning("playpaq_wm8510: "
-                          "Failed to set CPU transmit period (%d)\n",
-                          ret);
-               return ret;
-       }
-#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */
-
-
-       /*
-        * Set CODEC clock configuration
-        */
-       pr_debug("playpaq_wm8510: "
-                "pll_in = %ld, pll_out = %u, bclk = %x, mclk = %x\n",
-                clk_get_rate(CODEC_CLK), pll_out, bclk, mclk_div);
-
-
-#if !defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
-       ret = snd_soc_dai_set_clkdiv(codec_dai, WM8510_BCLKDIV, bclk);
-       if (ret < 0) {
-               pr_warning
-                   ("playpaq_wm8510: Failed to set CODEC DAI BCLKDIV (%d)\n",
-                    ret);
-               return ret;
-       }
-#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */
-
-
-       ret = snd_soc_dai_set_pll(codec_dai, 0, 0,
-                                        clk_get_rate(CODEC_CLK), pll_out);
-       if (ret < 0) {
-               pr_warning("playpaq_wm8510: Failed to set CODEC DAI PLL (%d)\n",
-                          ret);
-               return ret;
-       }
-
-
-       ret = snd_soc_dai_set_clkdiv(codec_dai, WM8510_MCLKDIV, mclk_div);
-       if (ret < 0) {
-               pr_warning("playpaq_wm8510: Failed to set CODEC MCLKDIV (%d)\n",
-                          ret);
-               return ret;
-       }
-
-
-       return 0;
-}
-
-
-
-static struct snd_soc_ops playpaq_wm8510_ops = {
-       .hw_params = playpaq_wm8510_hw_params,
-};
-
-
-
-static const struct snd_soc_dapm_widget playpaq_dapm_widgets[] = {
-       SND_SOC_DAPM_MIC("Int Mic", NULL),
-       SND_SOC_DAPM_SPK("Ext Spk", NULL),
-};
-
-
-
-static const struct snd_soc_dapm_route intercon[] = {
-       /* speaker connected to SPKOUT */
-       {"Ext Spk", NULL, "SPKOUTP"},
-       {"Ext Spk", NULL, "SPKOUTN"},
-
-       {"Mic Bias", NULL, "Int Mic"},
-       {"MICN", NULL, "Mic Bias"},
-       {"MICP", NULL, "Mic Bias"},
-};
-
-
-
-static int playpaq_wm8510_init(struct snd_soc_pcm_runtime *rtd)
-{
-       struct snd_soc_codec *codec = rtd->codec;
-       struct snd_soc_dapm_context *dapm = &codec->dapm;
-       int i;
-
-       /*
-        * Add DAPM widgets
-        */
-       for (i = 0; i < ARRAY_SIZE(playpaq_dapm_widgets); i++)
-               snd_soc_dapm_new_control(dapm, &playpaq_dapm_widgets[i]);
-
-
-
-       /*
-        * Setup audio path interconnects
-        */
-       snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
-
-
-
-       /* always connected pins */
-       snd_soc_dapm_enable_pin(dapm, "Int Mic");
-       snd_soc_dapm_enable_pin(dapm, "Ext Spk");
-
-
-
-       /* Make CSB show PLL rate */
-       snd_soc_dai_set_clkdiv(rtd->codec_dai, WM8510_OPCLKDIV,
-                                      WM8510_OPCLKDIV_1 | 4);
-
-       return 0;
-}
-
-
-
-static struct snd_soc_dai_link playpaq_wm8510_dai = {
-       .name = "WM8510",
-       .stream_name = "WM8510 PCM",
-       .cpu_dai_name= "atmel-ssc-dai.0",
-       .platform_name = "atmel-pcm-audio",
-       .codec_name = "wm8510-codec.0-0x1a",
-       .codec_dai_name = "wm8510-hifi",
-       .init = playpaq_wm8510_init,
-       .ops = &playpaq_wm8510_ops,
-};
-
-
-
-static struct snd_soc_card snd_soc_playpaq = {
-       .name = "LRS_PlayPaq_WM8510",
-       .dai_link = &playpaq_wm8510_dai,
-       .num_links = 1,
-};
-
-static struct platform_device *playpaq_snd_device;
-
-
-static int __init playpaq_asoc_init(void)
-{
-       int ret = 0;
-
-       /*
-        * Configure MCLK for WM8510
-        */
-       _gclk0 = clk_get(NULL, "gclk0");
-       if (IS_ERR(_gclk0)) {
-               _gclk0 = NULL;
-               ret = PTR_ERR(_gclk0);
-               goto err_gclk0;
-       }
-       _pll0 = clk_get(NULL, "pll0");
-       if (IS_ERR(_pll0)) {
-               _pll0 = NULL;
-               ret = PTR_ERR(_pll0);
-               goto err_pll0;
-       }
-       ret = clk_set_parent(_gclk0, _pll0);
-       if (ret) {
-               pr_warning("snd-soc-playpaq: "
-                          "Failed to set PLL0 as parent for DAC clock\n");
-               goto err_set_clk;
-       }
-       clk_set_rate(CODEC_CLK, 12000000);
-       clk_enable(CODEC_CLK);
-
-#if defined CONFIG_AT32_ENHANCED_PORTMUX
-       at32_select_periph(MCLK_PIN, MCLK_PERIPH, 0);
-#endif
-
-
-       /*
-        * Create and register platform device
-        */
-       playpaq_snd_device = platform_device_alloc("soc-audio", 0);
-       if (playpaq_snd_device == NULL) {
-               ret = -ENOMEM;
-               goto err_device_alloc;
-       }
-
-       platform_set_drvdata(playpaq_snd_device, &snd_soc_playpaq);
-
-       ret = platform_device_add(playpaq_snd_device);
-       if (ret) {
-               pr_warning("playpaq_wm8510: platform_device_add failed (%d)\n",
-                          ret);
-               goto err_device_add;
-       }
-
-       return 0;
-
-
-err_device_add:
-       if (playpaq_snd_device != NULL) {
-               platform_device_put(playpaq_snd_device);
-               playpaq_snd_device = NULL;
-       }
-err_device_alloc:
-err_set_clk:
-       if (_pll0 != NULL) {
-               clk_put(_pll0);
-               _pll0 = NULL;
-       }
-err_pll0:
-       if (_gclk0 != NULL) {
-               clk_put(_gclk0);
-               _gclk0 = NULL;
-       }
-       return ret;
-}
-
-
-static void __exit playpaq_asoc_exit(void)
-{
-       if (_gclk0 != NULL) {
-               clk_put(_gclk0);
-               _gclk0 = NULL;
-       }
-       if (_pll0 != NULL) {
-               clk_put(_pll0);
-               _pll0 = NULL;
-       }
-
-#if defined CONFIG_AT32_ENHANCED_PORTMUX
-       at32_free_pin(MCLK_PIN);
-#endif
-
-       platform_device_unregister(playpaq_snd_device);
-       playpaq_snd_device = NULL;
-}
-
-module_init(playpaq_asoc_init);
-module_exit(playpaq_asoc_exit);
-
-MODULE_AUTHOR("Geoffrey Wossum <gwossum@acm.org>");
-MODULE_DESCRIPTION("ASoC machine driver for LRS PlayPaq");
-MODULE_LICENSE("GPL");
index 4584514..fa787d4 100644
@@ -33,7 +33,7 @@ config SND_SOC_ALL_CODECS
        select SND_SOC_CX20442
        select SND_SOC_DA7210 if I2C
        select SND_SOC_DFBMCS320
-       select SND_SOC_JZ4740_CODEC if SOC_JZ4740
+       select SND_SOC_JZ4740_CODEC
        select SND_SOC_LM4857 if I2C
        select SND_SOC_MAX98088 if I2C
        select SND_SOC_MAX98095 if I2C
index 444747f..dd7be0d 100644
@@ -34,7 +34,7 @@
 
 #define AD1836_ADC_CTRL2               13
 #define AD1836_ADC_WORD_LEN_MASK       0x30
-#define AD1836_ADC_WORD_OFFSET         5
+#define AD1836_ADC_WORD_OFFSET         4
 #define AD1836_ADC_SERFMT_MASK         (7 << 6)
 #define AD1836_ADC_SERFMT_PCK256       (0x4 << 6)
 #define AD1836_ADC_SERFMT_PCK128       (0x5 << 6)
index f1f237e..73f46eb 100644
@@ -601,7 +601,6 @@ static int cs4270_soc_suspend(struct snd_soc_codec *codec, pm_message_t mesg)
 static int cs4270_soc_resume(struct snd_soc_codec *codec)
 {
        struct cs4270_private *cs4270 = snd_soc_codec_get_drvdata(codec);
-       struct i2c_client *i2c_client = to_i2c_client(codec->dev);
        int reg;
 
        regulator_bulk_enable(ARRAY_SIZE(cs4270->supplies),
@@ -612,14 +611,7 @@ static int cs4270_soc_resume(struct snd_soc_codec *codec)
        ndelay(500);
 
        /* first restore the entire register cache ... */
-       for (reg = CS4270_FIRSTREG; reg <= CS4270_LASTREG; reg++) {
-               u8 val = snd_soc_read(codec, reg);
-
-               if (i2c_smbus_write_byte_data(i2c_client, reg, val)) {
-                       dev_err(codec->dev, "i2c write failed\n");
-                       return -EIO;
-               }
-       }
+       snd_soc_cache_sync(codec);
 
        /* ... then disable the power-down bits */
        reg = snd_soc_read(codec, CS4270_PWRCTL);
index 8c3c820..1ee6636 100644
@@ -555,7 +555,7 @@ static int cs42l51_probe(struct snd_soc_codec *codec)
 
 static struct snd_soc_codec_driver soc_codec_device_cs42l51 = {
        .probe =        cs42l51_probe,
-       .reg_cache_size = CS42L51_NUMREGS,
+       .reg_cache_size = CS42L51_NUMREGS + 1,
        .reg_word_size = sizeof(u8),
 };
 
index e373f8f..3e1f4e1 100644
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/io.h>
 
 #include <linux/delay.h>
 
index 9e7e964..dcf6f2a 100644
@@ -106,13 +106,13 @@ static int max9877_set_2reg(struct snd_kcontrol *kcontrol,
        unsigned int mask = mc->max;
        unsigned int val = (ucontrol->value.integer.value[0] & mask);
        unsigned int val2 = (ucontrol->value.integer.value[1] & mask);
-       unsigned int change = 1;
+       unsigned int change = 0;
 
-       if (((max9877_regs[reg] >> shift) & mask) == val)
-               change = 0;
+       if (((max9877_regs[reg] >> shift) & mask) != val)
+               change = 1;
 
-       if (((max9877_regs[reg2] >> shift) & mask) == val2)
-               change = 0;
+       if (((max9877_regs[reg2] >> shift) & mask) != val2)
+               change = 1;
 
        if (change) {
                max9877_regs[reg] &= ~(mask << shift);
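
Before this fix, change started at 1 and was cleared whenever either register already held the requested value, so a put that changed only one of the two registers could be misreported as "no change" and the hardware write skipped. The corrected shape of such a dual-register put handler, as a fragment (max9877_write_both() is a placeholder for the shadow-register update and chip write shown above):

unsigned int change = 0;

if (((max9877_regs[reg] >> shift) & mask) != val)
        change = 1;
if (((max9877_regs[reg2] >> shift) & mask) != val2)
        change = 1;

if (change)
        max9877_write_both(reg, val, reg2, val2);       /* placeholder helper */

return change;          /* ALSA expects 1 when a value changed, else 0 */
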
index c5ca8cf..0441893 100644
@@ -863,13 +863,13 @@ static struct i2c_driver uda1380_i2c_driver = {
 
 static int __init uda1380_modinit(void)
 {
-       int ret;
+       int ret = 0;
 #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
        ret = i2c_add_driver(&uda1380_i2c_driver);
        if (ret != 0)
                pr_err("Failed to register UDA1380 I2C driver: %d\n", ret);
 #endif
-       return 0;
+       return ret;
 }
 module_init(uda1380_modinit);
 
index 0293763..5a14d5c 100644
@@ -60,6 +60,8 @@ static int wm8958_dsp2_fw(struct snd_soc_codec *codec, const char *name,
        }
 
        if (memcmp(fw->data, "WMFW", 4) != 0) {
+               memcpy(&data32, fw->data, sizeof(data32));
+               data32 = be32_to_cpu(data32);
                dev_err(codec->dev, "%s: firmware has bad file magic %08x\n",
                        name, data32);
                goto err;
index 9c982e4..d0c545b 100644
@@ -1325,15 +1325,15 @@ SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0),
 };
 
 static const struct snd_soc_dapm_widget wm8994_adc_revd_widgets[] = {
-SND_SOC_DAPM_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
-                  adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
-SND_SOC_DAPM_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
-                  adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_VIRT_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
+                       adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_VIRT_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
+                       adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
 };
 
 static const struct snd_soc_dapm_widget wm8994_adc_widgets[] = {
-SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
-SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
+SND_SOC_DAPM_VIRT_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
+SND_SOC_DAPM_VIRT_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
 };
 
 static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = {
@@ -2357,6 +2357,11 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream,
        bclk |= best << WM8994_AIF1_BCLK_DIV_SHIFT;
 
        lrclk = bclk_rate / params_rate(params);
+       if (!lrclk) {
+               dev_err(dai->dev, "Unable to generate LRCLK from %dHz BCLK\n",
+                       bclk_rate);
+               return -EINVAL;
+       }
        dev_dbg(dai->dev, "Using LRCLK rate %d for actual LRCLK %dHz\n",
                lrclk, bclk_rate / lrclk);
 
@@ -3178,6 +3183,8 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
                switch (wm8994->revision) {
                case 0:
                case 1:
+               case 2:
+               case 3:
                        wm8994->hubs.dcs_codes_l = -9;
                        wm8994->hubs.dcs_codes_r = -5;
                        break;
index 645c980..a33b04d 100644
@@ -1968,6 +1968,7 @@ static int wm8996_set_sysclk(struct snd_soc_dai *dai,
                break;
        case 24576000:
                ratediv = WM8996_SYSCLK_DIV;
+               wm8996->sysclk /= 2;
        case 12288000:
                snd_soc_update_bits(codec, WM8996_AIF_RATE,
                                    WM8996_SYSCLK_RATE, WM8996_SYSCLK_RATE);
index 31af405..ae49f1c 100644
@@ -392,7 +392,8 @@ static int mpc8610_hpcd_probe(struct platform_device *pdev)
        }
 
        if (strcasecmp(sprop, "i2s-slave") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_I2S;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
 
@@ -409,31 +410,38 @@ static int mpc8610_hpcd_probe(struct platform_device *pdev)
                }
                machine_data->clk_frequency = be32_to_cpup(iprop);
        } else if (strcasecmp(sprop, "i2s-master") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_I2S;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
        } else if (strcasecmp(sprop, "lj-slave") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_LEFT_J;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBM_CFM;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
        } else if (strcasecmp(sprop, "lj-master") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_LEFT_J;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBS_CFS;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
        } else if (strcasecmp(sprop, "rj-slave") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_RIGHT_J;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_RIGHT_J | SND_SOC_DAIFMT_CBM_CFM;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
        } else if (strcasecmp(sprop, "rj-master") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_RIGHT_J;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_RIGHT_J | SND_SOC_DAIFMT_CBS_CFS;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
        } else if (strcasecmp(sprop, "ac97-slave") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_AC97;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_AC97 | SND_SOC_DAIFMT_CBM_CFM;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
        } else if (strcasecmp(sprop, "ac97-master") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_AC97;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_AC97 | SND_SOC_DAIFMT_CBS_CFS;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
        } else {
index b133bfc..7383917 100644
@@ -28,7 +28,7 @@ config SND_MXC_SOC_WM1133_EV1
 
 config SND_SOC_MX27VIS_AIC32X4
        tristate "SoC audio support for Visstrim M10 boards"
-       depends on MACH_IMX27_VISSTRIM_M10
+       depends on MACH_IMX27_VISSTRIM_M10 && I2C
        select SND_SOC_TLV320AIC32X4
        select SND_MXC_SOC_MX2
        help
index 8f49e16..c62d715 100644
@@ -12,6 +12,7 @@ config SND_KIRKWOOD_SOC_I2S
 config SND_KIRKWOOD_SOC_OPENRD
        tristate "SoC Audio support for Kirkwood Openrd Client"
        depends on SND_KIRKWOOD_SOC && (MACH_OPENRD_CLIENT || MACH_OPENRD_ULTIMATE)
+       depends on I2C
        select SND_KIRKWOOD_SOC_I2S
        select SND_SOC_CS42L51
        help
@@ -20,7 +21,7 @@ config SND_KIRKWOOD_SOC_OPENRD
 
 config SND_KIRKWOOD_SOC_T5325
        tristate "SoC Audio support for HP t5325"
-       depends on SND_KIRKWOOD_SOC && MACH_T5325
+       depends on SND_KIRKWOOD_SOC && MACH_T5325 && I2C
        select SND_KIRKWOOD_SOC_I2S
        select SND_SOC_ALC5623
        help
index dea5aa4..f39d7dd 100644
@@ -357,3 +357,6 @@ static void __exit snd_mxs_pcm_exit(void)
        platform_driver_unregister(&mxs_pcm_driver);
 }
 module_exit(snd_mxs_pcm_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxs-pcm-audio");
index 7fbeaec..1c57f66 100644
@@ -171,3 +171,4 @@ module_exit(mxs_sgtl5000_exit);
 MODULE_AUTHOR("Freescale Semiconductor, Inc.");
 MODULE_DESCRIPTION("MXS ALSA SoC Machine driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxs-sgtl5000");
index ffd2242..a0f7d3c 100644
@@ -151,6 +151,7 @@ config SND_SOC_ZYLONITE
 config SND_SOC_RAUMFELD
        tristate "SoC Audio support Raumfeld audio adapter"
        depends on SND_PXA2XX_SOC && (MACH_RAUMFELD_SPEAKER || MACH_RAUMFELD_CONNECTOR)
+       depends on I2C && SPI_MASTER
        select SND_PXA_SOC_SSP
        select SND_SOC_CS4270
        select SND_SOC_AK4104
@@ -159,7 +160,7 @@ config SND_SOC_RAUMFELD
 
 config SND_PXA2XX_SOC_HX4700
        tristate "SoC Audio support for HP iPAQ hx4700"
-       depends on SND_PXA2XX_SOC && MACH_H4700
+       depends on SND_PXA2XX_SOC && MACH_H4700 && I2C
        select SND_PXA2XX_SOC_I2S
        select SND_SOC_AK4641
        help
index 65c1248..c664e33 100644
@@ -209,9 +209,10 @@ static int __devinit hx4700_audio_probe(struct platform_device *pdev)
        snd_soc_card_hx4700.dev = &pdev->dev;
        ret = snd_soc_register_card(&snd_soc_card_hx4700);
        if (ret)
-               return ret;
+               gpio_free_array(hx4700_audio_gpios,
+                               ARRAY_SIZE(hx4700_audio_gpios));
 
-       return 0;
+       return ret;
 }
 
 static int __devexit hx4700_audio_remove(struct platform_device *pdev)
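
Note: the probe fix above stops leaking the GPIOs requested earlier in hx4700_audio_probe() when snd_soc_register_card() fails. The general shape of the pattern, sketched with the same symbols but otherwise simplified and not the actual probe body:

    /* Sketch: anything acquired earlier in probe must be released when a
     * later step fails, rather than returning early with it still held. */
    static int __devinit example_probe(struct platform_device *pdev)
    {
            int ret;

            ret = gpio_request_array(hx4700_audio_gpios,
                                     ARRAY_SIZE(hx4700_audio_gpios));
            if (ret)
                    return ret;

            ret = snd_soc_register_card(&snd_soc_card_hx4700);
            if (ret)
                    /* undo the earlier request instead of leaking it */
                    gpio_free_array(hx4700_audio_gpios,
                                    ARRAY_SIZE(hx4700_audio_gpios));

            return ret;
    }
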
index 1826acf..8e523fd 100644
@@ -101,7 +101,6 @@ static int jive_wm8750_init(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_soc_codec *codec = rtd->codec;
        struct snd_soc_dapm_context *dapm = &codec->dapm;
-       int err;
 
        /* These endpoints are not being used. */
        snd_soc_dapm_nc_pin(dapm, "LINPUT2");
@@ -131,7 +130,7 @@ static struct snd_soc_card snd_soc_machine_jive = {
        .dai_link       = &jive_dai,
        .num_links      = 1,
 
-       .dapm_widgtets  = wm8750_dapm_widgets,
+       .dapm_widgets   = wm8750_dapm_widgets,
        .num_dapm_widgets = ARRAY_SIZE(wm8750_dapm_widgets),
        .dapm_routes    = audio_map,
        .num_dapm_routes = ARRAY_SIZE(audio_map),
index 3a0dbfc..8bd1dc5 100644
@@ -12,6 +12,7 @@
  *
  */
 
+#include <linux/module.h>
 #include <sound/soc.h>
 
 static struct snd_soc_card smdk2443;
index f75e439..ad9ac42 100644
@@ -9,6 +9,7 @@
 
 #include "../codecs/wm8994.h"
 #include <sound/pcm_params.h>
+#include <linux/module.h>
 
  /*
   * Default CFG switch settings to use this driver:
index 85bf541..4b8e354 100644
@@ -191,7 +191,7 @@ static int speyside_late_probe(struct snd_soc_card *card)
        snd_soc_dapm_ignore_suspend(&card->dapm, "Headset Mic");
        snd_soc_dapm_ignore_suspend(&card->dapm, "Main AMIC");
        snd_soc_dapm_ignore_suspend(&card->dapm, "Main DMIC");
-       snd_soc_dapm_ignore_suspend(&card->dapm, "Speaker");
+       snd_soc_dapm_ignore_suspend(&card->dapm, "Main Speaker");
        snd_soc_dapm_ignore_suspend(&card->dapm, "WM1250 Output");
        snd_soc_dapm_ignore_suspend(&card->dapm, "WM1250 Input");
 
index a5d3685..a25fa63 100644
@@ -709,6 +709,12 @@ int snd_soc_resume(struct device *dev)
        struct snd_soc_card *card = dev_get_drvdata(dev);
        int i, ac97_control = 0;
 
+       /* If the initialization of this soc device failed, there is no codec
+        * associated with it. Just bail out in this case.
+        */
+       if (list_empty(&card->codec_dev_list))
+               return 0;
+
        /* AC97 devices might have other drivers hanging off them so
         * need to resume immediately.  Other drivers don't have that
         * problem and may take a substantial amount of time to resume
index 0c12b98..4220bb0 100644
@@ -58,7 +58,36 @@ int snd_soc_params_to_bclk(struct snd_pcm_hw_params *params)
 }
 EXPORT_SYMBOL_GPL(snd_soc_params_to_bclk);
 
-static struct snd_soc_platform_driver dummy_platform;
+static const struct snd_pcm_hardware dummy_dma_hardware = {
+       .formats                = 0xffffffff,
+       .channels_min           = 1,
+       .channels_max           = UINT_MAX,
+
+       /* Random values to keep userspace happy when checking constraints */
+       .info                   = SNDRV_PCM_INFO_INTERLEAVED |
+                                 SNDRV_PCM_INFO_BLOCK_TRANSFER,
+       .buffer_bytes_max       = 128*1024,
+       .period_bytes_min       = PAGE_SIZE,
+       .period_bytes_max       = PAGE_SIZE*2,
+       .periods_min            = 2,
+       .periods_max            = 128,
+};
+
+static int dummy_dma_open(struct snd_pcm_substream *substream)
+{
+       snd_soc_set_runtime_hwparams(substream, &dummy_dma_hardware);
+
+       return 0;
+}
+
+static struct snd_pcm_ops dummy_dma_ops = {
+       .open           = dummy_dma_open,
+       .ioctl          = snd_pcm_lib_ioctl,
+};
+
+static struct snd_soc_platform_driver dummy_platform = {
+       .ops = &dummy_dma_ops,
+};
 
 static __devinit int snd_soc_dummy_probe(struct platform_device *pdev)
 {
index b61945f..32d2a21 100644
@@ -1632,6 +1632,37 @@ YAMAHA_DEVICE(0x7010, "UB99"),
                }
        }
 },
+{
+       /* Roland GAIA SH-01 */
+       USB_DEVICE(0x0582, 0x0111),
+       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+               .vendor_name = "Roland",
+               .product_name = "GAIA",
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = (const struct snd_usb_audio_quirk[]) {
+                       {
+                               .ifnum = 0,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 1,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_MIDI_FIXED_ENDPOINT,
+                               .data = &(const struct snd_usb_midi_endpoint_info) {
+                               .out_cables = 0x0003,
+                               .in_cables  = 0x0003
+                               }
+                       },
+                       {
+                               .ifnum = -1
+                       }
+               }
+       }
+},
 {
        USB_DEVICE(0x0582, 0x0113),
        .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
index 7d98676..955930e 100644
@@ -463,7 +463,8 @@ static int run_perf_stat(int argc __used, const char **argv)
 
        list_for_each_entry(counter, &evsel_list->entries, node) {
                if (create_perf_stat_counter(counter, first) < 0) {
-                       if (errno == EINVAL || errno == ENOSYS || errno == ENOENT) {
+                       if (errno == EINVAL || errno == ENOSYS ||
+                           errno == ENOENT || errno == EOPNOTSUPP) {
                                if (verbose)
                                        ui__warning("%s event is not supported by the kernel.\n",
                                                    event_name(counter));
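
Note: EOPNOTSUPP joins the errno values from perf_event_open() that perf stat treats as "event not supported on this kernel/hardware" rather than a fatal error. A small userspace sketch of the same classification; the function name is an assumption:

    #include <errno.h>
    #include <stdbool.h>

    /* Sketch: failures that only mean the requested event is unavailable,
     * so the tool can warn and keep counting the remaining events. */
    static bool event_unsupported(int err)
    {
            return err == EINVAL || err == ENOSYS ||
                   err == ENOENT || err == EOPNOTSUPP;
    }
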
index e426264..d7915d4 100644
@@ -34,6 +34,16 @@ int __perf_evsel__sample_size(u64 sample_type)
        return size;
 }
 
+static void hists__init(struct hists *hists)
+{
+       memset(hists, 0, sizeof(*hists));
+       hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
+       hists->entries_in = &hists->entries_in_array[0];
+       hists->entries_collapsed = RB_ROOT;
+       hists->entries = RB_ROOT;
+       pthread_mutex_init(&hists->lock, NULL);
+}
+
 void perf_evsel__init(struct perf_evsel *evsel,
                      struct perf_event_attr *attr, int idx)
 {
index bcd05d0..33c17a2 100644
@@ -388,7 +388,7 @@ static int write_event_desc(int fd, struct perf_header *h __used,
                /*
                 * write event string as passed on cmdline
                 */
-               ret = do_write_string(fd, attr->name);
+               ret = do_write_string(fd, event_name(attr));
                if (ret < 0)
                        return ret;
                /*
index a36a3fa..abef270 100644
@@ -1211,13 +1211,3 @@ size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
 
        return ret;
 }
-
-void hists__init(struct hists *hists)
-{
-       memset(hists, 0, sizeof(*hists));
-       hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
-       hists->entries_in = &hists->entries_in_array[0];
-       hists->entries_collapsed = RB_ROOT;
-       hists->entries = RB_ROOT;
-       pthread_mutex_init(&hists->lock, NULL);
-}
index c86c1d2..89289c8 100644
@@ -63,8 +63,6 @@ struct hists {
        struct callchain_cursor callchain_cursor;
 };
 
-void hists__init(struct hists *hists);
-
 struct hist_entry *__hists__add_entry(struct hists *self,
                                      struct addr_location *al,
                                      struct symbol *parent, u64 period);
index 85c1e6b..0f4555c 100644
@@ -1333,6 +1333,10 @@ int perf_session__cpu_bitmap(struct perf_session *session,
        }
 
        map = cpu_map__new(cpu_list);
+       if (map == NULL) {
+               pr_err("Invalid cpu_list\n");
+               return -1;
+       }
 
        for (i = 0; i < map->nr; i++) {
                int cpu = map->map[i];
index 0a7ed5b..6c164dc 100644
@@ -1537,6 +1537,8 @@ process_flags(struct event *event, struct print_arg *arg, char **tok)
        field = malloc_or_die(sizeof(*field));
 
        type = process_arg(event, field, &token);
+       while (type == EVENT_OP)
+               type = process_op(event, field, &token);
        if (test_type_token(type, token, EVENT_DELIM, ","))
                goto out_free;
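
Note: process_flags() now folds any chained operator tokens returned for its first argument before insisting on the ',' delimiter, so __print_flags() expressions that contain an operator parse again. A hypothetical event format line of the kind this covers (illustrative only, not taken from a real tracepoint):

    print fmt: "flags=%s", __print_flags(REC->flags & 0x0f, "|",
            { 0x01, "RD" }, { 0x02, "WR" }, { 0x04, "SYNC" })

Here the '&' inside the first argument yields an EVENT_OP token, which the loop above now consumes before the ',' check.
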