Merge tag 'md/3.17-more-fixes' of git://git.neil.brown.name/md
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 24 Sep 2014 15:53:33 +0000 (08:53 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 24 Sep 2014 15:53:33 +0000 (08:53 -0700)
Pull bugfixes for md/raid1 from Neil Brown:
 "It is amazing how much easier it is to find bugs when you know one is
  there.  Two bug reports resulted in finding 7 bugs!

  All are tagged for -stable.  Those that can't cause (rare) data
  corruption, cause lockups.

  Particularly, but not only, fixing new "resync" code"

* tag 'md/3.17-more-fixes' of git://git.neil.brown.name/md:
  md/raid1: fix_read_error should act on all non-faulty devices.
  md/raid1: count resync requests in nr_pending.
  md/raid1: update next_resync under resync_lock.
  md/raid1: Don't use next_resync to determine how far resync has progressed
  md/raid1: make sure resync waits for conflicting writes to complete.
  md/raid1: clean up request counts properly in close_sync()
  md/raid1:  be more cautious where we read-balance during resync.
  md/raid1: intialise start_next_window for READ case to avoid hang

498 files changed:
Documentation/devicetree/bindings/dma/rcar-audmapp.txt
Documentation/devicetree/bindings/input/atmel,maxtouch.txt
Documentation/devicetree/bindings/sound/rockchip-i2s.txt
Documentation/devicetree/bindings/spi/spi-rockchip.txt
Documentation/devicetree/bindings/usb/mxs-phy.txt
Documentation/devicetree/bindings/video/analog-tv-connector.txt
Documentation/kernel-parameters.txt
Documentation/networking/filter.txt
MAINTAINERS
Makefile
arch/arm/boot/dts/omap3-n900.dts
arch/arm/include/asm/tls.h
arch/arm/include/asm/uaccess.h
arch/arm/include/asm/xen/page-coherent.h
arch/arm/include/asm/xen/page.h
arch/arm/kernel/armksyms.c
arch/arm/kernel/irq.c
arch/arm/kernel/perf_event_cpu.c
arch/arm/kernel/process.c
arch/arm/kernel/swp_emulate.c
arch/arm/kernel/thumbee.c
arch/arm/kernel/traps.c
arch/arm/lib/getuser.S
arch/arm/mm/proc-v7-3level.S
arch/arm/xen/Makefile
arch/arm/xen/enlighten.c
arch/arm/xen/mm32.c [new file with mode: 0644]
arch/arm/xen/p2m.c
arch/arm64/kernel/irq.c
arch/arm64/kernel/process.c
arch/arm64/kernel/sys_compat.c
arch/arm64/mm/init.c
arch/ia64/configs/bigsur_defconfig
arch/ia64/configs/generic_defconfig
arch/ia64/configs/gensparse_defconfig
arch/ia64/configs/sim_defconfig
arch/ia64/configs/tiger_defconfig
arch/ia64/configs/zx1_defconfig
arch/ia64/include/uapi/asm/unistd.h
arch/ia64/pci/fixup.c
arch/microblaze/Kconfig
arch/microblaze/include/asm/entry.h
arch/microblaze/include/asm/uaccess.h
arch/microblaze/include/asm/unistd.h
arch/mips/Kconfig
arch/mips/Makefile
arch/mips/bcm63xx/irq.c
arch/mips/boot/compressed/decompress.c
arch/mips/include/asm/cop2.h
arch/mips/include/asm/mach-ip28/spaces.h
arch/mips/include/asm/page.h
arch/mips/include/asm/smp.h
arch/mips/include/asm/switch_to.h
arch/mips/include/asm/topology.h
arch/mips/include/uapi/asm/unistd.h
arch/mips/kernel/machine_kexec.c
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-o32.S
arch/mips/mm/init.c
arch/mips/net/bpf_jit.c
arch/parisc/Kconfig
arch/parisc/Makefile
arch/parisc/hpux/sys_hpux.c
arch/parisc/include/asm/seccomp.h [new file with mode: 0644]
arch/parisc/include/asm/thread_info.h
arch/parisc/include/uapi/asm/unistd.h
arch/parisc/kernel/ptrace.c
arch/parisc/kernel/syscall.S
arch/parisc/kernel/syscall_table.S
arch/powerpc/configs/cell_defconfig
arch/powerpc/configs/celleb_defconfig
arch/powerpc/configs/corenet64_smp_defconfig
arch/powerpc/configs/g5_defconfig
arch/powerpc/configs/maple_defconfig
arch/powerpc/configs/pasemi_defconfig
arch/powerpc/configs/ppc64_defconfig
arch/powerpc/configs/ppc64e_defconfig
arch/powerpc/configs/ps3_defconfig
arch/powerpc/configs/pseries_defconfig
arch/powerpc/configs/pseries_le_defconfig
arch/powerpc/include/asm/ptrace.h
arch/powerpc/include/asm/systbl.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/include/uapi/asm/unistd.h
arch/powerpc/perf/callchain.c
arch/powerpc/platforms/powernv/opal-hmi.c
arch/powerpc/platforms/pseries/hotplug-memory.c
arch/s390/include/asm/ipl.h
arch/s390/kernel/ipl.c
arch/s390/kernel/vdso32/clock_gettime.S
arch/s390/kernel/vdso64/clock_gettime.S
arch/s390/mm/init.c
arch/sh/mm/gup.c
arch/sparc/net/bpf_jit_comp.c
arch/x86/Kconfig
arch/x86/boot/compressed/eboot.c
arch/x86/include/asm/bitops.h
arch/x86/include/asm/io_apic.h
arch/x86/include/asm/pgtable_64.h
arch/x86/kernel/kprobes/opt.c
arch/x86/mm/dump_pagetables.c
arch/x86/mm/mmap.c
arch/x86/pci/fixup.c
arch/x86/xen/mmu.c
block/blk-exec.c
block/blk-merge.c
block/blk-mq.c
block/blk-sysfs.c
block/genhd.c
block/partition-generic.c
crypto/drbg.c
drivers/acpi/acpi_cmos_rtc.c
drivers/acpi/acpi_lpss.c
drivers/acpi/battery.c
drivers/acpi/bus.c
drivers/base/regmap/regmap-debugfs.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/null_blk.c
drivers/block/rbd.c
drivers/char/hw_random/virtio-rng.c
drivers/clk/at91/clk-slow.c
drivers/clk/clk-efm32gg.c
drivers/clk/clk.c
drivers/clk/qcom/gcc-ipq806x.c
drivers/clk/rockchip/clk-rk3288.c
drivers/clk/ti/clk-dra7-atl.c
drivers/clk/ti/divider.c
drivers/cpufreq/cpufreq_opp.c
drivers/dma/dma-jz4740.c
drivers/firmware/efi/libstub/fdt.c
drivers/gpu/drm/ast/ast_main.c
drivers/gpu/drm/bochs/bochs_kms.c
drivers/gpu/drm/cirrus/cirrus_mode.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/msm/hdmi/hdmi.c
drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c
drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c
drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c
drivers/gpu/drm/nouveau/nouveau_acpi.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_vga.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/cik_sdma.c
drivers/gpu/drm/radeon/kv_dpm.c
drivers/gpu/drm/radeon/ni_dma.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_dma.c
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_atpx_handler.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_semaphore.c
drivers/gpu/drm/radeon/rs400.c
drivers/gpu/drm/sti/sti_hdmi.c
drivers/gpu/vga/vga_switcheroo.c
drivers/gpu/vga/vgaarb.c
drivers/hwmon/fam15h_power.c
drivers/hwmon/tmp103.c
drivers/iio/accel/bma180.c
drivers/iio/adc/ad_sigma_delta.c
drivers/iio/adc/at91_adc.c
drivers/iio/adc/xilinx-xadc-core.c
drivers/iio/common/hid-sensors/hid-sensor-trigger.c
drivers/iio/common/st_sensors/st_sensors_trigger.c
drivers/iio/gyro/itg3200_buffer.c
drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
drivers/iio/inkern.c
drivers/iio/magnetometer/st_magn_core.c
drivers/infiniband/core/umem.c
drivers/infiniband/core/uverbs_marshall.c
drivers/infiniband/hw/ipath/ipath_user_pages.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/mr.c
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/ocrdma/ocrdma_ah.c
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
drivers/infiniband/hw/qib/qib_debugfs.c
drivers/infiniband/hw/qib/qib_qp.c
drivers/infiniband/hw/qib/qib_user_pages.c
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/infiniband/ulp/iser/iscsi_iser.c
drivers/infiniband/ulp/iser/iscsi_iser.h
drivers/infiniband/ulp/iser/iser_verbs.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/input/keyboard/atkbd.c
drivers/input/keyboard/cap1106.c
drivers/input/keyboard/matrix_keypad.c
drivers/input/mouse/alps.c
drivers/input/mouse/elantech.c
drivers/input/mouse/psmouse-base.c
drivers/input/mouse/synaptics.c
drivers/input/mouse/synaptics.h
drivers/input/mouse/synaptics_usb.c
drivers/input/mouse/trackpoint.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/serio/i8042.c
drivers/input/serio/serport.c
drivers/input/touchscreen/atmel_mxt_ts.c
drivers/input/touchscreen/wm9712.c
drivers/input/touchscreen/wm9713.c
drivers/iommu/arm-smmu.c
drivers/iommu/dmar.c
drivers/iommu/fsl_pamu_domain.c
drivers/iommu/iommu.c
drivers/irqchip/exynos-combiner.c
drivers/irqchip/irq-crossbar.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-gic.c
drivers/md/dm-cache-target.c
drivers/media/Kconfig
drivers/media/dvb-core/dvb-usb-ids.h
drivers/media/dvb-frontends/af9033.c
drivers/media/dvb-frontends/af9033_priv.h
drivers/media/i2c/smiapp/smiapp-core.c
drivers/media/pci/cx18/cx18-driver.c
drivers/media/tuners/tuner_it913x.c
drivers/media/usb/dvb-usb-v2/af9035.c
drivers/message/fusion/Kconfig
drivers/misc/lattice-ecp3-config.c
drivers/net/bonding/bond_main.c
drivers/net/can/at91_can.c
drivers/net/can/c_can/c_can_platform.c
drivers/net/can/flexcan.c
drivers/net/can/sja1000/peak_pci.c
drivers/net/ethernet/3com/3c59x.c
drivers/net/ethernet/arc/emac_main.c
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/davicom/dm9000.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/mr.c
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/octeon/octeon_mgmt.c
drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/sfc/farch.c
drivers/net/ethernet/sun/sunvnet.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/macvlan.c
drivers/net/phy/micrel.c
drivers/net/usb/r8152.c
drivers/net/wireless/ath/ath9k/common-beacon.c
drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
drivers/net/wireless/brcm80211/Kconfig
drivers/net/wireless/brcm80211/brcmfmac/Makefile
drivers/net/wireless/brcm80211/brcmfmac/bcdc.h
drivers/net/wireless/brcm80211/brcmfmac/fweh.c
drivers/net/wireless/brcm80211/brcmfmac/fweh.h
drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
drivers/net/wireless/iwlwifi/dvm/power.c
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/iwl-config.h
drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/iwlwifi/mvm/coex.c
drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
drivers/net/wireless/iwlwifi/mvm/fw-api.h
drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/power.c
drivers/net/wireless/iwlwifi/mvm/rx.c
drivers/net/wireless/iwlwifi/mvm/sf.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/ntb/ntb_transport.c
drivers/parisc/dino.c
drivers/parisc/pdc_stable.c
drivers/pci/host/pci-imx6.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/hotplug/pciehp_hpc.c
drivers/pci/hotplug/pcihp_slot.c
drivers/phy/Kconfig
drivers/phy/phy-exynos5-usbdrd.c
drivers/phy/phy-miphy365x.c
drivers/phy/phy-twl4030-usb.c
drivers/pinctrl/pinctrl-baytrail.c
drivers/regulator/88pm8607.c
drivers/regulator/da9052-regulator.c
drivers/regulator/max8907-regulator.c
drivers/regulator/max8925-regulator.c
drivers/regulator/max8997.c
drivers/regulator/palmas-regulator.c
drivers/regulator/tps65910-regulator.c
drivers/s390/block/dasd_devmap.c
drivers/scsi/Kconfig
drivers/scsi/bnx2fc/Kconfig
drivers/scsi/bnx2i/Kconfig
drivers/scsi/csiostor/Kconfig
drivers/scsi/libiscsi.c
drivers/scsi/qla2xxx/Kconfig
drivers/scsi/scsi_lib.c
drivers/spi/spi-davinci.c
drivers/spi/spi-dw.c
drivers/spi/spi-fsl-espi.c
drivers/spi/spi-fsl-spi.c
drivers/spi/spi-pl022.c
drivers/spi/spi-rockchip.c
drivers/spi/spi-sirf.c
drivers/staging/android/sync.c
drivers/staging/iio/meter/ade7758_trigger.c
drivers/staging/imx-drm/imx-ldb.c
drivers/staging/imx-drm/ipuv3-plane.c
drivers/staging/lustre/lustre/llite/llite_lib.c
drivers/staging/vt6655/hostap.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_parameters.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/target_core_configfs.c
drivers/target/target_core_spc.c
drivers/tty/serial/8250/8250_dw.c
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/xilinx_uartps.c
drivers/usb/chipidea/ci_hdrc_msm.c
drivers/usb/core/hub.c
drivers/usb/dwc2/gadget.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/dwc3-omap.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/u_fs.h
drivers/usb/gadget/udc/fusb300_udc.h
drivers/usb/gadget/udc/net2280.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci.c
drivers/usb/musb/musb_cppi41.c
drivers/usb/phy/phy-mxs-usb.c
drivers/usb/phy/phy-tegra-usb.c
drivers/usb/renesas_usbhs/fifo.c
drivers/usb/renesas_usbhs/mod.c
drivers/usb/renesas_usbhs/pipe.c
drivers/usb/renesas_usbhs/pipe.h
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/sierra.c
drivers/usb/serial/zte_ev.c
drivers/usb/storage/uas-detect.h
drivers/usb/storage/unusual_devs.h
drivers/uwb/lc-dev.c
drivers/video/fbdev/amba-clcd.c
drivers/xen/balloon.c
drivers/xen/gntalloc.c
drivers/xen/manage.c
fs/btrfs/btrfs_inode.h
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/tree-log.c
fs/btrfs/tree-log.h
fs/btrfs/volumes.c
fs/buffer.c
fs/cachefiles/namei.c
fs/cachefiles/rdwr.c
fs/cifs/Kconfig
fs/cifs/cifsfs.h
fs/cifs/cifsglob.h
fs/cifs/connect.c
fs/cifs/dir.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/link.c
fs/cifs/netmisc.c
fs/cifs/readdir.c
fs/cifs/sess.c
fs/cifs/smb2file.c
fs/cifs/smb2inode.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/dcache.c
fs/eventpoll.c
fs/ext4/namei.c
fs/ext4/resize.c
fs/fscache/object.c
fs/fscache/page.c
fs/gfs2/bmap.c
fs/gfs2/file.c
fs/gfs2/incore.h
fs/gfs2/inode.c
fs/gfs2/super.c
fs/lockd/svc.c
fs/namei.c
fs/nfs/client.c
fs/nfs/filelayout/filelayout.c
fs/nfs/nfs4_fs.h
fs/nfs/nfs4client.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/notify/fdinfo.c
fs/udf/ialloc.c
fs/udf/inode.c
fs/udf/namei.c
fs/udf/super.c
fs/udf/udfdecl.h
include/acpi/acpi_bus.h
include/crypto/drbg.h
include/linux/dcache.h
include/linux/hash.h
include/linux/iio/trigger.h
include/linux/jiffies.h
include/linux/mlx4/device.h
include/linux/mlx4/qp.h
include/linux/pci.h
include/linux/vga_switcheroo.h
include/linux/vgaarb.h
include/linux/workqueue.h
include/net/addrconf.h
include/net/dst.h
include/net/genetlink.h
include/net/sch_generic.h
include/rdma/ib_umem.h
include/scsi/scsi_tcq.h
include/uapi/linux/Kbuild
include/uapi/linux/input.h
include/xen/interface/features.h
init/do_mounts.c
kernel/cgroup.c
kernel/events/core.c
kernel/futex.c
kernel/kcmp.c
kernel/printk/printk.c
kernel/time/alarmtimer.c
kernel/time/time.c
lib/Kconfig
lib/assoc_array.c
lib/hweight.c
lib/rhashtable.c
lib/string.c
mm/dmapool.c
mm/memblock.c
mm/memory.c
mm/mmap.c
mm/nobootmem.c
net/bridge/br_private.h
net/bridge/br_vlan.c
net/ceph/auth_x.c
net/ceph/mon_client.c
net/core/dev.c
net/core/sock.c
net/ipv4/ip_tunnel.c
net/ipv4/route.c
net/ipv6/addrconf.c
net/ipv6/anycast.c
net/ipv6/ip6_output.c
net/mac80211/sta_info.c
net/openvswitch/datapath.c
net/rfkill/rfkill-gpio.c
net/rxrpc/ar-key.c
net/sched/sch_choke.c
net/socket.c
net/wireless/nl80211.c
net/xfrm/xfrm_policy.c
scripts/checkpatch.pl
sound/core/pcm_lib.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_sigmatel.c
sound/soc/codecs/cs4265.c
sound/soc/codecs/sta529.c
sound/soc/codecs/tlv320aic31xx.c
sound/soc/davinci/davinci-mcasp.c
sound/soc/dwc/designware_i2s.c
sound/soc/rockchip/rockchip_i2s.c
sound/soc/samsung/i2s.c
sound/soc/soc-compress.c
sound/soc/soc-pcm.c
sound/soc/spear/spear_pcm.c
sound/usb/caiaq/control.c
tools/usb/usbip/libsrc/usbip_common.h
virt/kvm/arm/vgic-v2.c
virt/kvm/kvm_main.c

index 9f1d750..61bca50 100644 (file)
--- a/Documentation/devicetree/bindings/dma/rcar-audmapp.txt
+++ b/Documentation/devicetree/bindings/dma/rcar-audmapp.txt
@@ -16,9 +16,9 @@ Example:
 * DMA client
 
 Required properties:
-- dmas:                a list of <[DMA multiplexer phandle] [SRS/DRS value]> pairs,
-               where SRS/DRS values are fixed handles, specified in the SoC
-               manual as the value that would be written into the PDMACHCR.
+- dmas:                a list of <[DMA multiplexer phandle] [SRS << 8 | DRS]> pairs.
+               where SRS/DRS are specified in the SoC manual.
+               It will be written into PDMACHCR as high 16-bit parts.
 - dma-names:   a list of DMA channel names, one per "dmas" entry
 
 Example:
index 0ac23f2..1852906 100644 (file)
--- a/Documentation/devicetree/bindings/input/atmel,maxtouch.txt
+++ b/Documentation/devicetree/bindings/input/atmel,maxtouch.txt
@@ -11,10 +11,6 @@ Required properties:
 
 Optional properties for main touchpad device:
 
-- linux,gpio-keymap: An array of up to 4 entries indicating the Linux
-    keycode generated by each GPIO. Linux keycodes are defined in
-    <dt-bindings/input/input.h>.
-
 - linux,gpio-keymap: When enabled, the SPT_GPIOPWN_T19 object sends messages
     on GPIO bit changes. An array of up to 8 entries can be provided
     indicating the Linux keycode mapped to each bit of the status byte,
index 6c55fcf..9b82c20 100644 (file)
--- a/Documentation/devicetree/bindings/sound/rockchip-i2s.txt
+++ b/Documentation/devicetree/bindings/sound/rockchip-i2s.txt
@@ -31,7 +31,7 @@ i2s@ff890000 {
        #address-cells = <1>;
        #size-cells = <0>;
        dmas = <&pdma1 0>, <&pdma1 1>;
-       dma-names = "rx", "tx";
+       dma-names = "tx", "rx";
        clock-names = "i2s_hclk", "i2s_clk";
        clocks = <&cru HCLK_I2S0>, <&cru SCLK_I2S0>;
 };
index 7bab355..467dec4 100644 (file)
--- a/Documentation/devicetree/bindings/spi/spi-rockchip.txt
+++ b/Documentation/devicetree/bindings/spi/spi-rockchip.txt
@@ -16,11 +16,15 @@ Required Properties:
 - clocks: Must contain an entry for each entry in clock-names.
 - clock-names: Shall be "spiclk" for the transfer-clock, and "apb_pclk" for
                           the peripheral clock.
+- #address-cells: should be 1.
+- #size-cells: should be 0.
+
+Optional Properties:
+
 - dmas: DMA specifiers for tx and rx dma. See the DMA client binding,
                Documentation/devicetree/bindings/dma/dma.txt
 - dma-names: DMA request names should include "tx" and "rx" if present.
-- #address-cells: should be 1.
-- #size-cells: should be 0.
+
 
 Example:
 
index cef181a..96681c9 100644 (file)
--- a/Documentation/devicetree/bindings/usb/mxs-phy.txt
+++ b/Documentation/devicetree/bindings/usb/mxs-phy.txt
@@ -5,6 +5,7 @@ Required properties:
        * "fsl,imx23-usbphy" for imx23 and imx28
        * "fsl,imx6q-usbphy" for imx6dq and imx6dl
        * "fsl,imx6sl-usbphy" for imx6sl
+       * "fsl,imx6sx-usbphy" for imx6sx
   "fsl,imx23-usbphy" is still a fallback for other strings
 - reg: Should contain registers location and length
 - interrupts: Should contain phy interrupt
index 0218fcd..0c0970c 100644 (file)
--- a/Documentation/devicetree/bindings/video/analog-tv-connector.txt
+++ b/Documentation/devicetree/bindings/video/analog-tv-connector.txt
@@ -2,7 +2,7 @@ Analog TV Connector
 ===================
 
 Required properties:
-- compatible: "composite-connector" or "svideo-connector"
+- compatible: "composite-video-connector" or "svideo-connector"
 
 Optional properties:
 - label: a symbolic name for the connector
@@ -14,7 +14,7 @@ Example
 -------
 
 tv: connector {
-       compatible = "composite-connector";
+       compatible = "composite-video-connector";
        label = "tv";
 
        port {
index 5ae8608..10d51c2 100644 (file)
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3541,6 +3541,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                                        bogus residue values);
                                s = SINGLE_LUN (the device has only one
                                        Logical Unit);
+                               u = IGNORE_UAS (don't bind to the uas driver);
                                w = NO_WP_DETECT (don't test whether the
                                        medium is write-protected).
                        Example: quirks=0419:aaf5:rl,0421:0433:rc
index c48a970..d16f424 100644 (file)
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -462,9 +462,9 @@ JIT compiler
 ------------
 
 The Linux kernel has a built-in BPF JIT compiler for x86_64, SPARC, PowerPC,
-ARM and s390 and can be enabled through CONFIG_BPF_JIT. The JIT compiler is
-transparently invoked for each attached filter from user space or for internal
-kernel users if it has been previously enabled by root:
+ARM, MIPS and s390 and can be enabled through CONFIG_BPF_JIT. The JIT compiler
+is transparently invoked for each attached filter from user space or for
+internal kernel users if it has been previously enabled by root:
 
   echo 1 > /proc/sys/net/core/bpf_jit_enable
 
index 5e7866a..670b3dc 100644 (file)
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6424,7 +6424,8 @@ F:        Documentation/scsi/NinjaSCSI.txt
 F:     drivers/scsi/nsp32*
 
 NTB DRIVER
-M:     Jon Mason <jon.mason@intel.com>
+M:     Jon Mason <jdmason@kudzu.us>
+M:     Dave Jiang <dave.jiang@intel.com>
 S:     Supported
 W:     https://github.com/jonmason/ntb/wiki
 T:     git git://github.com/jonmason/ntb.git
@@ -6875,7 +6876,7 @@ F:        arch/x86/kernel/quirks.c
 
 PCI DRIVER FOR IMX6
 M:     Richard Zhu <r65037@freescale.com>
-M:     Shawn Guo <shawn.guo@freescale.com>
+M:     Lucas Stach <l.stach@pengutronix.de>
 L:     linux-pci@vger.kernel.org
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
@@ -7053,7 +7054,7 @@ S:        Maintained
 F:     drivers/pinctrl/sh-pfc/
 
 PIN CONTROLLER - SAMSUNG
-M:     Tomasz Figa <t.figa@samsung.com>
+M:     Tomasz Figa <tomasz.figa@gmail.com>
 M:     Thomas Abraham <thomas.abraham@linaro.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
@@ -7899,7 +7900,8 @@ S:        Supported
 F:     drivers/media/i2c/s5k5baf.c
 
 SAMSUNG SOC CLOCK DRIVERS
-M:     Tomasz Figa <t.figa@samsung.com>
+M:     Sylwester Nawrocki <s.nawrocki@samsung.com>
+M:     Tomasz Figa <tomasz.figa@gmail.com>
 S:     Supported
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 F:     drivers/clk/samsung/
@@ -7912,6 +7914,19 @@ S:       Supported
 L:     netdev@vger.kernel.org
 F:     drivers/net/ethernet/samsung/sxgbe/
 
+SAMSUNG USB2 PHY DRIVER
+M:     Kamil Debski <k.debski@samsung.com>
+L:     linux-kernel@vger.kernel.org
+S:     Supported
+F:     Documentation/devicetree/bindings/phy/samsung-phy.txt
+F:     Documentation/phy/samsung-usb2.txt
+F:     drivers/phy/phy-exynos4210-usb2.c
+F:     drivers/phy/phy-exynos4x12-usb2.c
+F:     drivers/phy/phy-exynos5250-usb2.c
+F:     drivers/phy/phy-s5pv210-usb2.c
+F:     drivers/phy/phy-samsung-usb2.c
+F:     drivers/phy/phy-samsung-usb2.h
+
 SERIAL DRIVERS
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L:     linux-serial@vger.kernel.org
index 1a60bdd..a192280 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 17
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
index 1fe45d1..4361777 100644 (file)
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -93,7 +93,7 @@
        };
 
        tv: connector {
-               compatible = "composite-connector";
+               compatible = "composite-video-connector";
                label = "tv";
 
                port {
index 83259b8..36172ad 100644 (file)
--- a/arch/arm/include/asm/tls.h
+++ b/arch/arm/include/asm/tls.h
@@ -1,6 +1,9 @@
 #ifndef __ASMARM_TLS_H
 #define __ASMARM_TLS_H
 
+#include <linux/compiler.h>
+#include <asm/thread_info.h>
+
 #ifdef __ASSEMBLY__
 #include <asm/asm-offsets.h>
        .macro switch_tls_none, base, tp, tpuser, tmp1, tmp2
 #endif
 
 #ifndef __ASSEMBLY__
+
+static inline void set_tls(unsigned long val)
+{
+       struct thread_info *thread;
+
+       thread = current_thread_info();
+
+       thread->tp_value[0] = val;
+
+       /*
+        * This code runs with preemption enabled and therefore must
+        * be reentrant with respect to switch_tls.
+        *
+        * We need to ensure ordering between the shadow state and the
+        * hardware state, so that we don't corrupt the hardware state
+        * with a stale shadow state during context switch.
+        *
+        * If we're preempted here, switch_tls will load TPIDRURO from
+        * thread_info upon resuming execution and the following mcr
+        * is merely redundant.
+        */
+       barrier();
+
+       if (!tls_emu) {
+               if (has_tls_reg) {
+                       asm("mcr p15, 0, %0, c13, c0, 3"
+                           : : "r" (val));
+               } else {
+                       /*
+                        * User space must never try to access this
+                        * directly.  Expect your app to break
+                        * eventually if you do so.  The user helper
+                        * at 0xffff0fe0 must be used instead.  (see
+                        * entry-armv.S for details)
+                        */
+                       *((unsigned int *)0xffff0ff0) = val;
+               }
+
+       }
+}
+
 static inline unsigned long get_tpuser(void)
 {
        unsigned long reg = 0;
@@ -59,5 +103,23 @@ static inline unsigned long get_tpuser(void)
 
        return reg;
 }
+
+static inline void set_tpuser(unsigned long val)
+{
+       /* Since TPIDRURW is fully context-switched (unlike TPIDRURO),
+        * we need not update thread_info.
+        */
+       if (has_tls_reg && !tls_emu) {
+               asm("mcr p15, 0, %0, c13, c0, 2"
+                   : : "r" (val));
+       }
+}
+
+static inline void flush_tls(void)
+{
+       set_tls(0);
+       set_tpuser(0);
+}
+
 #endif
 #endif /* __ASMARM_TLS_H */
index a4cd7af..4767eb9 100644 (file)
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -107,8 +107,11 @@ static inline void set_fs(mm_segment_t fs)
 extern int __get_user_1(void *);
 extern int __get_user_2(void *);
 extern int __get_user_4(void *);
-extern int __get_user_lo8(void *);
+extern int __get_user_32t_8(void *);
 extern int __get_user_8(void *);
+extern int __get_user_64t_1(void *);
+extern int __get_user_64t_2(void *);
+extern int __get_user_64t_4(void *);
 
 #define __GUP_CLOBBER_1        "lr", "cc"
 #ifdef CONFIG_CPU_USE_DOMAINS
@@ -117,7 +120,7 @@ extern int __get_user_8(void *);
 #define __GUP_CLOBBER_2 "lr", "cc"
 #endif
 #define __GUP_CLOBBER_4        "lr", "cc"
-#define __GUP_CLOBBER_lo8 "lr", "cc"
+#define __GUP_CLOBBER_32t_8 "lr", "cc"
 #define __GUP_CLOBBER_8        "lr", "cc"
 
 #define __get_user_x(__r2,__p,__e,__l,__s)                             \
@@ -131,12 +134,30 @@ extern int __get_user_8(void *);
 
 /* narrowing a double-word get into a single 32bit word register: */
 #ifdef __ARMEB__
-#define __get_user_xb(__r2, __p, __e, __l, __s)                                \
-       __get_user_x(__r2, __p, __e, __l, lo8)
+#define __get_user_x_32t(__r2, __p, __e, __l, __s)                             \
+       __get_user_x(__r2, __p, __e, __l, 32t_8)
 #else
-#define __get_user_xb __get_user_x
+#define __get_user_x_32t __get_user_x
 #endif
 
+/*
+ * storing result into proper least significant word of 64bit target var,
+ * different only for big endian case where 64 bit __r2 lsw is r3:
+ */
+#ifdef __ARMEB__
+#define __get_user_x_64t(__r2, __p, __e, __l, __s)                     \
+          __asm__ __volatile__ (                                       \
+               __asmeq("%0", "r0") __asmeq("%1", "r2")                 \
+               __asmeq("%3", "r1")                                     \
+               "bl     __get_user_64t_" #__s                           \
+               : "=&r" (__e), "=r" (__r2)                              \
+               : "0" (__p), "r" (__l)                                  \
+               : __GUP_CLOBBER_##__s)
+#else
+#define __get_user_x_64t __get_user_x
+#endif
+
+
 #define __get_user_check(x,p)                                                  \
        ({                                                              \
                unsigned long __limit = current_thread_info()->addr_limit - 1; \
@@ -146,17 +167,26 @@ extern int __get_user_8(void *);
                register int __e asm("r0");                             \
                switch (sizeof(*(__p))) {                               \
                case 1:                                                 \
-                       __get_user_x(__r2, __p, __e, __l, 1);           \
+                       if (sizeof((x)) >= 8)                           \
+                               __get_user_x_64t(__r2, __p, __e, __l, 1); \
+                       else                                            \
+                               __get_user_x(__r2, __p, __e, __l, 1);   \
                        break;                                          \
                case 2:                                                 \
-                       __get_user_x(__r2, __p, __e, __l, 2);           \
+                       if (sizeof((x)) >= 8)                           \
+                               __get_user_x_64t(__r2, __p, __e, __l, 2); \
+                       else                                            \
+                               __get_user_x(__r2, __p, __e, __l, 2);   \
                        break;                                          \
                case 4:                                                 \
-                       __get_user_x(__r2, __p, __e, __l, 4);           \
+                       if (sizeof((x)) >= 8)                           \
+                               __get_user_x_64t(__r2, __p, __e, __l, 4); \
+                       else                                            \
+                               __get_user_x(__r2, __p, __e, __l, 4);   \
                        break;                                          \
                case 8:                                                 \
                        if (sizeof((x)) < 8)                            \
-                               __get_user_xb(__r2, __p, __e, __l, 4);  \
+                               __get_user_x_32t(__r2, __p, __e, __l, 4); \
                        else                                            \
                                __get_user_x(__r2, __p, __e, __l, 8);   \
                        break;                                          \
index 1109017..e8275ea 100644 (file)
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -26,25 +26,14 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
        __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
 }
 
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
-               struct dma_attrs *attrs)
-{
-       if (__generic_dma_ops(hwdev)->unmap_page)
-               __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
-}
+               struct dma_attrs *attrs);
 
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
-               dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-       if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
-               __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
-}
+void xen_dma_sync_single_for_cpu(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir);
+
+void xen_dma_sync_single_for_device(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir);
 
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
-               dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-       if (__generic_dma_ops(hwdev)->sync_single_for_device)
-               __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
-}
 #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
index ded062f..135c24a 100644 (file)
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -33,7 +33,6 @@ typedef struct xpaddr {
 #define INVALID_P2M_ENTRY      (~0UL)
 
 unsigned long __pfn_to_mfn(unsigned long pfn);
-unsigned long __mfn_to_pfn(unsigned long mfn);
 extern struct rb_root phys_to_mach;
 
 static inline unsigned long pfn_to_mfn(unsigned long pfn)
@@ -51,14 +50,6 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
 
 static inline unsigned long mfn_to_pfn(unsigned long mfn)
 {
-       unsigned long pfn;
-
-       if (phys_to_mach.rb_node != NULL) {
-               pfn = __mfn_to_pfn(mfn);
-               if (pfn != INVALID_P2M_ENTRY)
-                       return pfn;
-       }
-
        return mfn;
 }
 
index f7b450f..a88671c 100644 (file)
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -98,6 +98,14 @@ EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
 EXPORT_SYMBOL(__get_user_4);
+EXPORT_SYMBOL(__get_user_8);
+
+#ifdef __ARMEB__
+EXPORT_SYMBOL(__get_user_64t_1);
+EXPORT_SYMBOL(__get_user_64t_2);
+EXPORT_SYMBOL(__get_user_64t_4);
+EXPORT_SYMBOL(__get_user_32t_8);
+#endif
 
 EXPORT_SYMBOL(__put_user_1);
 EXPORT_SYMBOL(__put_user_2);
index 2c42576..5c4d38e 100644 (file)
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -175,7 +175,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
        c = irq_data_get_irq_chip(d);
        if (!c->irq_set_affinity)
                pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-       else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+       else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
                cpumask_copy(d->affinity, affinity);
 
        return ret;
index e6a6edb..4bf4cce 100644 (file)
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -76,21 +76,15 @@ static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
 
 static void cpu_pmu_enable_percpu_irq(void *data)
 {
-       struct arm_pmu *cpu_pmu = data;
-       struct platform_device *pmu_device = cpu_pmu->plat_device;
-       int irq = platform_get_irq(pmu_device, 0);
+       int irq = *(int *)data;
 
        enable_percpu_irq(irq, IRQ_TYPE_NONE);
-       cpumask_set_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
 }
 
 static void cpu_pmu_disable_percpu_irq(void *data)
 {
-       struct arm_pmu *cpu_pmu = data;
-       struct platform_device *pmu_device = cpu_pmu->plat_device;
-       int irq = platform_get_irq(pmu_device, 0);
+       int irq = *(int *)data;
 
-       cpumask_clear_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
        disable_percpu_irq(irq);
 }
 
@@ -103,7 +97,7 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 
        irq = platform_get_irq(pmu_device, 0);
        if (irq >= 0 && irq_is_percpu(irq)) {
-               on_each_cpu(cpu_pmu_disable_percpu_irq, cpu_pmu, 1);
+               on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
                free_percpu_irq(irq, &percpu_pmu);
        } else {
                for (i = 0; i < irqs; ++i) {
@@ -138,7 +132,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
                                irq);
                        return err;
                }
-               on_each_cpu(cpu_pmu_enable_percpu_irq, cpu_pmu, 1);
+               on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
        } else {
                for (i = 0; i < irqs; ++i) {
                        err = 0;
index 81ef686..a35f6eb 100644 (file)
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -334,6 +334,8 @@ void flush_thread(void)
        memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
        memset(&thread->fpstate, 0, sizeof(union fp_state));
 
+       flush_tls();
+
        thread_notify(THREAD_NOTIFY_FLUSH, thread);
 }
 
index 67ca857..587fdfe 100644 (file)
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -142,14 +142,6 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
        while (1) {
                unsigned long temp;
 
-               /*
-                * Barrier required between accessing protected resource and
-                * releasing a lock for it. Legacy code might not have done
-                * this, and we cannot determine that this is not the case
-                * being emulated, so insert always.
-                */
-               smp_mb();
-
                if (type == TYPE_SWPB)
                        __user_swpb_asm(*data, address, res, temp);
                else
@@ -162,13 +154,6 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
        }
 
        if (res == 0) {
-               /*
-                * Barrier also required between acquiring a lock for a
-                * protected resource and accessing the resource. Inserted for
-                * same reason as above.
-                */
-               smp_mb();
-
                if (type == TYPE_SWPB)
                        swpbcounter++;
                else
index 7b8403b..80f0d69 100644 (file)
--- a/arch/arm/kernel/thumbee.c
+++ b/arch/arm/kernel/thumbee.c
@@ -45,7 +45,7 @@ static int thumbee_notifier(struct notifier_block *self, unsigned long cmd, void
 
        switch (cmd) {
        case THREAD_NOTIFY_FLUSH:
-               thread->thumbee_state = 0;
+               teehbr_write(0);
                break;
        case THREAD_NOTIFY_SWITCH:
                current_thread_info()->thumbee_state = teehbr_read();
index c8e4bb7..a964c9f 100644 (file)
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -581,7 +581,6 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
 #define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
 asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 {
-       struct thread_info *thread = current_thread_info();
        siginfo_t info;
 
        if ((no >> 16) != (__ARM_NR_BASE>> 16))
@@ -632,21 +631,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
                return regs->ARM_r0;
 
        case NR(set_tls):
-               thread->tp_value[0] = regs->ARM_r0;
-               if (tls_emu)
-                       return 0;
-               if (has_tls_reg) {
-                       asm ("mcr p15, 0, %0, c13, c0, 3"
-                               : : "r" (regs->ARM_r0));
-               } else {
-                       /*
-                        * User space must never try to access this directly.
-                        * Expect your app to break eventually if you do so.
-                        * The user helper at 0xffff0fe0 must be used instead.
-                        * (see entry-armv.S for details)
-                        */
-                       *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
-               }
+               set_tls(regs->ARM_r0);
                return 0;
 
 #ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
index 9386000..8ecfd15 100644 (file)
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -80,7 +80,7 @@ ENTRY(__get_user_8)
 ENDPROC(__get_user_8)
 
 #ifdef __ARMEB__
-ENTRY(__get_user_lo8)
+ENTRY(__get_user_32t_8)
        check_uaccess r0, 8, r1, r2, __get_user_bad
 #ifdef CONFIG_CPU_USE_DOMAINS
        add     r0, r0, #4
@@ -90,7 +90,37 @@ ENTRY(__get_user_lo8)
 #endif
        mov     r0, #0
        ret     lr
-ENDPROC(__get_user_lo8)
+ENDPROC(__get_user_32t_8)
+
+ENTRY(__get_user_64t_1)
+       check_uaccess r0, 1, r1, r2, __get_user_bad8
+8: TUSER(ldrb) r3, [r0]
+       mov     r0, #0
+       ret     lr
+ENDPROC(__get_user_64t_1)
+
+ENTRY(__get_user_64t_2)
+       check_uaccess r0, 2, r1, r2, __get_user_bad8
+#ifdef CONFIG_CPU_USE_DOMAINS
+rb     .req    ip
+9:     ldrbt   r3, [r0], #1
+10:    ldrbt   rb, [r0], #0
+#else
+rb     .req    r0
+9:     ldrb    r3, [r0]
+10:    ldrb    rb, [r0, #1]
+#endif
+       orr     r3, rb, r3, lsl #8
+       mov     r0, #0
+       ret     lr
+ENDPROC(__get_user_64t_2)
+
+ENTRY(__get_user_64t_4)
+       check_uaccess r0, 4, r1, r2, __get_user_bad8
+11: TUSER(ldr) r3, [r0]
+       mov     r0, #0
+       ret     lr
+ENDPROC(__get_user_64t_4)
 #endif
 
 __get_user_bad8:
@@ -111,5 +141,9 @@ ENDPROC(__get_user_bad8)
        .long   6b, __get_user_bad8
 #ifdef __ARMEB__
        .long   7b, __get_user_bad
+       .long   8b, __get_user_bad8
+       .long   9b, __get_user_bad8
+       .long   10b, __get_user_bad8
+       .long   11b, __get_user_bad8
 #endif
 .popsection
index 1a24e92..b64e67c 100644 (file)
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -146,7 +146,6 @@ ENDPROC(cpu_v7_set_pte_ext)
        mov     \tmp, \ttbr1, lsr #(32 - ARCH_PGD_SHIFT)        @ upper bits
        mov     \ttbr1, \ttbr1, lsl #ARCH_PGD_SHIFT             @ lower bits
        addls   \ttbr1, \ttbr1, #TTBR1_OFFSET
-       adcls   \tmp, \tmp, #0
        mcrr    p15, 1, \ttbr1, \tmp, c2                        @ load TTBR1
        mov     \tmp, \ttbr0, lsr #(32 - ARCH_PGD_SHIFT)        @ upper bits
        mov     \ttbr0, \ttbr0, lsl #ARCH_PGD_SHIFT             @ lower bits
index 1296952..1f85bfe 100644 (file)
--- a/arch/arm/xen/Makefile
+++ b/arch/arm/xen/Makefile
@@ -1 +1 @@
-obj-y          := enlighten.o hypercall.o grant-table.o p2m.o mm.o
+obj-y          := enlighten.o hypercall.o grant-table.o p2m.o mm.o mm32.o
index 98544c5..0e15f01 100644 (file)
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -260,6 +260,12 @@ static int __init xen_guest_init(void)
        xen_domain_type = XEN_HVM_DOMAIN;
 
        xen_setup_features();
+
+       if (!xen_feature(XENFEAT_grant_map_identity)) {
+               pr_warn("Please upgrade your Xen.\n"
+                               "If your platform has any non-coherent DMA devices, they won't work properly.\n");
+       }
+
        if (xen_feature(XENFEAT_dom0))
                xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
        else
diff --git a/arch/arm/xen/mm32.c b/arch/arm/xen/mm32.c
new file mode 100644 (file)
index 0000000..3b99860
--- /dev/null
+++ b/arch/arm/xen/mm32.c
@@ -0,0 +1,202 @@
+#include <linux/cpu.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+
+#include <xen/features.h>
+
+static DEFINE_PER_CPU(unsigned long, xen_mm32_scratch_virt);
+static DEFINE_PER_CPU(pte_t *, xen_mm32_scratch_ptep);
+
+static int alloc_xen_mm32_scratch_page(int cpu)
+{
+       struct page *page;
+       unsigned long virt;
+       pmd_t *pmdp;
+       pte_t *ptep;
+
+       if (per_cpu(xen_mm32_scratch_ptep, cpu) != NULL)
+               return 0;
+
+       page = alloc_page(GFP_KERNEL);
+       if (page == NULL) {
+               pr_warn("Failed to allocate xen_mm32_scratch_page for cpu %d\n", cpu);
+               return -ENOMEM;
+       }
+
+       virt = (unsigned long)__va(page_to_phys(page));
+       pmdp = pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
+       ptep = pte_offset_kernel(pmdp, virt);
+
+       per_cpu(xen_mm32_scratch_virt, cpu) = virt;
+       per_cpu(xen_mm32_scratch_ptep, cpu) = ptep;
+
+       return 0;
+}
+
+static int xen_mm32_cpu_notify(struct notifier_block *self,
+                                   unsigned long action, void *hcpu)
+{
+       int cpu = (long)hcpu;
+       switch (action) {
+       case CPU_UP_PREPARE:
+               if (alloc_xen_mm32_scratch_page(cpu))
+                       return NOTIFY_BAD;
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+static struct notifier_block xen_mm32_cpu_notifier = {
+       .notifier_call  = xen_mm32_cpu_notify,
+};
+
+static void* xen_mm32_remap_page(dma_addr_t handle)
+{
+       unsigned long virt = get_cpu_var(xen_mm32_scratch_virt);
+       pte_t *ptep = __get_cpu_var(xen_mm32_scratch_ptep);
+
+       *ptep = pfn_pte(handle >> PAGE_SHIFT, PAGE_KERNEL);
+       local_flush_tlb_kernel_page(virt);
+
+       return (void*)virt;
+}
+
+static void xen_mm32_unmap(void *vaddr)
+{
+       put_cpu_var(xen_mm32_scratch_virt);
+}
+
+
+/* functions called by SWIOTLB */
+
+static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
+       size_t size, enum dma_data_direction dir,
+       void (*op)(const void *, size_t, int))
+{
+       unsigned long pfn;
+       size_t left = size;
+
+       pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
+       offset %= PAGE_SIZE;
+
+       do {
+               size_t len = left;
+               void *vaddr;
+       
+               if (!pfn_valid(pfn))
+               {
+                       /* Cannot map the page, we don't know its physical address.
+                        * Return and hope for the best */
+                       if (!xen_feature(XENFEAT_grant_map_identity))
+                               return;
+                       vaddr = xen_mm32_remap_page(handle) + offset;
+                       op(vaddr, len, dir);
+                       xen_mm32_unmap(vaddr - offset);
+               } else {
+                       struct page *page = pfn_to_page(pfn);
+
+                       if (PageHighMem(page)) {
+                               if (len + offset > PAGE_SIZE)
+                                       len = PAGE_SIZE - offset;
+
+                               if (cache_is_vipt_nonaliasing()) {
+                                       vaddr = kmap_atomic(page);
+                                       op(vaddr + offset, len, dir);
+                                       kunmap_atomic(vaddr);
+                               } else {
+                                       vaddr = kmap_high_get(page);
+                                       if (vaddr) {
+                                               op(vaddr + offset, len, dir);
+                                               kunmap_high(page);
+                                       }
+                               }
+                       } else {
+                               vaddr = page_address(page) + offset;
+                               op(vaddr, len, dir);
+                       }
+               }
+
+               offset = 0;
+               pfn++;
+               left -= len;
+       } while (left);
+}
+
+static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir)
+{
+       /* Cannot use __dma_page_dev_to_cpu because we don't have a
+        * struct page for handle */
+
+       if (dir != DMA_TO_DEVICE)
+               outer_inv_range(handle, handle + size);
+
+       dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_unmap_area);
+}
+
+static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir)
+{
+
+       dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_map_area);
+
+       if (dir == DMA_FROM_DEVICE) {
+               outer_inv_range(handle, handle + size);
+       } else {
+               outer_clean_range(handle, handle + size);
+       }
+}
+
+void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir,
+               struct dma_attrs *attrs)
+
+{
+       if (!__generic_dma_ops(hwdev)->unmap_page)
+               return;
+       if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+               return;
+
+       __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+}
+
+void xen_dma_sync_single_for_cpu(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       if (!__generic_dma_ops(hwdev)->sync_single_for_cpu)
+               return;
+       __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+}
+
+void xen_dma_sync_single_for_device(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       if (!__generic_dma_ops(hwdev)->sync_single_for_device)
+               return;
+       __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
+}
+
+int __init xen_mm32_init(void)
+{
+       int cpu;
+
+       if (!xen_initial_domain())
+               return 0;
+
+       register_cpu_notifier(&xen_mm32_cpu_notifier);
+       get_online_cpus();
+       for_each_online_cpu(cpu) {
+               if (alloc_xen_mm32_scratch_page(cpu)) {
+                       put_online_cpus();
+                       unregister_cpu_notifier(&xen_mm32_cpu_notifier);
+                       return -ENOMEM;
+               }
+       }
+       put_online_cpus();
+
+       return 0;
+}
+arch_initcall(xen_mm32_init);
index 97baf44..0548577 100644 (file)
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -21,14 +21,12 @@ struct xen_p2m_entry {
        unsigned long pfn;
        unsigned long mfn;
        unsigned long nr_pages;
-       struct rb_node rbnode_mach;
        struct rb_node rbnode_phys;
 };
 
 static rwlock_t p2m_lock;
 struct rb_root phys_to_mach = RB_ROOT;
 EXPORT_SYMBOL_GPL(phys_to_mach);
-static struct rb_root mach_to_phys = RB_ROOT;
 
 static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
 {
@@ -41,8 +39,6 @@ static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
                parent = *link;
                entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys);
 
-               if (new->mfn == entry->mfn)
-                       goto err_out;
                if (new->pfn == entry->pfn)
                        goto err_out;
 
@@ -88,64 +84,6 @@ unsigned long __pfn_to_mfn(unsigned long pfn)
 }
 EXPORT_SYMBOL_GPL(__pfn_to_mfn);
 
-static int xen_add_mach_to_phys_entry(struct xen_p2m_entry *new)
-{
-       struct rb_node **link = &mach_to_phys.rb_node;
-       struct rb_node *parent = NULL;
-       struct xen_p2m_entry *entry;
-       int rc = 0;
-
-       while (*link) {
-               parent = *link;
-               entry = rb_entry(parent, struct xen_p2m_entry, rbnode_mach);
-
-               if (new->mfn == entry->mfn)
-                       goto err_out;
-               if (new->pfn == entry->pfn)
-                       goto err_out;
-
-               if (new->mfn < entry->mfn)
-                       link = &(*link)->rb_left;
-               else
-                       link = &(*link)->rb_right;
-       }
-       rb_link_node(&new->rbnode_mach, parent, link);
-       rb_insert_color(&new->rbnode_mach, &mach_to_phys);
-       goto out;
-
-err_out:
-       rc = -EINVAL;
-       pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
-                       __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
-out:
-       return rc;
-}
-
-unsigned long __mfn_to_pfn(unsigned long mfn)
-{
-       struct rb_node *n = mach_to_phys.rb_node;
-       struct xen_p2m_entry *entry;
-       unsigned long irqflags;
-
-       read_lock_irqsave(&p2m_lock, irqflags);
-       while (n) {
-               entry = rb_entry(n, struct xen_p2m_entry, rbnode_mach);
-               if (entry->mfn <= mfn &&
-                               entry->mfn + entry->nr_pages > mfn) {
-                       read_unlock_irqrestore(&p2m_lock, irqflags);
-                       return entry->pfn + (mfn - entry->mfn);
-               }
-               if (mfn < entry->mfn)
-                       n = n->rb_left;
-               else
-                       n = n->rb_right;
-       }
-       read_unlock_irqrestore(&p2m_lock, irqflags);
-
-       return INVALID_P2M_ENTRY;
-}
-EXPORT_SYMBOL_GPL(__mfn_to_pfn);
-
 int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
                            struct gnttab_map_grant_ref *kmap_ops,
                            struct page **pages, unsigned int count)
@@ -192,7 +130,6 @@ bool __set_phys_to_machine_multi(unsigned long pfn,
                        p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
                        if (p2m_entry->pfn <= pfn &&
                                        p2m_entry->pfn + p2m_entry->nr_pages > pfn) {
-                               rb_erase(&p2m_entry->rbnode_mach, &mach_to_phys);
                                rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach);
                                write_unlock_irqrestore(&p2m_lock, irqflags);
                                kfree(p2m_entry);
@@ -217,8 +154,7 @@ bool __set_phys_to_machine_multi(unsigned long pfn,
        p2m_entry->mfn = mfn;
 
        write_lock_irqsave(&p2m_lock, irqflags);
-       if ((rc = xen_add_phys_to_mach_entry(p2m_entry) < 0) ||
-               (rc = xen_add_mach_to_phys_entry(p2m_entry) < 0)) {
+       if ((rc = xen_add_phys_to_mach_entry(p2m_entry)) < 0) {
                write_unlock_irqrestore(&p2m_lock, irqflags);
                return false;
        }
index 0f08dfd..dfa6e3e 100644 (file)
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -97,19 +97,15 @@ static bool migrate_one_irq(struct irq_desc *desc)
        if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
                return false;
 
-       if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids)
+       if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+               affinity = cpu_online_mask;
                ret = true;
+       }
 
-       /*
-        * when using forced irq_set_affinity we must ensure that the cpu
-        * being offlined is not present in the affinity mask, it may be
-        * selected as the target CPU otherwise
-        */
-       affinity = cpu_online_mask;
        c = irq_data_get_irq_chip(d);
        if (!c->irq_set_affinity)
                pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-       else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+       else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
                cpumask_copy(d->affinity, affinity);
 
        return ret;
index 1309d64..29d4869 100644 (file)
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -230,9 +230,27 @@ void exit_thread(void)
 {
 }
 
+static void tls_thread_flush(void)
+{
+       asm ("msr tpidr_el0, xzr");
+
+       if (is_compat_task()) {
+               current->thread.tp_value = 0;
+
+               /*
+                * We need to ensure ordering between the shadow state and the
+                * hardware state, so that we don't corrupt the hardware state
+                * with a stale shadow state during context switch.
+                */
+               barrier();
+               asm ("msr tpidrro_el0, xzr");
+       }
+}
+
 void flush_thread(void)
 {
        fpsimd_flush_thread();
+       tls_thread_flush();
        flush_ptrace_hw_breakpoint(current);
 }
 
index de2b022..dc47e53 100644 (file)
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -79,6 +79,12 @@ long compat_arm_syscall(struct pt_regs *regs)
 
        case __ARM_NR_compat_set_tls:
                current->thread.tp_value = regs->regs[0];
+
+               /*
+                * Protect against register corruption from context switch.
+                * See comment in tls_thread_flush.
+                */
+               barrier();
                asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0]));
                return 0;
 
index 5472c24..a83061f 100644 (file)
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -149,8 +149,7 @@ void __init arm64_memblock_init(void)
                memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start);
 #endif
 
-       if (!efi_enabled(EFI_MEMMAP))
-               early_init_fdt_scan_reserved_mem();
+       early_init_fdt_scan_reserved_mem();
 
        /* 4GB maximum for 32-bit only capable devices */
        if (IS_ENABLED(CONFIG_ZONE_DMA))
index 4c4ac16..b6bda18 100644 (file)
--- a/arch/ia64/configs/bigsur_defconfig
+++ b/arch/ia64/configs/bigsur_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -6,6 +5,8 @@ CONFIG_PROFILING=y
 CONFIG_OPROFILE=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_SGI_PARTITION=y
 CONFIG_IA64_DIG=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=2
@@ -51,9 +52,6 @@ CONFIG_DM_MIRROR=m
 CONFIG_DM_ZERO=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_NET_PCI=y
 CONFIG_INPUT_EVDEV=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
@@ -85,7 +83,6 @@ CONFIG_EXT3_FS=y
 CONFIG_XFS_FS=y
 CONFIG_XFS_QUOTA=y
 CONFIG_XFS_POSIX_ACL=y
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
@@ -95,17 +92,13 @@ CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
 CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
 CONFIG_NFSD=m
 CONFIG_NFSD_V4=y
 CONFIG_CIFS=m
 CONFIG_CIFS_STATS=y
 CONFIG_CIFS_XATTR=y
 CONFIG_CIFS_POSIX=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_SGI_PARTITION=y
-CONFIG_EFI_PARTITION=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_NLS_UTF8=m
index e8ed3ae..81f686d 100644 (file)
--- a/arch/ia64/configs/generic_defconfig
+++ b/arch/ia64/configs/generic_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_IKCONFIG=y
@@ -6,13 +5,13 @@ CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=20
 CONFIG_CGROUPS=y
 CONFIG_CPUSETS=y
-CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_SGI_PARTITION=y
 CONFIG_MCKINLEY=y
 CONFIG_IA64_PAGE_SIZE_64KB=y
 CONFIG_IA64_CYCLONE=y
@@ -29,14 +28,13 @@ CONFIG_ACPI_BUTTON=m
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_DOCK=y
 CONFIG_ACPI_PROCESSOR=m
-CONFIG_ACPI_CONTAINER=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
-CONFIG_ARPD=y
 CONFIG_SYN_COOKIES=y
 # CONFIG_IPV6 is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
@@ -82,16 +80,13 @@ CONFIG_FUSION_FC=m
 CONFIG_FUSION_SAS=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_NET_ETHERNET=y
+CONFIG_NETCONSOLE=y
+CONFIG_TIGON3=y
 CONFIG_NET_TULIP=y
 CONFIG_TULIP=m
-CONFIG_NET_PCI=y
-CONFIG_NET_VENDOR_INTEL=y
 CONFIG_E100=m
 CONFIG_E1000=y
 CONFIG_IGB=y
-CONFIG_TIGON3=y
-CONFIG_NETCONSOLE=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_GAMEPORT=m
 CONFIG_SERIAL_NONSTANDARD=y
@@ -151,6 +146,7 @@ CONFIG_USB_STORAGE=m
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_MTHCA=m
 CONFIG_INFINIBAND_IPOIB=m
+CONFIG_INTEL_IOMMU=y
 CONFIG_MSPEC=m
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
@@ -164,7 +160,6 @@ CONFIG_REISERFS_FS_XATTR=y
 CONFIG_REISERFS_FS_POSIX_ACL=y
 CONFIG_REISERFS_FS_SECURITY=y
 CONFIG_XFS_FS=y
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
@@ -175,16 +170,10 @@ CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
 CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
 CONFIG_NFSD=m
 CONFIG_NFSD_V4=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
 CONFIG_CIFS=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_SGI_PARTITION=y
-CONFIG_EFI_PARTITION=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
 CONFIG_NLS_CODEPAGE_775=m
@@ -225,11 +214,7 @@ CONFIG_NLS_UTF8=m
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_MUTEXES=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_MD5=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 CONFIG_CRC_T10DIF=y
-CONFIG_INTEL_IOMMU=y
index d663efd..5b4fcdd 100644 (file)
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_IKCONFIG=y
@@ -9,6 +8,8 @@ CONFIG_KALLSYMS_ALL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_SGI_PARTITION=y
 CONFIG_MCKINLEY=y
 CONFIG_IA64_CYCLONE=y
 CONFIG_SMP=y
@@ -24,14 +25,12 @@ CONFIG_BINFMT_MISC=m
 CONFIG_ACPI_BUTTON=m
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_PROCESSOR=m
-CONFIG_ACPI_CONTAINER=m
 CONFIG_HOTPLUG_PCI=y
-CONFIG_HOTPLUG_PCI_ACPI=m
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
-CONFIG_ARPD=y
 CONFIG_SYN_COOKIES=y
 # CONFIG_IPV6 is not set
 CONFIG_BLK_DEV_LOOP=m
@@ -71,15 +70,12 @@ CONFIG_FUSION_SPI=y
 CONFIG_FUSION_FC=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_NET_ETHERNET=y
+CONFIG_NETCONSOLE=y
+CONFIG_TIGON3=y
 CONFIG_NET_TULIP=y
 CONFIG_TULIP=m
-CONFIG_NET_PCI=y
-CONFIG_NET_VENDOR_INTEL=y
 CONFIG_E100=m
 CONFIG_E1000=y
-CONFIG_TIGON3=y
-CONFIG_NETCONSOLE=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_GAMEPORT=m
 CONFIG_SERIAL_NONSTANDARD=y
@@ -146,7 +142,6 @@ CONFIG_REISERFS_FS_XATTR=y
 CONFIG_REISERFS_FS_POSIX_ACL=y
 CONFIG_REISERFS_FS_SECURITY=y
 CONFIG_XFS_FS=y
-CONFIG_AUTOFS_FS=y
 CONFIG_AUTOFS4_FS=y
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
@@ -157,16 +152,10 @@ CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
 CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
 CONFIG_NFSD=m
 CONFIG_NFSD_V4=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
 CONFIG_CIFS=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_SGI_PARTITION=y
-CONFIG_EFI_PARTITION=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
 CONFIG_NLS_CODEPAGE_775=m
index b4548a3..f0f69fd 100644 (file)
@@ -1,13 +1,12 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=16
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
+CONFIG_PARTITION_ADVANCED=y
 CONFIG_IA64_HP_SIM=y
 CONFIG_MCKINLEY=y
 CONFIG_IA64_PAGE_SIZE_64KB=y
@@ -27,7 +26,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
-CONFIG_SCSI_MULTI_LUN=y
 CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
 CONFIG_SCSI_SPI_ATTRS=y
@@ -49,8 +47,6 @@ CONFIG_HUGETLBFS=y
 CONFIG_NFS_FS=y
 CONFIG_NFSD=y
 CONFIG_NFSD_V3=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_EFI_PARTITION=y
+CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_INFO=y
index c8a3f40..192ed15 100644 (file)
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_IKCONFIG=y
@@ -11,6 +10,8 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
 # CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_SGI_PARTITION=y
 CONFIG_IA64_DIG=y
 CONFIG_MCKINLEY=y
 CONFIG_IA64_PAGE_SIZE_64KB=y
@@ -29,14 +30,12 @@ CONFIG_BINFMT_MISC=m
 CONFIG_ACPI_BUTTON=m
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_PROCESSOR=m
-CONFIG_ACPI_CONTAINER=m
 CONFIG_HOTPLUG_PCI=y
-CONFIG_HOTPLUG_PCI_ACPI=m
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
-CONFIG_ARPD=y
 CONFIG_SYN_COOKIES=y
 # CONFIG_IPV6 is not set
 CONFIG_BLK_DEV_LOOP=m
@@ -53,6 +52,7 @@ CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
 CONFIG_BLK_DEV_SR=m
 CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_FC_ATTRS=y
 CONFIG_SCSI_SYM53C8XX_2=y
 CONFIG_SCSI_QLOGIC_1280=y
 CONFIG_MD=y
@@ -72,15 +72,12 @@ CONFIG_FUSION_FC=y
 CONFIG_FUSION_CTL=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_NET_ETHERNET=y
+CONFIG_NETCONSOLE=y
+CONFIG_TIGON3=y
 CONFIG_NET_TULIP=y
 CONFIG_TULIP=m
-CONFIG_NET_PCI=y
-CONFIG_NET_VENDOR_INTEL=y
 CONFIG_E100=m
 CONFIG_E1000=y
-CONFIG_TIGON3=y
-CONFIG_NETCONSOLE=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_GAMEPORT=m
 CONFIG_SERIAL_NONSTANDARD=y
@@ -118,7 +115,6 @@ CONFIG_REISERFS_FS_XATTR=y
 CONFIG_REISERFS_FS_POSIX_ACL=y
 CONFIG_REISERFS_FS_SECURITY=y
 CONFIG_XFS_FS=y
-CONFIG_AUTOFS_FS=y
 CONFIG_AUTOFS4_FS=y
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
@@ -129,16 +125,10 @@ CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
 CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
 CONFIG_NFSD=m
 CONFIG_NFSD_V4=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
 CONFIG_CIFS=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_SGI_PARTITION=y
-CONFIG_EFI_PARTITION=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
 CONFIG_NLS_CODEPAGE_775=m
@@ -180,6 +170,5 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_MUTEXES=y
 CONFIG_IA64_GRANULE_16MB=y
-CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_MD5=y
index 54bc72e..b504c8e 100644 (file)
@@ -1,9 +1,9 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
+CONFIG_PARTITION_ADVANCED=y
 CONFIG_IA64_HP_ZX1=y
 CONFIG_MCKINLEY=y
 CONFIG_SMP=y
@@ -18,6 +18,7 @@ CONFIG_EFI_VARS=y
 CONFIG_BINFMT_MISC=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
@@ -37,9 +38,9 @@ CONFIG_CHR_DEV_OSST=y
 CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
 CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_FC_ATTRS=y
 CONFIG_SCSI_SYM53C8XX_2=y
 CONFIG_SCSI_QLOGIC_1280=y
 CONFIG_FUSION=y
@@ -48,18 +49,15 @@ CONFIG_FUSION_FC=y
 CONFIG_FUSION_CTL=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=y
-CONFIG_NET_ETHERNET=y
+CONFIG_TIGON3=y
 CONFIG_NET_TULIP=y
 CONFIG_TULIP=y
 CONFIG_TULIP_MWI=y
 CONFIG_TULIP_MMIO=y
 CONFIG_TULIP_NAPI=y
 CONFIG_TULIP_NAPI_HW_MITIGATION=y
-CONFIG_NET_PCI=y
-CONFIG_NET_VENDOR_INTEL=y
 CONFIG_E100=y
 CONFIG_E1000=y
-CONFIG_TIGON3=y
 CONFIG_INPUT_JOYDEV=y
 CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_KEYBOARD is not set
@@ -100,7 +98,6 @@ CONFIG_USB_STORAGE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT3_FS=y
-CONFIG_AUTOFS_FS=y
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_UDF_FS=y
@@ -110,12 +107,9 @@ CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
 CONFIG_NFSD=y
 CONFIG_NFSD_V3=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_EFI_PARTITION=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=y
 CONFIG_NLS_CODEPAGE_775=y
index 6a65bb7..18026b2 100644 (file)
 #define __NR_sched_getattr             1337
 #define __NR_renameat2                 1338
 #define __NR_getrandom                 1339
-#define __NR_memfd_create              1339
+#define __NR_memfd_create              1340
 
 #endif /* _UAPI_ASM_IA64_UNISTD_H */
index ec73b2c..fc505d5 100644 (file)
@@ -38,27 +38,6 @@ static void pci_fixup_video(struct pci_dev *pdev)
                return;
        /* Maybe, this machine supports legacy memory map. */
 
-       if (!vga_default_device()) {
-               resource_size_t start, end;
-               int i;
-
-               /* Does firmware framebuffer belong to us? */
-               for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
-                       if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
-                               continue;
-
-                       start = pci_resource_start(pdev, i);
-                       end  = pci_resource_end(pdev, i);
-
-                       if (!start || !end)
-                               continue;
-
-                       if (screen_info.lfb_base >= start &&
-                           (screen_info.lfb_base + screen_info.lfb_size) < end)
-                               vga_set_default_device(pdev);
-               }
-       }
-
        /* Is VGA routed to us? */
        bus = pdev->bus;
        while (bus) {
@@ -83,8 +62,7 @@ static void pci_fixup_video(struct pci_dev *pdev)
                pci_read_config_word(pdev, PCI_COMMAND, &config);
                if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
                        pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW;
-                       dev_printk(KERN_DEBUG, &pdev->dev, "Boot video device\n");
-                       vga_set_default_device(pdev);
+                       dev_printk(KERN_DEBUG, &pdev->dev, "Video device with shadowed ROM\n");
                }
        }
 }
index 40e1c1d..6feded3 100644 (file)
@@ -127,7 +127,7 @@ config SECCOMP
 
 endmenu
 
-menu "Advanced setup"
+menu "Kernel features"
 
 config ADVANCED_OPTIONS
        bool "Prompt for advanced kernel configuration options"
@@ -248,10 +248,10 @@ config MICROBLAZE_64K_PAGES
 
 endchoice
 
-endmenu
-
 source "mm/Kconfig"
 
+endmenu
+
 menu "Executable file formats"
 
 source "fs/Kconfig.binfmt"
index b4a4cb1..596e485 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <asm/percpu.h>
 #include <asm/ptrace.h>
+#include <linux/linkage.h>
 
 /*
  * These are per-cpu variables required in entry.S, among other
index 0aa0057..59a89a6 100644 (file)
@@ -98,13 +98,13 @@ static inline int access_ok(int type, const void __user *addr,
 
        if ((get_fs().seg < ((unsigned long)addr)) ||
                        (get_fs().seg < ((unsigned long)addr + size - 1))) {
-               pr_debug("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
+               pr_devel("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
                        type ? "WRITE" : "READ ", (__force u32)addr, (u32)size,
                        (u32)get_fs().seg);
                return 0;
        }
 ok:
-       pr_debug("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
+       pr_devel("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
                        type ? "WRITE" : "READ ", (__force u32)addr, (u32)size,
                        (u32)get_fs().seg);
        return 1;
index fd56a8f..ea4b233 100644 (file)
@@ -38,6 +38,6 @@
 
 #endif /* __ASSEMBLY__ */
 
-#define __NR_syscalls         381
+#define __NR_syscalls         387
 
 #endif /* _ASM_MICROBLAZE_UNISTD_H */
index 900c7e5..574c430 100644 (file)
@@ -546,6 +546,7 @@ config SGI_IP28
        # select SYS_HAS_EARLY_PRINTK
        select SYS_SUPPORTS_64BIT_KERNEL
        select SYS_SUPPORTS_BIG_ENDIAN
+       select MIPS_L1_CACHE_SHIFT_7
       help
         This is the SGI Indigo2 with R10000 processor.  To compile a Linux
         kernel that runs on these, say Y here.
@@ -2029,7 +2030,9 @@ config MIPS_CMP
        bool "MIPS CMP framework support (DEPRECATED)"
        depends on SYS_SUPPORTS_MIPS_CMP
        select MIPS_GIC_IPI
+       select SMP
        select SYNC_R4K
+       select SYS_SUPPORTS_SMP
        select WEAK_ORDERING
        default n
        help
index 9336509..bbac51e 100644 (file)
@@ -113,7 +113,16 @@ predef-le += -DMIPSEL -D_MIPSEL -D__MIPSEL -D__MIPSEL__
 cflags-$(CONFIG_CPU_BIG_ENDIAN)                += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' && echo -EB $(undef-all) $(predef-be))
 cflags-$(CONFIG_CPU_LITTLE_ENDIAN)     += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le))
 
-cflags-$(CONFIG_CPU_HAS_SMARTMIPS)     += $(call cc-option,-msmartmips)
+# For smartmips configurations, there are hundreds of warnings due to ISA overrides
+# in assembly and header files. smartmips is only supported for MIPS32r1 onwards
+# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or
+# similar directives in the kernel will spam the build logs with the following warnings:
+# Warning: the `smartmips' extension requires MIPS32 revision 1 or greater
+# or
+# Warning: the 64-bit MIPS architecture does not support the `smartmips' extension
+# Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has
+# been fixed properly.
+cflags-$(CONFIG_CPU_HAS_SMARTMIPS)     += $(call cc-option,-msmartmips) -Wa,--no-warn
 cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips)
 
 cflags-$(CONFIG_SB1XXX_CORELIS)        += $(call cc-option,-mno-sched-prolog) \
index 37eb2d1..b94bf44 100644 (file)
@@ -434,7 +434,7 @@ static void bcm63xx_init_irq(void)
                irq_stat_addr[0] += PERF_IRQSTAT_3368_REG;
                irq_mask_addr[0] += PERF_IRQMASK_3368_REG;
                irq_stat_addr[1] = 0;
-               irq_stat_addr[1] = 0;
+               irq_mask_addr[1] = 0;
                irq_bits = 32;
                ext_irq_count = 4;
                ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368;
@@ -443,7 +443,7 @@ static void bcm63xx_init_irq(void)
                irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0);
                irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0);
                irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1);
-               irq_stat_addr[1] += PERF_IRQMASK_6328_REG(1);
+               irq_mask_addr[1] += PERF_IRQMASK_6328_REG(1);
                irq_bits = 64;
                ext_irq_count = 4;
                is_ext_irq_cascaded = 1;
index b49c7ad..31903cf 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <linux/types.h>
 #include <linux/kernel.h>
+#include <linux/string.h>
 
 #include <asm/addrspace.h>
 
index d035298..51f80bd 100644 (file)
@@ -16,8 +16,8 @@
 extern void octeon_cop2_save(struct octeon_cop2_state *);
 extern void octeon_cop2_restore(struct octeon_cop2_state *);
 
-#define cop2_save(r)           octeon_cop2_save(r)
-#define cop2_restore(r)                octeon_cop2_restore(r)
+#define cop2_save(r)           octeon_cop2_save(&(r)->thread.cp2)
+#define cop2_restore(r)                octeon_cop2_restore(&(r)->thread.cp2)
 
 #define cop2_present           1
 #define cop2_lazy_restore      1
@@ -26,26 +26,26 @@ extern void octeon_cop2_restore(struct octeon_cop2_state *);
 
 extern void nlm_cop2_save(struct nlm_cop2_state *);
 extern void nlm_cop2_restore(struct nlm_cop2_state *);
-#define cop2_save(r)           nlm_cop2_save(r)
-#define cop2_restore(r)                nlm_cop2_restore(r)
+
+#define cop2_save(r)           nlm_cop2_save(&(r)->thread.cp2)
+#define cop2_restore(r)                nlm_cop2_restore(&(r)->thread.cp2)
 
 #define cop2_present           1
 #define cop2_lazy_restore      0
 
 #elif defined(CONFIG_CPU_LOONGSON3)
 
-#define cop2_save(r)
-#define cop2_restore(r)
-
 #define cop2_present           1
 #define cop2_lazy_restore      1
+#define cop2_save(r)           do { (r); } while (0)
+#define cop2_restore(r)                do { (r); } while (0)
 
 #else
 
 #define cop2_present           0
 #define cop2_lazy_restore      0
-#define cop2_save(r)
-#define cop2_restore(r)
+#define cop2_save(r)           do { (r); } while (0)
+#define cop2_restore(r)                do { (r); } while (0)
 #endif
 
 enum cu2_ops {
index 5d6a764..c4a9127 100644 (file)
 #ifndef _ASM_MACH_IP28_SPACES_H
 #define _ASM_MACH_IP28_SPACES_H
 
-#define CAC_BASE       _AC(0xa800000000000000, UL)
-
-#define HIGHMEM_START  (~0UL)
-
 #define PHYS_OFFSET    _AC(0x20000000, UL)
 
-#define UNCAC_BASE     _AC(0xc0000000, UL)     /* 0xa0000000 + PHYS_OFFSET */
-#define IO_BASE                UNCAC_BASE
-
 #include <asm/mach-generic/spaces.h>
 
 #endif /* _ASM_MACH_IP28_SPACES_H */
index 5699ec3..3be8180 100644 (file)
@@ -37,7 +37,7 @@
 
 /*
  * This is used for calculating the real page sizes
- * for FTLB or VTLB + FTLB confugrations.
+ * for FTLB or VTLB + FTLB configurations.
  */
 static inline unsigned int page_size_ftlb(unsigned int mmuextdef)
 {
@@ -223,7 +223,8 @@ static inline int pfn_valid(unsigned long pfn)
 
 #endif
 
-#define virt_to_page(kaddr)    pfn_to_page(PFN_DOWN(virt_to_phys(kaddr)))
+#define virt_to_page(kaddr)    pfn_to_page(PFN_DOWN(virt_to_phys((void *)     \
+                                                                 (kaddr))))
 
 extern int __virt_addr_valid(const volatile void *kaddr);
 #define virt_addr_valid(kaddr)                                         \
index 1e0f20a..eacf865 100644 (file)
@@ -37,11 +37,6 @@ extern int __cpu_logical_map[NR_CPUS];
 
 #define NO_PROC_ID     (-1)
 
-#define topology_physical_package_id(cpu)      (cpu_data[cpu].package)
-#define topology_core_id(cpu)                  (cpu_data[cpu].core)
-#define topology_core_cpumask(cpu)             (&cpu_core_map[cpu])
-#define topology_thread_cpumask(cpu)           (&cpu_sibling_map[cpu])
-
 #define SMP_RESCHEDULE_YOURSELF 0x1    /* XXX braindead */
 #define SMP_CALL_FUNCTION      0x2
 /* Octeon - Tell another core to flush its icache */
index 495c104..b928b6f 100644 (file)
@@ -92,7 +92,7 @@ do {                                                                  \
                        KSTK_STATUS(prev) &= ~ST0_CU2;                  \
                __c0_stat = read_c0_status();                           \
                write_c0_status(__c0_stat | ST0_CU2);                   \
-               cop2_save(&prev->thread.cp2);                           \
+               cop2_save(prev);                                        \
                write_c0_status(__c0_stat & ~ST0_CU2);                  \
        }                                                               \
        __clear_software_ll_bit();                                      \
@@ -111,7 +111,7 @@ do {                                                                        \
                        (KSTK_STATUS(current) & ST0_CU2)) {             \
                __c0_stat = read_c0_status();                           \
                write_c0_status(__c0_stat | ST0_CU2);                   \
-               cop2_restore(&current->thread.cp2);                     \
+               cop2_restore(current);                                  \
                write_c0_status(__c0_stat & ~ST0_CU2);                  \
        }                                                               \
        if (cpu_has_dsp)                                                \
index 20ea485..3e307ec 100644 (file)
@@ -9,5 +9,13 @@
 #define __ASM_TOPOLOGY_H
 
 #include <topology.h>
+#include <linux/smp.h>
+
+#ifdef CONFIG_SMP
+#define topology_physical_package_id(cpu)      (cpu_data[cpu].package)
+#define topology_core_id(cpu)                  (cpu_data[cpu].core)
+#define topology_core_cpumask(cpu)             (&cpu_core_map[cpu])
+#define topology_thread_cpumask(cpu)           (&cpu_sibling_map[cpu])
+#endif
 
 #endif /* __ASM_TOPOLOGY_H */
index 9bc13ea..fdb4923 100644 (file)
 #define __NR_sched_getattr             (__NR_Linux + 350)
 #define __NR_renameat2                 (__NR_Linux + 351)
 #define __NR_seccomp                   (__NR_Linux + 352)
+#define __NR_getrandom                 (__NR_Linux + 353)
+#define __NR_memfd_create              (__NR_Linux + 354)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls            352
+#define __NR_Linux_syscalls            354
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux                 4000
-#define __NR_O32_Linux_syscalls                352
+#define __NR_O32_Linux_syscalls                354
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
 #define __NR_sched_getattr             (__NR_Linux + 310)
 #define __NR_renameat2                 (__NR_Linux + 311)
 #define __NR_seccomp                   (__NR_Linux + 312)
+#define __NR_getrandom                 (__NR_Linux + 313)
+#define __NR_memfd_create              (__NR_Linux + 314)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls            312
+#define __NR_Linux_syscalls            314
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux                  5000
-#define __NR_64_Linux_syscalls         312
+#define __NR_64_Linux_syscalls         314
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
 #define __NR_sched_getattr             (__NR_Linux + 314)
 #define __NR_renameat2                 (__NR_Linux + 315)
 #define __NR_seccomp                   (__NR_Linux + 316)
+#define __NR_getrandom                 (__NR_Linux + 317)
+#define __NR_memfd_create              (__NR_Linux + 318)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls            316
+#define __NR_Linux_syscalls            318
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux                 6000
-#define __NR_N32_Linux_syscalls                316
+#define __NR_N32_Linux_syscalls                318
 
 #endif /* _UAPI_ASM_UNISTD_H */
index 992e184..50980bf 100644 (file)
@@ -71,8 +71,12 @@ machine_kexec(struct kimage *image)
        kexec_start_address =
                (unsigned long) phys_to_virt(image->start);
 
-       kexec_indirection_page =
-               (unsigned long) phys_to_virt(image->head & PAGE_MASK);
+       if (image->type == KEXEC_TYPE_DEFAULT) {
+               kexec_indirection_page =
+                       (unsigned long) phys_to_virt(image->head & PAGE_MASK);
+       } else {
+               kexec_indirection_page = (unsigned long)&image->head;
+       }
 
        memcpy((void*)reboot_code_buffer, relocate_new_kernel,
               relocate_new_kernel_size);
index f93b4cb..744cd10 100644 (file)
@@ -577,3 +577,5 @@ EXPORT(sys_call_table)
        PTR     sys_sched_getattr               /* 4350 */
        PTR     sys_renameat2
        PTR     sys_seccomp
+       PTR     sys_getrandom
+       PTR     sys_memfd_create
index 03ebd99..002b1bc 100644 (file)
@@ -432,4 +432,6 @@ EXPORT(sys_call_table)
        PTR     sys_sched_getattr               /* 5310 */
        PTR     sys_renameat2
        PTR     sys_seccomp
+       PTR     sys_getrandom
+       PTR     sys_memfd_create
        .size   sys_call_table,.-sys_call_table
index ebc9228..ca6cbbe 100644 (file)
@@ -425,4 +425,6 @@ EXPORT(sysn32_call_table)
        PTR     sys_sched_getattr
        PTR     sys_renameat2                   /* 6315 */
        PTR     sys_seccomp
+       PTR     sys_getrandom
+       PTR     sys_memfd_create
        .size   sysn32_call_table,.-sysn32_call_table
index 25bb840..9e10d11 100644 (file)
@@ -562,4 +562,6 @@ EXPORT(sys32_call_table)
        PTR     sys_sched_getattr               /* 4350 */
        PTR     sys_renameat2
        PTR     sys_seccomp
+       PTR     sys_getrandom
+       PTR     sys_memfd_create
        .size   sys32_call_table,.-sys32_call_table
index 571aab0..f42e35e 100644 (file)
@@ -53,6 +53,7 @@
  */
 unsigned long empty_zero_page, zero_page_mask;
 EXPORT_SYMBOL_GPL(empty_zero_page);
+EXPORT_SYMBOL(zero_page_mask);
 
 /*
  * Not static inline because used by IP27 special magic initialization code
index 05a5661..9f7ecbd 100644 (file)
@@ -793,6 +793,7 @@ static int build_body(struct jit_ctx *ctx)
        const struct sock_filter *inst;
        unsigned int i, off, load_order, condt;
        u32 k, b_off __maybe_unused;
+       int tmp;
 
        for (i = 0; i < prog->len; i++) {
                u16 code;
@@ -1332,9 +1333,9 @@ jmp_cmp:
                case BPF_ANC | SKF_AD_PKTTYPE:
                        ctx->flags |= SEEN_SKB;
 
-                       off = pkt_type_offset();
+                       tmp = off = pkt_type_offset();
 
-                       if (off < 0)
+                       if (tmp < 0)
                                return -1;
                        emit_load_byte(r_tmp, r_skb, off, ctx);
                        /* Keep only the last 3 bits */
index 6e75e20..1554a6f 100644 (file)
@@ -321,6 +321,22 @@ source "fs/Kconfig"
 
 source "arch/parisc/Kconfig.debug"
 
+config SECCOMP
+       def_bool y
+       prompt "Enable seccomp to safely compute untrusted bytecode"
+       ---help---
+         This kernel feature is useful for number crunching applications
+         that may need to compute untrusted bytecode during their
+         execution. By using pipes or other transports made available to
+         the process as file descriptors supporting the read/write
+         syscalls, it's possible to isolate those applications in
+         their own address space using seccomp. Once seccomp is
+         enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
+         and the task is only allowed to execute a few safe syscalls
+         defined by each seccomp mode.
+
+         If unsure, say Y. Only embedded systems should say N here.
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
index 7187664..5db8882 100644 (file)
@@ -48,7 +48,12 @@ cflags-y     := -pipe
 
 # These flags should be implied by an hppa-linux configuration, but they
 # are not in gcc 3.2.
-cflags-y       += -mno-space-regs -mfast-indirect-calls
+cflags-y       += -mno-space-regs
+
+# -mfast-indirect-calls is only relevant for 32-bit kernels.
+ifndef CONFIG_64BIT
+cflags-y       += -mfast-indirect-calls
+endif
 
 # Currently we save and restore fpregs on all kernel entry/interruption paths.
 # If that gets optimized, we might need to disable the use of fpregs in the
index d9dc6cd..e5c4da0 100644 (file)
@@ -456,7 +456,7 @@ int hpux_sysfs(int opcode, unsigned long arg1, unsigned long arg2)
                }
 
                /* String could be altered by userspace after strlen_user() */
-               fsname[len] = '\0';
+               fsname[len - 1] = '\0';
 
                printk(KERN_DEBUG "that is '%s' as (char *)\n", fsname);
                if ( !strcmp(fsname, "hfs") ) {
diff --git a/arch/parisc/include/asm/seccomp.h b/arch/parisc/include/asm/seccomp.h
new file mode 100644 (file)
index 0000000..015f788
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef _ASM_PARISC_SECCOMP_H
+#define _ASM_PARISC_SECCOMP_H
+
+#include <linux/unistd.h>
+
+#define __NR_seccomp_read __NR_read
+#define __NR_seccomp_write __NR_write
+#define __NR_seccomp_exit __NR_exit
+#define __NR_seccomp_sigreturn __NR_rt_sigreturn
+
+#define __NR_seccomp_read_32 __NR_read
+#define __NR_seccomp_write_32 __NR_write
+#define __NR_seccomp_exit_32 __NR_exit
+#define __NR_seccomp_sigreturn_32 __NR_rt_sigreturn
+
+#endif /* _ASM_PARISC_SECCOMP_H */
index 4b9b10c..a846118 100644 (file)
@@ -60,6 +60,7 @@ struct thread_info {
 #define TIF_NOTIFY_RESUME      8       /* callback before returning to user */
 #define TIF_SINGLESTEP         9       /* single stepping? */
 #define TIF_BLOCKSTEP          10      /* branch stepping? */
+#define TIF_SECCOMP            11      /* secure computing */
 
 #define _TIF_SYSCALL_TRACE     (1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING                (1 << TIF_SIGPENDING)
@@ -70,11 +71,13 @@ struct thread_info {
 #define _TIF_NOTIFY_RESUME     (1 << TIF_NOTIFY_RESUME)
 #define _TIF_SINGLESTEP                (1 << TIF_SINGLESTEP)
 #define _TIF_BLOCKSTEP         (1 << TIF_BLOCKSTEP)
+#define _TIF_SECCOMP           (1 << TIF_SECCOMP)
 
 #define _TIF_USER_WORK_MASK     (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
                                  _TIF_NEED_RESCHED)
 #define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP |        \
-                                _TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT)
+                                _TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT | \
+                                _TIF_SECCOMP)
 
 #ifdef CONFIG_64BIT
 # ifdef CONFIG_COMPAT
index 47e0e21..8667f18 100644 (file)
 #define __NR_sched_getattr     (__NR_Linux + 335)
 #define __NR_utimes            (__NR_Linux + 336)
 #define __NR_renameat2         (__NR_Linux + 337)
+#define __NR_seccomp           (__NR_Linux + 338)
+#define __NR_getrandom         (__NR_Linux + 339)
+#define __NR_memfd_create      (__NR_Linux + 340)
 
-#define __NR_Linux_syscalls    (__NR_renameat2 + 1)
+#define __NR_Linux_syscalls    (__NR_memfd_create + 1)
 
 
 #define __IGNORE_select                /* newselect */
index e842ee2..92438c2 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/user.h>
 #include <linux/personality.h>
 #include <linux/security.h>
+#include <linux/seccomp.h>
 #include <linux/compat.h>
 #include <linux/signal.h>
 #include <linux/audit.h>
@@ -270,6 +271,9 @@ long do_syscall_trace_enter(struct pt_regs *regs)
 {
        long ret = 0;
 
+       /* Do the secure computing check first. */
+       secure_computing_strict(regs->gr[20]);
+
        if (test_thread_flag(TIF_SYSCALL_TRACE) &&
            tracehook_report_syscall_entry(regs))
                ret = -1L;
index 8387860..7ef22e3 100644 (file)
@@ -74,7 +74,7 @@ ENTRY(linux_gateway_page)
        /* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
        /* Light-weight-syscall entry must always be located at 0xb0 */
        /* WARNING: Keep this number updated with table size changes */
-#define __NR_lws_entries (2)
+#define __NR_lws_entries (3)
 
 lws_entry:
        gate    lws_start, %r0          /* increase privilege */
@@ -502,7 +502,7 @@ lws_exit:
 
        
        /***************************************************
-               Implementing CAS as an atomic operation:
+               Implementing 32bit CAS as an atomic operation:
 
                %r26 - Address to examine
                %r25 - Old value to check (old)
@@ -659,6 +659,230 @@ cas_action:
        ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 3b-linux_gateway_page)
 
 
+       /***************************************************
+               New CAS implementation which uses pointers and variable size
+               information. The values pointed to by old and new MUST NOT change
+               while performing CAS. The lock only protects the value at %r26.
+
+               %r26 - Address to examine
+               %r25 - Pointer to the value to check (old)
+               %r24 - Pointer to the value to set (new)
+               %r23 - Size of the variable (0/1/2/3 for 8/16/32/64 bit)
+               %r28 - Return non-zero on failure
+               %r21 - Kernel error code
+
+               %r21 has the following meanings:
+
+               EAGAIN - CAS is busy, ldcw failed, try again.
+               EFAULT - Read or write failed.
+
+               Scratch: r20, r22, r28, r29, r1, fr4 (32bit for 64bit CAS only)
+
+       ****************************************************/
+
+       /* ELF32 Process entry path */
+lws_compare_and_swap_2:
+#ifdef CONFIG_64BIT
+       /* Clip the input registers */
+       depdi   0, 31, 32, %r26
+       depdi   0, 31, 32, %r25
+       depdi   0, 31, 32, %r24
+       depdi   0, 31, 32, %r23
+#endif
+
+       /* Check the validity of the size pointer */
+       subi,>>= 4, %r23, %r0
+       b,n     lws_exit_nosys
+
+       /* Jump to the functions which will load the old and new values into
+          registers depending on their size */
+       shlw    %r23, 2, %r29
+       blr     %r29, %r0
+       nop
+
+       /* 8bit load */
+4:     ldb     0(%sr3,%r25), %r25
+       b       cas2_lock_start
+5:     ldb     0(%sr3,%r24), %r24
+       nop
+       nop
+       nop
+       nop
+       nop
+
+       /* 16bit load */
+6:     ldh     0(%sr3,%r25), %r25
+       b       cas2_lock_start
+7:     ldh     0(%sr3,%r24), %r24
+       nop
+       nop
+       nop
+       nop
+       nop
+
+       /* 32bit load */
+8:     ldw     0(%sr3,%r25), %r25
+       b       cas2_lock_start
+9:     ldw     0(%sr3,%r24), %r24
+       nop
+       nop
+       nop
+       nop
+       nop
+
+       /* 64bit load */
+#ifdef CONFIG_64BIT
+10:    ldd     0(%sr3,%r25), %r25
+11:    ldd     0(%sr3,%r24), %r24
+#else
+       /* Load old value into r22/r23 - high/low */
+10:    ldw     0(%sr3,%r25), %r22
+11:    ldw     4(%sr3,%r25), %r23
+       /* Load new value into fr4 for atomic store later */
+12:    flddx   0(%sr3,%r24), %fr4
+#endif
+
+cas2_lock_start:
+       /* Load start of lock table */
+       ldil    L%lws_lock_start, %r20
+       ldo     R%lws_lock_start(%r20), %r28
+
+       /* Extract four bits from r26 and hash lock (Bits 4-7) */
+       extru  %r26, 27, 4, %r20
+
+       /* Find lock to use: the hash is one of 0 to
+          15, multiplied by 16 (keep it 16-byte aligned)
+          and added to the lock table offset. */
+       shlw    %r20, 4, %r20
+       add     %r20, %r28, %r20
+
+       rsm     PSW_SM_I, %r0                   /* Disable interrupts */
+       /* COW breaks can cause contention on UP systems */
+       LDCW    0(%sr2,%r20), %r28              /* Try to acquire the lock */
+       cmpb,<>,n       %r0, %r28, cas2_action  /* Did we get it? */
+cas2_wouldblock:
+       ldo     2(%r0), %r28                    /* 2nd case */
+       ssm     PSW_SM_I, %r0
+       b       lws_exit                        /* Contended... */
+       ldo     -EAGAIN(%r0), %r21              /* Spin in userspace */
+
+       /*
+               prev = *addr;
+               if ( prev == old )
+                 *addr = new;
+               return prev;
+       */
+
+       /* NOTES:
+               This all works because intr_do_signal
+               and schedule both check the return iasq
+               and see that we are on the kernel page,
+               so this process is never scheduled off
+               nor ever sent any signal of any sort;
+               thus it is wholly atomic from userspace's
+               perspective
+       */
+cas2_action:
+       /* Jump to the correct function */
+       blr     %r29, %r0
+       /* Set %r28 as non-zero for now */
+       ldo     1(%r0),%r28
+
+       /* 8bit CAS */
+13:    ldb,ma  0(%sr3,%r26), %r29
+       sub,=   %r29, %r25, %r0
+       b,n     cas2_end
+14:    stb,ma  %r24, 0(%sr3,%r26)
+       b       cas2_end
+       copy    %r0, %r28
+       nop
+       nop
+
+       /* 16bit CAS */
+15:    ldh,ma  0(%sr3,%r26), %r29
+       sub,=   %r29, %r25, %r0
+       b,n     cas2_end
+16:    sth,ma  %r24, 0(%sr3,%r26)
+       b       cas2_end
+       copy    %r0, %r28
+       nop
+       nop
+
+       /* 32bit CAS */
+17:    ldw,ma  0(%sr3,%r26), %r29
+       sub,=   %r29, %r25, %r0
+       b,n     cas2_end
+18:    stw,ma  %r24, 0(%sr3,%r26)
+       b       cas2_end
+       copy    %r0, %r28
+       nop
+       nop
+
+       /* 64bit CAS */
+#ifdef CONFIG_64BIT
+19:    ldd,ma  0(%sr3,%r26), %r29
+       sub,=   %r29, %r25, %r0
+       b,n     cas2_end
+20:    std,ma  %r24, 0(%sr3,%r26)
+       copy    %r0, %r28
+#else
+       /* Compare first word */
+19:    ldw,ma  0(%sr3,%r26), %r29
+       sub,=   %r29, %r22, %r0
+       b,n     cas2_end
+       /* Compare second word */
+20:    ldw,ma  4(%sr3,%r26), %r29
+       sub,=   %r29, %r23, %r0
+       b,n     cas2_end
+       /* Perform the store */
+21:    fstdx   %fr4, 0(%sr3,%r26)
+       copy    %r0, %r28
+#endif
+
+cas2_end:
+       /* Free lock */
+       stw,ma  %r20, 0(%sr2,%r20)
+       /* Enable interrupts */
+       ssm     PSW_SM_I, %r0
+       /* Return to userspace, set no error */
+       b       lws_exit
+       copy    %r0, %r21
+
+22:
+       /* Error occurred on load or store */
+       /* Free lock */
+       stw     %r20, 0(%sr2,%r20)
+       ssm     PSW_SM_I, %r0
+       ldo     1(%r0),%r28
+       b       lws_exit
+       ldo     -EFAULT(%r0),%r21       /* set errno */
+       nop
+       nop
+       nop
+
+       /* Exception table entries, for the load and store, return EFAULT.
+          Each of the entries must be relocated. */
+       ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 22b-linux_gateway_page)
+#ifndef CONFIG_64BIT
+       ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(21b-linux_gateway_page, 22b-linux_gateway_page)
+#endif
+
        /* Make sure nothing else is placed on this page */
        .align PAGE_SIZE
 END(linux_gateway_page)
@@ -675,8 +899,9 @@ ENTRY(end_linux_gateway_page)
        /* Light-weight-syscall table */
        /* Start of lws table. */
 ENTRY(lws_table)
-       LWS_ENTRY(compare_and_swap32)   /* 0 - ELF32 Atomic compare and swap */
-       LWS_ENTRY(compare_and_swap64)   /* 1 - ELF64 Atomic compare and swap */
+       LWS_ENTRY(compare_and_swap32)           /* 0 - ELF32 Atomic 32bit CAS */
+       LWS_ENTRY(compare_and_swap64)           /* 1 - ELF64 Atomic 32bit CAS */
+       LWS_ENTRY(compare_and_swap_2)           /* 2 - ELF32 Atomic 64bit CAS */
 END(lws_table)
        /* End of lws table */
 
index 84c5d3a..b563d9c 100644 (file)
        ENTRY_SAME(sched_getattr)       /* 335 */
        ENTRY_COMP(utimes)
        ENTRY_SAME(renameat2)
+       ENTRY_SAME(seccomp)
+       ENTRY_SAME(getrandom)
+       ENTRY_SAME(memfd_create)        /* 340 */
 
        /* Nothing yet */
 
index 4bee1a6..45fd06c 100644 (file)
@@ -5,6 +5,7 @@ CONFIG_SMP=y
 CONFIG_NR_CPUS=4
 CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
+CONFIG_FHANDLE=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=15
index 6d7b22f..77d7bf3 100644 (file)
@@ -5,6 +5,7 @@ CONFIG_SMP=y
 CONFIG_NR_CPUS=4
 CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
+CONFIG_FHANDLE=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=15
index 4b07bad..269d6e4 100644 (file)
@@ -4,6 +4,7 @@ CONFIG_ALTIVEC=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=24
 CONFIG_SYSVIPC=y
+CONFIG_FHANDLE=y
 CONFIG_IRQ_DOMAIN_DEBUG=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
index 3c72fa6..7594c5a 100644 (file)
@@ -5,6 +5,7 @@ CONFIG_NR_CPUS=4
 CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_BLK_DEV_INITRD=y
index 95e545d..c8b6a9d 100644 (file)
@@ -4,6 +4,7 @@ CONFIG_NR_CPUS=4
 CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 # CONFIG_COMPAT_BRK is not set
index cec044a..e5e7838 100644 (file)
@@ -3,6 +3,7 @@ CONFIG_ALTIVEC=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=2
 CONFIG_SYSVIPC=y
+CONFIG_FHANDLE=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BLK_DEV_INITRD=y
index f26b267..f6c02f8 100644 (file)
@@ -4,6 +4,7 @@ CONFIG_VSX=y
 CONFIG_SMP=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_IRQ_DOMAIN_DEBUG=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
index 438e813..587f551 100644 (file)
@@ -3,6 +3,7 @@ CONFIG_PPC_BOOK3E_64=y
 CONFIG_SMP=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_TASKSTATS=y
index fdee37f..2e637c8 100644 (file)
@@ -5,6 +5,7 @@ CONFIG_SMP=y
 CONFIG_NR_CPUS=2
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_RD_LZMA=y
index a905063..50375f1 100644 (file)
@@ -5,6 +5,7 @@ CONFIG_SMP=y
 CONFIG_NR_CPUS=2048
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
 CONFIG_AUDITSYSCALL=y
 CONFIG_IRQ_DOMAIN_DEBUG=y
index 58e3dbf..4428ee4 100644 (file)
@@ -6,6 +6,7 @@ CONFIG_NR_CPUS=2048
 CONFIG_CPU_LITTLE_ENDIAN=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
 CONFIG_AUDITSYSCALL=y
 CONFIG_IRQ_DOMAIN_DEBUG=y
index 279b80f..c0c61fa 100644 (file)
                                 STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)
 #define STACK_FRAME_MARKER     12
 
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define STACK_FRAME_MIN_SIZE   32
+#else
+#define STACK_FRAME_MIN_SIZE   STACK_FRAME_OVERHEAD
+#endif
+
 /* Size of dummy stack frame allocated when calling signal handler. */
 #define __SIGNAL_FRAMESIZE     128
 #define __SIGNAL_FRAMESIZE32   64
@@ -60,6 +66,7 @@
 #define STACK_FRAME_REGS_MARKER        ASM_CONST(0x72656773)
 #define STACK_INT_FRAME_SIZE   (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
 #define STACK_FRAME_MARKER     2
+#define STACK_FRAME_MIN_SIZE   STACK_FRAME_OVERHEAD
 
 /* Size of stack frame allocated when calling signal handler. */
 #define __SIGNAL_FRAMESIZE     64
index 542bc0f..7d8a600 100644 (file)
@@ -362,3 +362,6 @@ SYSCALL(ni_syscall) /* sys_kcmp */
 SYSCALL_SPU(sched_setattr)
 SYSCALL_SPU(sched_getattr)
 SYSCALL_SPU(renameat2)
+SYSCALL_SPU(seccomp)
+SYSCALL_SPU(getrandom)
+SYSCALL_SPU(memfd_create)
index 5ce5552..4e9af3f 100644 (file)
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define __NR_syscalls          358
+#define __NR_syscalls          361
 
 #define __NR__exit __NR_exit
 #define NR_syscalls    __NR_syscalls
index 2d526f7..0688fc0 100644 (file)
 #define __NR_sched_setattr     355
 #define __NR_sched_getattr     356
 #define __NR_renameat2         357
+#define __NR_seccomp           358
+#define __NR_getrandom         359
+#define __NR_memfd_create      360
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
index 74d1e78..2396dda 100644 (file)
@@ -35,7 +35,7 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
                return 0;               /* must be 16-byte aligned */
        if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
                return 0;
-       if (sp >= prev_sp + STACK_FRAME_OVERHEAD)
+       if (sp >= prev_sp + STACK_FRAME_MIN_SIZE)
                return 1;
        /*
         * sp could decrease when we jump off an interrupt stack
index 97ac8dc..5e1ed15 100644 (file)
@@ -28,6 +28,7 @@
 
 #include <asm/opal.h>
 #include <asm/cputable.h>
+#include <asm/machdep.h>
 
 static int opal_hmi_handler_nb_init;
 struct OpalHmiEvtNode {
@@ -185,4 +186,4 @@ static int __init opal_hmi_handler_init(void)
        }
        return 0;
 }
-subsys_initcall(opal_hmi_handler_init);
+machine_subsys_initcall(powernv, opal_hmi_handler_init);
index c904583..17ee193 100644 (file)
@@ -113,7 +113,7 @@ out:
 static int pseries_remove_mem_node(struct device_node *np)
 {
        const char *type;
-       const unsigned int *regs;
+       const __be32 *regs;
        unsigned long base;
        unsigned int lmb_size;
        int ret = -EINVAL;
@@ -132,8 +132,8 @@ static int pseries_remove_mem_node(struct device_node *np)
        if (!regs)
                return ret;
 
-       base = *(unsigned long *)regs;
-       lmb_size = regs[3];
+       base = be64_to_cpu(*(unsigned long *)regs);
+       lmb_size = be32_to_cpu(regs[3]);
 
        pseries_remove_memblock(base, lmb_size);
        return 0;
@@ -153,7 +153,7 @@ static inline int pseries_remove_mem_node(struct device_node *np)
 static int pseries_add_mem_node(struct device_node *np)
 {
        const char *type;
-       const unsigned int *regs;
+       const __be32 *regs;
        unsigned long base;
        unsigned int lmb_size;
        int ret = -EINVAL;
@@ -172,8 +172,8 @@ static int pseries_add_mem_node(struct device_node *np)
        if (!regs)
                return ret;
 
-       base = *(unsigned long *)regs;
-       lmb_size = regs[3];
+       base = be64_to_cpu(*(unsigned long *)regs);
+       lmb_size = be32_to_cpu(regs[3]);
 
        /*
         * Update memory region to represent the memory add
@@ -187,14 +187,14 @@ static int pseries_update_drconf_memory(struct of_prop_reconfig *pr)
        struct of_drconf_cell *new_drmem, *old_drmem;
        unsigned long memblock_size;
        u32 entries;
-       u32 *p;
+       __be32 *p;
        int i, rc = -EINVAL;
 
        memblock_size = pseries_memory_block_size();
        if (!memblock_size)
                return -EINVAL;
 
-       p = (u32 *) pr->old_prop->value;
+       p = (__be32 *) pr->old_prop->value;
        if (!p)
                return -EINVAL;
 
@@ -203,28 +203,30 @@ static int pseries_update_drconf_memory(struct of_prop_reconfig *pr)
         * entries. Get the number of entries and skip to the array of
         * of_drconf_cell's.
         */
-       entries = *p++;
+       entries = be32_to_cpu(*p++);
        old_drmem = (struct of_drconf_cell *)p;
 
-       p = (u32 *)pr->prop->value;
+       p = (__be32 *)pr->prop->value;
        p++;
        new_drmem = (struct of_drconf_cell *)p;
 
        for (i = 0; i < entries; i++) {
-               if ((old_drmem[i].flags & DRCONF_MEM_ASSIGNED) &&
-                   (!(new_drmem[i].flags & DRCONF_MEM_ASSIGNED))) {
-                       rc = pseries_remove_memblock(old_drmem[i].base_addr,
+               if ((be32_to_cpu(old_drmem[i].flags) & DRCONF_MEM_ASSIGNED) &&
+                   (!(be32_to_cpu(new_drmem[i].flags) & DRCONF_MEM_ASSIGNED))) {
+                       rc = pseries_remove_memblock(
+                               be64_to_cpu(old_drmem[i].base_addr),
                                                     memblock_size);
                        break;
-               } else if ((!(old_drmem[i].flags & DRCONF_MEM_ASSIGNED)) &&
-                          (new_drmem[i].flags & DRCONF_MEM_ASSIGNED)) {
-                       rc = memblock_add(old_drmem[i].base_addr,
+               } else if ((!(be32_to_cpu(old_drmem[i].flags) &
+                           DRCONF_MEM_ASSIGNED)) &&
+                           (be32_to_cpu(new_drmem[i].flags) &
+                           DRCONF_MEM_ASSIGNED)) {
+                       rc = memblock_add(be64_to_cpu(old_drmem[i].base_addr),
                                          memblock_size);
                        rc = (rc < 0) ? -EINVAL : 0;
                        break;
                }
        }
-
        return rc;
 }
 
index 2fcccc0..c81661e 100644 (file)
 #define IPL_PARM_BLK_FCP_LEN (sizeof(struct ipl_list_hdr) + \
                              sizeof(struct ipl_block_fcp))
 
-#define IPL_PARM_BLK0_FCP_LEN (sizeof(struct ipl_block_fcp) + 8)
+#define IPL_PARM_BLK0_FCP_LEN (sizeof(struct ipl_block_fcp) + 16)
 
 #define IPL_PARM_BLK_CCW_LEN (sizeof(struct ipl_list_hdr) + \
                              sizeof(struct ipl_block_ccw))
 
-#define IPL_PARM_BLK0_CCW_LEN (sizeof(struct ipl_block_ccw) + 8)
+#define IPL_PARM_BLK0_CCW_LEN (sizeof(struct ipl_block_ccw) + 16)
 
 #define IPL_MAX_SUPPORTED_VERSION (0)
 
@@ -38,10 +38,11 @@ struct ipl_list_hdr {
        u8  pbt;
        u8  flags;
        u16 reserved2;
+       u8  loadparm[8];
 } __attribute__((packed));
 
 struct ipl_block_fcp {
-       u8  reserved1[313-1];
+       u8  reserved1[305-1];
        u8  opt;
        u8  reserved2[3];
        u16 reserved3;
@@ -62,7 +63,6 @@ struct ipl_block_fcp {
                                 offsetof(struct ipl_block_fcp, scp_data)))
 
 struct ipl_block_ccw {
-       u8  load_parm[8];
        u8  reserved1[84];
        u8  reserved2[2];
        u16 devno;
index 22aac58..39badb9 100644 (file)
@@ -455,22 +455,6 @@ DEFINE_IPL_ATTR_RO(ipl_fcp, bootprog, "%lld\n", (unsigned long long)
 DEFINE_IPL_ATTR_RO(ipl_fcp, br_lba, "%lld\n", (unsigned long long)
                   IPL_PARMBLOCK_START->ipl_info.fcp.br_lba);
 
-static struct attribute *ipl_fcp_attrs[] = {
-       &sys_ipl_type_attr.attr,
-       &sys_ipl_device_attr.attr,
-       &sys_ipl_fcp_wwpn_attr.attr,
-       &sys_ipl_fcp_lun_attr.attr,
-       &sys_ipl_fcp_bootprog_attr.attr,
-       &sys_ipl_fcp_br_lba_attr.attr,
-       NULL,
-};
-
-static struct attribute_group ipl_fcp_attr_group = {
-       .attrs = ipl_fcp_attrs,
-};
-
-/* CCW ipl device attributes */
-
 static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj,
                                     struct kobj_attribute *attr, char *page)
 {
@@ -487,6 +471,23 @@ static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj,
 static struct kobj_attribute sys_ipl_ccw_loadparm_attr =
        __ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL);
 
+static struct attribute *ipl_fcp_attrs[] = {
+       &sys_ipl_type_attr.attr,
+       &sys_ipl_device_attr.attr,
+       &sys_ipl_fcp_wwpn_attr.attr,
+       &sys_ipl_fcp_lun_attr.attr,
+       &sys_ipl_fcp_bootprog_attr.attr,
+       &sys_ipl_fcp_br_lba_attr.attr,
+       &sys_ipl_ccw_loadparm_attr.attr,
+       NULL,
+};
+
+static struct attribute_group ipl_fcp_attr_group = {
+       .attrs = ipl_fcp_attrs,
+};
+
+/* CCW ipl device attributes */
+
 static struct attribute *ipl_ccw_attrs_vm[] = {
        &sys_ipl_type_attr.attr,
        &sys_ipl_device_attr.attr,
@@ -765,28 +766,10 @@ DEFINE_IPL_ATTR_RW(reipl_fcp, br_lba, "%lld\n", "%lld\n",
 DEFINE_IPL_ATTR_RW(reipl_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
                   reipl_block_fcp->ipl_info.fcp.devno);
 
-static struct attribute *reipl_fcp_attrs[] = {
-       &sys_reipl_fcp_device_attr.attr,
-       &sys_reipl_fcp_wwpn_attr.attr,
-       &sys_reipl_fcp_lun_attr.attr,
-       &sys_reipl_fcp_bootprog_attr.attr,
-       &sys_reipl_fcp_br_lba_attr.attr,
-       NULL,
-};
-
-static struct attribute_group reipl_fcp_attr_group = {
-       .attrs = reipl_fcp_attrs,
-};
-
-/* CCW reipl device attributes */
-
-DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
-       reipl_block_ccw->ipl_info.ccw.devno);
-
 static void reipl_get_ascii_loadparm(char *loadparm,
                                     struct ipl_parameter_block *ibp)
 {
-       memcpy(loadparm, ibp->ipl_info.ccw.load_parm, LOADPARM_LEN);
+       memcpy(loadparm, ibp->hdr.loadparm, LOADPARM_LEN);
        EBCASC(loadparm, LOADPARM_LEN);
        loadparm[LOADPARM_LEN] = 0;
        strim(loadparm);
@@ -821,13 +804,50 @@ static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb,
                return -EINVAL;
        }
        /* initialize loadparm with blanks */
-       memset(ipb->ipl_info.ccw.load_parm, ' ', LOADPARM_LEN);
+       memset(ipb->hdr.loadparm, ' ', LOADPARM_LEN);
        /* copy and convert to ebcdic */
-       memcpy(ipb->ipl_info.ccw.load_parm, buf, lp_len);
-       ASCEBC(ipb->ipl_info.ccw.load_parm, LOADPARM_LEN);
+       memcpy(ipb->hdr.loadparm, buf, lp_len);
+       ASCEBC(ipb->hdr.loadparm, LOADPARM_LEN);
        return len;
 }
 
+/* FCP wrapper */
+static ssize_t reipl_fcp_loadparm_show(struct kobject *kobj,
+                                      struct kobj_attribute *attr, char *page)
+{
+       return reipl_generic_loadparm_show(reipl_block_fcp, page);
+}
+
+static ssize_t reipl_fcp_loadparm_store(struct kobject *kobj,
+                                       struct kobj_attribute *attr,
+                                       const char *buf, size_t len)
+{
+       return reipl_generic_loadparm_store(reipl_block_fcp, buf, len);
+}
+
+static struct kobj_attribute sys_reipl_fcp_loadparm_attr =
+       __ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_fcp_loadparm_show,
+                                           reipl_fcp_loadparm_store);
+
+static struct attribute *reipl_fcp_attrs[] = {
+       &sys_reipl_fcp_device_attr.attr,
+       &sys_reipl_fcp_wwpn_attr.attr,
+       &sys_reipl_fcp_lun_attr.attr,
+       &sys_reipl_fcp_bootprog_attr.attr,
+       &sys_reipl_fcp_br_lba_attr.attr,
+       &sys_reipl_fcp_loadparm_attr.attr,
+       NULL,
+};
+
+static struct attribute_group reipl_fcp_attr_group = {
+       .attrs = reipl_fcp_attrs,
+};
+
+/* CCW reipl device attributes */
+
+DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
+       reipl_block_ccw->ipl_info.ccw.devno);
+
 /* NSS wrapper */
 static ssize_t reipl_nss_loadparm_show(struct kobject *kobj,
                                       struct kobj_attribute *attr, char *page)
@@ -1125,11 +1145,10 @@ static void reipl_block_ccw_fill_parms(struct ipl_parameter_block *ipb)
        /* LOADPARM */
        /* check if read scp info worked and set loadparm */
        if (sclp_ipl_info.is_valid)
-               memcpy(ipb->ipl_info.ccw.load_parm,
-                               &sclp_ipl_info.loadparm, LOADPARM_LEN);
+               memcpy(ipb->hdr.loadparm, &sclp_ipl_info.loadparm, LOADPARM_LEN);
        else
                /* read scp info failed: set empty loadparm (EBCDIC blanks) */
-               memset(ipb->ipl_info.ccw.load_parm, 0x40, LOADPARM_LEN);
+               memset(ipb->hdr.loadparm, 0x40, LOADPARM_LEN);
        ipb->hdr.flags = DIAG308_FLAGS_LP_VALID;
 
        /* VM PARM */
@@ -1251,9 +1270,16 @@ static int __init reipl_fcp_init(void)
                return rc;
        }
 
-       if (ipl_info.type == IPL_TYPE_FCP)
+       if (ipl_info.type == IPL_TYPE_FCP) {
                memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE);
-       else {
+               /*
+                * Fix loadparm: There are systems where the (SCSI) LOADPARM
+                * is invalid in the SCSI IPL parameter block, so take it
+                * always from sclp_ipl_info.
+                */
+               memcpy(reipl_block_fcp->hdr.loadparm, sclp_ipl_info.loadparm,
+                      LOADPARM_LEN);
+       } else {
                reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN;
                reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION;
                reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN;
@@ -1864,7 +1890,23 @@ static void __init shutdown_actions_init(void)
 
 static int __init s390_ipl_init(void)
 {
+       char str[8] = {0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40};
+
        sclp_get_ipl_info(&sclp_ipl_info);
+       /*
+        * Fix loadparm: There are systems where the (SCSI) LOADPARM
+        * returned by read SCP info is invalid (contains EBCDIC blanks)
+        * when the system has been booted via diag308. In that case we use
+        * the value from diag308, if available.
+        *
+        * There are also systems where diag308 store does not work in
+        * case the system is booted from HMC. Fortunately in this case
+        * READ SCP info provides the correct value.
+        */
+       if (memcmp(sclp_ipl_info.loadparm, str, sizeof(str)) == 0 &&
+           diag308_set_works)
+               memcpy(sclp_ipl_info.loadparm, ipl_block.hdr.loadparm,
+                      LOADPARM_LEN);
        shutdown_actions_init();
        shutdown_triggers_init();
        return 0;
index 65fc397..7cf18f8 100644 (file)
@@ -22,13 +22,11 @@ __kernel_clock_gettime:
        basr    %r5,0
 0:     al      %r5,21f-0b(%r5)                 /* get &_vdso_data */
        chi     %r2,__CLOCK_REALTIME
-       je      10f
+       je      11f
        chi     %r2,__CLOCK_MONOTONIC
        jne     19f
 
        /* CLOCK_MONOTONIC */
-       ltr     %r3,%r3
-       jz      9f                              /* tp == NULL */
 1:     l       %r4,__VDSO_UPD_COUNT+4(%r5)     /* load update counter */
        tml     %r4,0x0001                      /* pending update ? loop */
        jnz     1b
@@ -67,12 +65,10 @@ __kernel_clock_gettime:
        j       6b
 8:     st      %r2,0(%r3)                      /* store tp->tv_sec */
        st      %r1,4(%r3)                      /* store tp->tv_nsec */
-9:     lhi     %r2,0
+       lhi     %r2,0
        br      %r14
 
        /* CLOCK_REALTIME */
-10:    ltr     %r3,%r3                         /* tp == NULL */
-       jz      18f
 11:    l       %r4,__VDSO_UPD_COUNT+4(%r5)     /* load update counter */
        tml     %r4,0x0001                      /* pending update ? loop */
        jnz     11b
@@ -111,7 +107,7 @@ __kernel_clock_gettime:
        j       15b
 17:    st      %r2,0(%r3)                      /* store tp->tv_sec */
        st      %r1,4(%r3)                      /* store tp->tv_nsec */
-18:    lhi     %r2,0
+       lhi     %r2,0
        br      %r14
 
        /* Fallback to system call */
index 91940ed..3f34e09 100644 (file)
@@ -21,7 +21,7 @@ __kernel_clock_gettime:
        .cfi_startproc
        larl    %r5,_vdso_data
        cghi    %r2,__CLOCK_REALTIME
-       je      4f
+       je      5f
        cghi    %r2,__CLOCK_THREAD_CPUTIME_ID
        je      9f
        cghi    %r2,-2          /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
@@ -30,8 +30,6 @@ __kernel_clock_gettime:
        jne     12f
 
        /* CLOCK_MONOTONIC */
-       ltgr    %r3,%r3
-       jz      3f                              /* tp == NULL */
 0:     lg      %r4,__VDSO_UPD_COUNT(%r5)       /* load update counter */
        tmll    %r4,0x0001                      /* pending update ? loop */
        jnz     0b
@@ -53,12 +51,10 @@ __kernel_clock_gettime:
        j       1b
 2:     stg     %r0,0(%r3)                      /* store tp->tv_sec */
        stg     %r1,8(%r3)                      /* store tp->tv_nsec */
-3:     lghi    %r2,0
+       lghi    %r2,0
        br      %r14
 
        /* CLOCK_REALTIME */
-4:     ltr     %r3,%r3                         /* tp == NULL */
-       jz      8f
 5:     lg      %r4,__VDSO_UPD_COUNT(%r5)       /* load update counter */
        tmll    %r4,0x0001                      /* pending update ? loop */
        jnz     5b
@@ -80,7 +76,7 @@ __kernel_clock_gettime:
        j       6b
 7:     stg     %r0,0(%r3)                      /* store tp->tv_sec */
        stg     %r1,8(%r3)                      /* store tp->tv_nsec */
-8:     lghi    %r2,0
+       lghi    %r2,0
        br      %r14
 
        /* CLOCK_THREAD_CPUTIME_ID for this thread */
index 0c1073e..c7235e0 100644 (file)
@@ -43,6 +43,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
 
 unsigned long empty_zero_page, zero_page_mask;
 EXPORT_SYMBOL(empty_zero_page);
+EXPORT_SYMBOL(zero_page_mask);
 
 static void __init setup_zero_pages(void)
 {
index bf8daf9..37458f3 100644 (file)
@@ -105,6 +105,8 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
                VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
                page = pte_page(pte);
                get_page(page);
+               __flush_anon_page(page, addr);
+               flush_dcache_page(page);
                pages[*nr] = page;
                (*nr)++;
 
index 1f76c22..51ae87b 100644 (file)
@@ -234,12 +234,18 @@ do {      BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u8));        \
        __emit_load8(BASE, STRUCT, FIELD, DEST);                        \
 } while (0)
 
-#define emit_ldmem(OFF, DEST)                                  \
-do {   *prog++ = LD32I | RS1(FP) | S13(-(OFF)) | RD(DEST);     \
+#ifdef CONFIG_SPARC64
+#define BIAS (STACK_BIAS - 4)
+#else
+#define BIAS (-4)
+#endif
+
+#define emit_ldmem(OFF, DEST)                                          \
+do {   *prog++ = LD32I | RS1(SP) | S13(BIAS - (OFF)) | RD(DEST);       \
 } while (0)
 
-#define emit_stmem(OFF, SRC)                                   \
-do {   *prog++ = LD32I | RS1(FP) | S13(-(OFF)) | RD(SRC);      \
+#define emit_stmem(OFF, SRC)                                           \
+do {   *prog++ = ST32I | RS1(SP) | S13(BIAS - (OFF)) | RD(SRC);        \
 } while (0)
 
 #ifdef CONFIG_SMP
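
The BIAS change above moves the BPF scratch slots from frame-pointer-relative to stack-pointer-relative addressing. On sparc64 the ABI biases %sp by STACK_BIAS (2047), so the encoded immediate becomes STACK_BIAS - 4 - OFF, while 32-bit sparc only needs the -4 adjustment. A small standalone sketch of the resulting offsets follows; the constant and function are illustrative only, not part of the JIT.

#include <stdio.h>

#define STACK_BIAS 2047   /* sparc64 stack-pointer bias, for illustration */

/* Immediate offset for BPF scratch slot K, mirroring the
 * S13(BIAS - (OFF)) computation above, with OFF = K * 4. */
static int scratch_offset(int k, int sparc64)
{
        int bias = sparc64 ? STACK_BIAS - 4 : -4;

        return bias - k * 4;
}

int main(void)
{
        int k;

        for (k = 0; k < 4; k++)
                printf("slot %d: sparc64 %+d, sparc32 %+d\n",
                       k, scratch_offset(k, 1), scratch_offset(k, 0));
        return 0;
}
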
@@ -615,10 +621,11 @@ void bpf_jit_compile(struct bpf_prog *fp)
                        case BPF_ANC | SKF_AD_VLAN_TAG:
                        case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
                                emit_skb_load16(vlan_tci, r_A);
-                               if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
-                                       emit_andi(r_A, VLAN_VID_MASK, r_A);
+                               if (code != (BPF_ANC | SKF_AD_VLAN_TAG)) {
+                                       emit_alu_K(SRL, 12);
+                                       emit_andi(r_A, 1, r_A);
                                } else {
-                                       emit_loadimm(VLAN_TAG_PRESENT, r_TMP);
+                                       emit_loadimm(~VLAN_TAG_PRESENT, r_TMP);
                                        emit_and(r_A, r_TMP, r_A);
                                }
                                break;
@@ -630,15 +637,19 @@ void bpf_jit_compile(struct bpf_prog *fp)
                                emit_loadimm(K, r_X);
                                break;
                        case BPF_LD | BPF_MEM:
+                               seen |= SEEN_MEM;
                                emit_ldmem(K * 4, r_A);
                                break;
                        case BPF_LDX | BPF_MEM:
+                               seen |= SEEN_MEM | SEEN_XREG;
                                emit_ldmem(K * 4, r_X);
                                break;
                        case BPF_ST:
+                               seen |= SEEN_MEM;
                                emit_stmem(K * 4, r_A);
                                break;
                        case BPF_STX:
+                               seen |= SEEN_MEM | SEEN_XREG;
                                emit_stmem(K * 4, r_X);
                                break;
 
index 778178f..3632743 100644 (file)
@@ -23,6 +23,7 @@ config X86
        def_bool y
        select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
        select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+       select ARCH_HAS_FAST_MULTIPLIER
        select ARCH_MIGHT_HAVE_PC_PARPORT
        select ARCH_MIGHT_HAVE_PC_SERIO
        select HAVE_AOUT if X86_32
index f277184..dca9842 100644 (file)
@@ -1032,7 +1032,6 @@ struct boot_params *make_boot_params(struct efi_config *c)
        int i;
        unsigned long ramdisk_addr;
        unsigned long ramdisk_size;
-       unsigned long initrd_addr_max;
 
        efi_early = c;
        sys_table = (efi_system_table_t *)(unsigned long)efi_early->table;
@@ -1095,15 +1094,20 @@ struct boot_params *make_boot_params(struct efi_config *c)
 
        memset(sdt, 0, sizeof(*sdt));
 
-       if (hdr->xloadflags & XLF_CAN_BE_LOADED_ABOVE_4G)
-               initrd_addr_max = -1UL;
-       else
-               initrd_addr_max = hdr->initrd_addr_max;
-
        status = handle_cmdline_files(sys_table, image,
                                      (char *)(unsigned long)hdr->cmd_line_ptr,
-                                     "initrd=", initrd_addr_max,
+                                     "initrd=", hdr->initrd_addr_max,
                                      &ramdisk_addr, &ramdisk_size);
+
+       if (status != EFI_SUCCESS &&
+           hdr->xloadflags & XLF_CAN_BE_LOADED_ABOVE_4G) {
+               efi_printk(sys_table, "Trying to load files to higher address\n");
+               status = handle_cmdline_files(sys_table, image,
+                                     (char *)(unsigned long)hdr->cmd_line_ptr,
+                                     "initrd=", -1UL,
+                                     &ramdisk_addr, &ramdisk_size);
+       }
+
        if (status != EFI_SUCCESS)
                goto fail2;
        hdr->ramdisk_image = ramdisk_addr & 0xffffffff;
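
The eboot.c change above replaces the single capped load with a two-pass attempt: first try below the firmware-advertised initrd_addr_max, and only if that fails while XLF_CAN_BE_LOADED_ABOVE_4G is set retry with no address limit. A hedged userspace sketch of that retry shape follows; try_load() is a made-up stand-in for handle_cmdline_files() and the addresses are arbitrary.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in loader: pretend placements below 3G always fail, just so the
 * demo exercises the retry path. */
static bool try_load(uint64_t addr_max, uint64_t *addr)
{
        if (addr_max < 0xC0000000ULL)
                return false;
        *addr = addr_max - 0x100000;    /* made-up placement */
        return true;
}

/* First pass below the advertised limit, second pass unrestricted. */
static bool load_initrd(uint64_t initrd_addr_max, bool can_load_above_4g,
                        uint64_t *addr)
{
        if (try_load(initrd_addr_max, addr))
                return true;
        if (can_load_above_4g) {
                puts("Trying to load files to higher address");
                return try_load(UINT64_MAX, addr);
        }
        return false;
}

int main(void)
{
        uint64_t addr;

        if (load_initrd(0x37FFFFFF, true, &addr))
                printf("initrd at %#llx\n", (unsigned long long)addr);
        return 0;
}
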
index afcd35d..cfe3b95 100644 (file)
@@ -497,8 +497,6 @@ static __always_inline int fls64(__u64 x)
 
 #include <asm-generic/bitops/sched.h>
 
-#define ARCH_HAS_FAST_MULTIPLIER 1
-
 #include <asm/arch_hweight.h>
 
 #include <asm-generic/bitops/const_hweight.h>
index 478c490..1733ab4 100644 (file)
@@ -239,6 +239,7 @@ static inline int mp_find_ioapic(u32 gsi) { return 0; }
 static inline u32 mp_pin_to_gsi(int ioapic, int pin) { return UINT_MAX; }
 static inline int mp_map_gsi_to_irq(u32 gsi, unsigned int flags) { return gsi; }
 static inline void mp_unmap_irq(int irq) { }
+static inline bool mp_should_keep_irq(struct device *dev) { return 1; }
 
 static inline int save_ioapic_entries(void)
 {
index 5be9063..3874693 100644 (file)
@@ -19,6 +19,7 @@ extern pud_t level3_ident_pgt[512];
 extern pmd_t level2_kernel_pgt[512];
 extern pmd_t level2_fixmap_pgt[512];
 extern pmd_t level2_ident_pgt[512];
+extern pte_t level1_fixmap_pgt[512];
 extern pgd_t init_level4_pgt[];
 
 #define swapper_pg_dir init_level4_pgt
index f304773..f1314d0 100644 (file)
@@ -338,8 +338,10 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
         * a relative jump.
         */
        rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
-       if (abs(rel) > 0x7fffffff)
+       if (abs(rel) > 0x7fffffff) {
+               __arch_remove_optimized_kprobe(op, 0);
                return -ERANGE;
+       }
 
        buf = (u8 *)op->optinsn.insn;
 
index 167ffca..95a427e 100644 (file)
@@ -48,7 +48,9 @@ enum address_markers_idx {
        LOW_KERNEL_NR,
        VMALLOC_START_NR,
        VMEMMAP_START_NR,
+# ifdef CONFIG_X86_ESPFIX64
        ESPFIX_START_NR,
+# endif
        HIGH_KERNEL_NR,
        MODULES_VADDR_NR,
        MODULES_END_NR,
@@ -71,7 +73,9 @@ static struct addr_marker address_markers[] = {
        { PAGE_OFFSET,          "Low Kernel Mapping" },
        { VMALLOC_START,        "vmalloc() Area" },
        { VMEMMAP_START,        "Vmemmap" },
+# ifdef CONFIG_X86_ESPFIX64
        { ESPFIX_BASE_ADDR,     "ESPfix Area", 16 },
+# endif
        { __START_KERNEL_map,   "High Kernel Mapping" },
        { MODULES_VADDR,        "Modules" },
        { MODULES_END,          "End Modules" },
index 25e7e13..919b912 100644 (file)
@@ -31,7 +31,7 @@
 #include <linux/sched.h>
 #include <asm/elf.h>
 
-struct __read_mostly va_alignment va_align = {
+struct va_alignment __read_mostly va_align = {
        .flags = -1,
 };
 
index c61ea57..9a2b710 100644 (file)
@@ -326,27 +326,6 @@ static void pci_fixup_video(struct pci_dev *pdev)
        struct pci_bus *bus;
        u16 config;
 
-       if (!vga_default_device()) {
-               resource_size_t start, end;
-               int i;
-
-               /* Does firmware framebuffer belong to us? */
-               for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
-                       if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
-                               continue;
-
-                       start = pci_resource_start(pdev, i);
-                       end  = pci_resource_end(pdev, i);
-
-                       if (!start || !end)
-                               continue;
-
-                       if (screen_info.lfb_base >= start &&
-                           (screen_info.lfb_base + screen_info.lfb_size) < end)
-                               vga_set_default_device(pdev);
-               }
-       }
-
        /* Is VGA routed to us? */
        bus = pdev->bus;
        while (bus) {
@@ -371,8 +350,7 @@ static void pci_fixup_video(struct pci_dev *pdev)
                pci_read_config_word(pdev, PCI_COMMAND, &config);
                if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
                        pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW;
-                       dev_printk(KERN_DEBUG, &pdev->dev, "Boot video device\n");
-                       vga_set_default_device(pdev);
+                       dev_printk(KERN_DEBUG, &pdev->dev, "Video device with shadowed ROM\n");
                }
        }
 }
index e8a1201..16fb009 100644 (file)
@@ -1866,12 +1866,11 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
  *
  * We can construct this by grafting the Xen provided pagetable into
  * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
- * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt.  This
- * means that only the kernel has a physical mapping to start with -
- * but that's enough to get __va working.  We need to fill in the rest
- * of the physical mapping once some sort of allocator has been set
- * up.
- * NOTE: for PVH, the page tables are native.
+ * level2_ident_pgt, and level2_kernel_pgt.  This means that only the
+ * kernel has a physical mapping to start with - but that's enough to
+ * get __va working.  We need to fill in the rest of the physical
+ * mapping once some sort of allocator has been set up.  NOTE: for
+ * PVH, the page tables are native.
  */
 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 {
@@ -1902,8 +1901,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
                /* L3_i[0] -> level2_ident_pgt */
                convert_pfn_mfn(level3_ident_pgt);
                /* L3_k[510] -> level2_kernel_pgt
-                * L3_i[511] -> level2_fixmap_pgt */
+                * L3_k[511] -> level2_fixmap_pgt */
                convert_pfn_mfn(level3_kernel_pgt);
+
+               /* L3_k[511][506] -> level1_fixmap_pgt */
+               convert_pfn_mfn(level2_fixmap_pgt);
        }
        /* We get [511][511] and have Xen's version of level2_kernel_pgt */
        l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
@@ -1913,21 +1915,15 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
        addr[1] = (unsigned long)l3;
        addr[2] = (unsigned long)l2;
        /* Graft it onto L4[272][0]. Note that we creating an aliasing problem:
-        * Both L4[272][0] and L4[511][511] have entries that point to the same
+        * Both L4[272][0] and L4[511][510] have entries that point to the same
         * L2 (PMD) tables. Meaning that if you modify it in __va space
         * it will be also modified in the __ka space! (But if you just
         * modify the PMD table to point to other PTE's or none, then you
         * are OK - which is what cleanup_highmap does) */
        copy_page(level2_ident_pgt, l2);
-       /* Graft it onto L4[511][511] */
+       /* Graft it onto L4[511][510] */
        copy_page(level2_kernel_pgt, l2);
 
-       /* Get [511][510] and graft that in level2_fixmap_pgt */
-       l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
-       l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
-       copy_page(level2_fixmap_pgt, l2);
-       /* Note that we don't do anything with level1_fixmap_pgt which
-        * we don't need. */
        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                /* Make pagetable pieces RO */
                set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
@@ -1937,6 +1933,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
                set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
                set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
                set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
+               set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
 
                /* Pin down new L4 */
                pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
index f4d27b1..9924725 100644 (file)
@@ -56,6 +56,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
        bool is_pm_resume;
 
        WARN_ON(irqs_disabled());
+       WARN_ON(rq->cmd_type == REQ_TYPE_FS);
 
        rq->rq_disk = bd_disk;
        rq->end_io = done;
index 5453583..7788179 100644 (file)
 #include "blk.h"
 
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
-                                            struct bio *bio)
+                                            struct bio *bio,
+                                            bool no_sg_merge)
 {
        struct bio_vec bv, bvprv = { NULL };
-       int cluster, high, highprv = 1, no_sg_merge;
+       int cluster, high, highprv = 1;
        unsigned int seg_size, nr_phys_segs;
        struct bio *fbio, *bbio;
        struct bvec_iter iter;
@@ -35,7 +36,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
        cluster = blk_queue_cluster(q);
        seg_size = 0;
        nr_phys_segs = 0;
-       no_sg_merge = test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
        high = 0;
        for_each_bio(bio) {
                bio_for_each_segment(bv, bio, iter) {
@@ -88,18 +88,23 @@ new_segment:
 
 void blk_recalc_rq_segments(struct request *rq)
 {
-       rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
+       bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
+                       &rq->q->queue_flags);
+
+       rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
+                       no_sg_merge);
 }
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
-       if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags))
+       if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
+                       bio->bi_vcnt < queue_max_segments(q))
                bio->bi_phys_segments = bio->bi_vcnt;
        else {
                struct bio *nxt = bio->bi_next;
 
                bio->bi_next = NULL;
-               bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
+               bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
                bio->bi_next = nxt;
        }
 
index 4aac826..c88e608 100644 (file)
@@ -203,7 +203,6 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
        if (tag != BLK_MQ_TAG_FAIL) {
                rq = data->hctx->tags->rqs[tag];
 
-               rq->cmd_flags = 0;
                if (blk_mq_tag_busy(data->hctx)) {
                        rq->cmd_flags = REQ_MQ_INFLIGHT;
                        atomic_inc(&data->hctx->nr_active);
@@ -258,6 +257,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 
        if (rq->cmd_flags & REQ_MQ_INFLIGHT)
                atomic_dec(&hctx->nr_active);
+       rq->cmd_flags = 0;
 
        clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
        blk_mq_put_tag(hctx, tag, &ctx->last_tag);
@@ -392,6 +392,12 @@ static void blk_mq_start_request(struct request *rq, bool last)
 
        blk_add_timer(rq);
 
+       /*
+        * Ensure that ->deadline is visible before we set the started
+        * flag and clear the completed flag.
+        */
+       smp_mb__before_atomic();
+
        /*
         * Mark us as started and clear complete. Complete might have been
         * set if requeue raced with timeout, which then marked it as
@@ -473,7 +479,11 @@ static void blk_mq_requeue_work(struct work_struct *work)
                blk_mq_insert_request(rq, false, false, false);
        }
 
-       blk_mq_run_queues(q, false);
+       /*
+        * Use the start variant of queue running here, so that running
+        * the requeue work will kick stopped queues.
+        */
+       blk_mq_start_hw_queues(q);
 }
 
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
@@ -957,14 +967,9 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
 
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-       if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
-           !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
-               blk_insert_flush(rq);
-       } else {
-               spin_lock(&ctx->lock);
-               __blk_mq_insert_request(hctx, rq, at_head);
-               spin_unlock(&ctx->lock);
-       }
+       spin_lock(&ctx->lock);
+       __blk_mq_insert_request(hctx, rq, at_head);
+       spin_unlock(&ctx->lock);
 
        if (run_queue)
                blk_mq_run_hw_queue(hctx, async);
@@ -1321,6 +1326,7 @@ static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
                                continue;
                        set->ops->exit_request(set->driver_data, tags->rqs[i],
                                                hctx_idx, i);
+                       tags->rqs[i] = NULL;
                }
        }
 
@@ -1354,8 +1360,9 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 
        INIT_LIST_HEAD(&tags->page_list);
 
-       tags->rqs = kmalloc_node(set->queue_depth * sizeof(struct request *),
-                                       GFP_KERNEL, set->numa_node);
+       tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
+                                GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+                                set->numa_node);
        if (!tags->rqs) {
                blk_mq_free_tags(tags);
                return NULL;
@@ -1379,8 +1386,9 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
                        this_order--;
 
                do {
-                       page = alloc_pages_node(set->numa_node, GFP_KERNEL,
-                                               this_order);
+                       page = alloc_pages_node(set->numa_node,
+                               GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+                               this_order);
                        if (page)
                                break;
                        if (!this_order--)
@@ -1401,11 +1409,15 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
                left -= to_do * rq_size;
                for (j = 0; j < to_do; j++) {
                        tags->rqs[i] = p;
+                       tags->rqs[i]->atomic_flags = 0;
+                       tags->rqs[i]->cmd_flags = 0;
                        if (set->ops->init_request) {
                                if (set->ops->init_request(set->driver_data,
                                                tags->rqs[i], hctx_idx, i,
-                                               set->numa_node))
+                                               set->numa_node)) {
+                                       tags->rqs[i] = NULL;
                                        goto fail;
+                               }
                        }
 
                        p += rq_size;
@@ -1416,7 +1428,6 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
        return tags;
 
 fail:
-       pr_warn("%s: failed to allocate requests\n", __func__);
        blk_mq_free_rq_map(set, tags, hctx_idx);
        return NULL;
 }
@@ -1936,6 +1947,60 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
        return NOTIFY_OK;
 }
 
+static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
+{
+       int i;
+
+       for (i = 0; i < set->nr_hw_queues; i++) {
+               set->tags[i] = blk_mq_init_rq_map(set, i);
+               if (!set->tags[i])
+                       goto out_unwind;
+       }
+
+       return 0;
+
+out_unwind:
+       while (--i >= 0)
+               blk_mq_free_rq_map(set, set->tags[i], i);
+
+       return -ENOMEM;
+}
+
+/*
+ * Allocate the request maps associated with this tag_set. Note that this
+ * may reduce the depth asked for, if memory is tight. set->queue_depth
+ * will be updated to reflect the allocated depth.
+ */
+static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
+{
+       unsigned int depth;
+       int err;
+
+       depth = set->queue_depth;
+       do {
+               err = __blk_mq_alloc_rq_maps(set);
+               if (!err)
+                       break;
+
+               set->queue_depth >>= 1;
+               if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
+                       err = -ENOMEM;
+                       break;
+               }
+       } while (set->queue_depth);
+
+       if (!set->queue_depth || err) {
+               pr_err("blk-mq: failed to allocate request map\n");
+               return -ENOMEM;
+       }
+
+       if (depth != set->queue_depth)
+               pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
+                                               depth, set->queue_depth);
+
+       return 0;
+}
+
 /*
  * Alloc a tag set to be associated with one or more request queues.
  * May fail with EINVAL for various error conditions. May adjust the
@@ -1944,8 +2009,6 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
  */
 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 {
-       int i;
-
        if (!set->nr_hw_queues)
                return -EINVAL;
        if (!set->queue_depth)
@@ -1966,23 +2029,18 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
                                 sizeof(struct blk_mq_tags *),
                                 GFP_KERNEL, set->numa_node);
        if (!set->tags)
-               goto out;
+               return -ENOMEM;
 
-       for (i = 0; i < set->nr_hw_queues; i++) {
-               set->tags[i] = blk_mq_init_rq_map(set, i);
-               if (!set->tags[i])
-                       goto out_unwind;
-       }
+       if (blk_mq_alloc_rq_maps(set))
+               goto enomem;
 
        mutex_init(&set->tag_list_lock);
        INIT_LIST_HEAD(&set->tag_list);
 
        return 0;
-
-out_unwind:
-       while (--i >= 0)
-               blk_mq_free_rq_map(set, set->tags[i], i);
-out:
+enomem:
+       kfree(set->tags);
+       set->tags = NULL;
        return -ENOMEM;
 }
 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
@@ -1997,6 +2055,7 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
        }
 
        kfree(set->tags);
+       set->tags = NULL;
 }
 EXPORT_SYMBOL(blk_mq_free_tag_set);
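
blk_mq_alloc_rq_maps() above keeps halving set->queue_depth until __blk_mq_alloc_rq_maps() succeeds or the depth would drop below the reserved tags plus BLK_MQ_TAG_MIN, then reports any reduction. A standalone sketch of that back-off loop follows; alloc_maps() is a fake allocator standing in for the per-hardware-queue request map allocation.

#include <stdbool.h>
#include <stdio.h>

/* Pretend anything deeper than 64 requests per queue cannot be satisfied. */
static bool alloc_maps(unsigned int depth)
{
        return depth <= 64;
}

/* Halve the requested depth until allocation succeeds or the depth would
 * fall below the minimum, mirroring blk_mq_alloc_rq_maps(). */
static int alloc_with_backoff(unsigned int *depth, unsigned int min_depth)
{
        unsigned int asked = *depth;

        while (*depth) {
                if (alloc_maps(*depth))
                        break;
                *depth >>= 1;
                if (*depth < min_depth)
                        return -1;
        }
        if (!*depth)
                return -1;
        if (*depth != asked)
                printf("reduced tag depth (%u -> %u)\n", asked, *depth);
        return 0;
}

int main(void)
{
        unsigned int depth = 256;

        if (alloc_with_backoff(&depth, 4) == 0)
                printf("using depth %u\n", depth);
        return 0;
}
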
 
index 4db5abf..17f5c84 100644 (file)
@@ -554,8 +554,10 @@ int blk_register_queue(struct gendisk *disk)
         * Initialization must be complete by now.  Finish the initial
         * bypass from queue allocation.
         */
-       queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
-       blk_queue_bypass_end(q);
+       if (!blk_queue_init_done(q)) {
+               queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
+               blk_queue_bypass_end(q);
+       }
 
        ret = blk_trace_init_sysfs(dev);
        if (ret)
index 791f419..e6723bd 100644 (file)
@@ -28,10 +28,10 @@ struct kobject *block_depr;
 /* for extended dynamic devt allocation, currently only one major is used */
 #define NR_EXT_DEVT            (1 << MINORBITS)
 
-/* For extended devt allocation.  ext_devt_mutex prevents look up
+/* For extended devt allocation.  ext_devt_lock prevents look up
  * results from going away underneath its user.
  */
-static DEFINE_MUTEX(ext_devt_mutex);
+static DEFINE_SPINLOCK(ext_devt_lock);
 static DEFINE_IDR(ext_devt_idr);
 
 static struct device_type disk_type;
@@ -420,9 +420,13 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
        }
 
        /* allocate ext devt */
-       mutex_lock(&ext_devt_mutex);
-       idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_KERNEL);
-       mutex_unlock(&ext_devt_mutex);
+       idr_preload(GFP_KERNEL);
+
+       spin_lock(&ext_devt_lock);
+       idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
+       spin_unlock(&ext_devt_lock);
+
+       idr_preload_end();
        if (idx < 0)
                return idx == -ENOSPC ? -EBUSY : idx;
 
@@ -441,15 +445,13 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
  */
 void blk_free_devt(dev_t devt)
 {
-       might_sleep();
-
        if (devt == MKDEV(0, 0))
                return;
 
        if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
-               mutex_lock(&ext_devt_mutex);
+               spin_lock(&ext_devt_lock);
                idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
-               mutex_unlock(&ext_devt_mutex);
+               spin_unlock(&ext_devt_lock);
        }
 }
 
@@ -665,7 +667,6 @@ void del_gendisk(struct gendisk *disk)
                sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
        pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
        device_del(disk_to_dev(disk));
-       blk_free_devt(disk_to_dev(disk)->devt);
 }
 EXPORT_SYMBOL(del_gendisk);
 
@@ -690,13 +691,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
        } else {
                struct hd_struct *part;
 
-               mutex_lock(&ext_devt_mutex);
+               spin_lock(&ext_devt_lock);
                part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
                if (part && get_disk(part_to_disk(part))) {
                        *partno = part->partno;
                        disk = part_to_disk(part);
                }
-               mutex_unlock(&ext_devt_mutex);
+               spin_unlock(&ext_devt_lock);
        }
 
        return disk;
@@ -1098,6 +1099,7 @@ static void disk_release(struct device *dev)
 {
        struct gendisk *disk = dev_to_disk(dev);
 
+       blk_free_devt(dev->devt);
        disk_release_events(disk);
        kfree(disk->random);
        disk_replace_part_tbl(disk, NULL);
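
The genhd.c hunks replace ext_devt_mutex with a spinlock, which means idr_alloc() can no longer sleep for memory; idr_preload(GFP_KERNEL) moves the allocation outside the lock and GFP_NOWAIT is used inside it. The same shape in a userspace sketch follows, with malloc standing in for the preload and a pthread spinlock for ext_devt_lock; the names are illustrative only.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        int id;
        struct node *next;
};

static pthread_spinlock_t lock;
static struct node *head;
static int next_id;

/* Allocate (which may block) before taking the spinlock, and only do the
 * non-sleeping insertion while the lock is held - the idr_preload() idea. */
static int alloc_id(void)
{
        struct node *n = malloc(sizeof(*n));

        if (!n)
                return -1;

        pthread_spin_lock(&lock);
        n->id = next_id++;
        n->next = head;
        head = n;
        pthread_spin_unlock(&lock);

        return n->id;
}

int main(void)
{
        pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
        printf("allocated id %d\n", alloc_id());
        printf("allocated id %d\n", alloc_id());
        return 0;
}
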
index 789cdea..0d9e5f9 100644 (file)
@@ -211,6 +211,7 @@ static const struct attribute_group *part_attr_groups[] = {
 static void part_release(struct device *dev)
 {
        struct hd_struct *p = dev_to_part(dev);
+       blk_free_devt(dev->devt);
        free_part_stats(p);
        free_part_info(p);
        kfree(p);
@@ -253,7 +254,6 @@ void delete_partition(struct gendisk *disk, int partno)
        rcu_assign_pointer(ptbl->last_lookup, NULL);
        kobject_put(part->holder_dir);
        device_del(part_to_dev(part));
-       blk_free_devt(part_devt(part));
 
        hd_struct_put(part);
 }
index 7894db9..a53ee09 100644 (file)
@@ -1922,9 +1922,6 @@ static inline int __init drbg_healthcheck_sanity(void)
        /* overflow max addtllen with personalization string */
        ret = drbg_instantiate(drbg, &addtl, coreref, pr);
        BUG_ON(0 == ret);
-       /* test uninstantated DRBG */
-       len = drbg_generate(drbg, buf, (max_request_bytes + 1), NULL);
-       BUG_ON(0 < len);
        /* all tests passed */
        rc = 0;
 
index 2da8660..81dc750 100644 (file)
@@ -33,7 +33,7 @@ acpi_cmos_rtc_space_handler(u32 function, acpi_physical_address address,
                      void *handler_context, void *region_context)
 {
        int i;
-       u8 *value = (u8 *)&value64;
+       u8 *value = (u8 *)value64;
 
        if (address > 0xff || !value64)
                return AE_BAD_PARAMETER;
index 9dfec48..fddc1e8 100644 (file)
@@ -610,7 +610,7 @@ static int acpi_lpss_suspend_late(struct device *dev)
        return acpi_dev_suspend_late(dev);
 }
 
-static int acpi_lpss_restore_early(struct device *dev)
+static int acpi_lpss_resume_early(struct device *dev)
 {
        int ret = acpi_dev_resume_early(dev);
 
@@ -650,15 +650,15 @@ static int acpi_lpss_runtime_resume(struct device *dev)
 static struct dev_pm_domain acpi_lpss_pm_domain = {
        .ops = {
 #ifdef CONFIG_PM_SLEEP
-               .suspend_late = acpi_lpss_suspend_late,
-               .restore_early = acpi_lpss_restore_early,
                .prepare = acpi_subsys_prepare,
                .complete = acpi_subsys_complete,
                .suspend = acpi_subsys_suspend,
-               .resume_early = acpi_subsys_resume_early,
+               .suspend_late = acpi_lpss_suspend_late,
+               .resume_early = acpi_lpss_resume_early,
                .freeze = acpi_subsys_freeze,
                .poweroff = acpi_subsys_suspend,
-               .poweroff_late = acpi_subsys_suspend_late,
+               .poweroff_late = acpi_lpss_suspend_late,
+               .restore_early = acpi_lpss_resume_early,
 #endif
 #ifdef CONFIG_PM_RUNTIME
                .runtime_suspend = acpi_lpss_runtime_suspend,
index 1c162e7..5fdfe65 100644 (file)
@@ -534,20 +534,6 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
                        " invalid.\n");
        }
 
-       /*
-        * When fully charged, some batteries wrongly report
-        * capacity_now = design_capacity instead of = full_charge_capacity
-        */
-       if (battery->capacity_now > battery->full_charge_capacity
-           && battery->full_charge_capacity != ACPI_BATTERY_VALUE_UNKNOWN) {
-               if (battery->capacity_now != battery->design_capacity)
-                       printk_once(KERN_WARNING FW_BUG
-                               "battery: reported current charge level (%d) "
-                               "is higher than reported maximum charge level (%d).\n",
-                               battery->capacity_now, battery->full_charge_capacity);
-               battery->capacity_now = battery->full_charge_capacity;
-       }
-
        if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
            && battery->capacity_now >= 0 && battery->capacity_now <= 100)
                battery->capacity_now = (battery->capacity_now *
index 8581f5b..8b67bd0 100644 (file)
@@ -177,16 +177,6 @@ void acpi_bus_detach_private_data(acpi_handle handle)
 }
 EXPORT_SYMBOL_GPL(acpi_bus_detach_private_data);
 
-void acpi_bus_no_hotplug(acpi_handle handle)
-{
-       struct acpi_device *adev = NULL;
-
-       acpi_bus_get_device(handle, &adev);
-       if (adev)
-               adev->flags.no_hotplug = true;
-}
-EXPORT_SYMBOL_GPL(acpi_bus_no_hotplug);
-
 static void acpi_print_osc_error(acpi_handle handle,
        struct acpi_osc_context *context, char *error)
 {
index 65ea7b2..0c94b66 100644 (file)
@@ -512,7 +512,14 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
                            map, &regmap_reg_ranges_fops);
 
        if (map->max_register || regmap_readable(map, 0)) {
-               debugfs_create_file("registers", 0400, map->debugfs,
+               umode_t registers_mode;
+
+               if (IS_ENABLED(REGMAP_ALLOW_WRITE_DEBUGFS))
+                       registers_mode = 0600;
+               else
+                       registers_mode = 0400;
+
+               debugfs_create_file("registers", registers_mode, map->debugfs,
                                    map, &regmap_map_fops);
                debugfs_create_file("access", 0400, map->debugfs,
                                    map, &regmap_access_fops);
index db1e956..5c8e7fe 100644 (file)
@@ -3918,7 +3918,6 @@ skip_create_disk:
        if (rv) {
                dev_err(&dd->pdev->dev,
                        "Unable to allocate request queue\n");
-               rv = -ENOMEM;
                goto block_queue_alloc_init_error;
        }
 
index a3b042c..00d469c 100644 (file)
@@ -462,17 +462,21 @@ static int null_add_dev(void)
        struct gendisk *disk;
        struct nullb *nullb;
        sector_t size;
+       int rv;
 
        nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
-       if (!nullb)
+       if (!nullb) {
+               rv = -ENOMEM;
                goto out;
+       }
 
        spin_lock_init(&nullb->lock);
 
        if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
                submit_queues = nr_online_nodes;
 
-       if (setup_queues(nullb))
+       rv = setup_queues(nullb);
+       if (rv)
                goto out_free_nullb;
 
        if (queue_mode == NULL_Q_MQ) {
@@ -484,22 +488,29 @@ static int null_add_dev(void)
                nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
                nullb->tag_set.driver_data = nullb;
 
-               if (blk_mq_alloc_tag_set(&nullb->tag_set))
+               rv = blk_mq_alloc_tag_set(&nullb->tag_set);
+               if (rv)
                        goto out_cleanup_queues;
 
                nullb->q = blk_mq_init_queue(&nullb->tag_set);
-               if (!nullb->q)
+               if (!nullb->q) {
+                       rv = -ENOMEM;
                        goto out_cleanup_tags;
+               }
        } else if (queue_mode == NULL_Q_BIO) {
                nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
-               if (!nullb->q)
+               if (!nullb->q) {
+                       rv = -ENOMEM;
                        goto out_cleanup_queues;
+               }
                blk_queue_make_request(nullb->q, null_queue_bio);
                init_driver_queues(nullb);
        } else {
                nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
-               if (!nullb->q)
+               if (!nullb->q) {
+                       rv = -ENOMEM;
                        goto out_cleanup_queues;
+               }
                blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
                blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
                init_driver_queues(nullb);
@@ -509,8 +520,10 @@ static int null_add_dev(void)
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
 
        disk = nullb->disk = alloc_disk_node(1, home_node);
-       if (!disk)
+       if (!disk) {
+               rv = -ENOMEM;
                goto out_cleanup_blk_queue;
+       }
 
        mutex_lock(&lock);
        list_add_tail(&nullb->list, &nullb_list);
@@ -544,7 +557,7 @@ out_cleanup_queues:
 out_free_nullb:
        kfree(nullb);
 out:
-       return -ENOMEM;
+       return rv;
 }
 
 static int __init null_init(void)
index 623c841..4b97baf 100644 (file)
@@ -5087,9 +5087,11 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
        set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
        set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
 
-       rbd_dev->rq_wq = alloc_workqueue(rbd_dev->disk->disk_name, 0, 0);
-       if (!rbd_dev->rq_wq)
+       rbd_dev->rq_wq = alloc_workqueue("%s", 0, 0, rbd_dev->disk->disk_name);
+       if (!rbd_dev->rq_wq) {
+               ret = -ENOMEM;
                goto err_out_mapping;
+       }
 
        ret = rbd_bus_add_dev(rbd_dev);
        if (ret)
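
The rbd change passes the disk name as a "%s" argument instead of using it directly as the alloc_workqueue() format string, since a name containing '%' would be misinterpreted. The same rule holds for any printf-style interface; a trivial userspace illustration:

#include <stdio.h>

int main(void)
{
        const char *name = "rbd%d";     /* a name that happens to contain '%' */

        /* Wrong: the string is treated as a format and %d would read a
         * nonexistent argument (undefined behaviour):
         *         printf(name);
         */

        /* Right: pass untrusted strings as arguments, never as the format. */
        printf("%s\n", name);
        return 0;
}
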
index 2e3139e..132c9cc 100644 (file)
@@ -36,6 +36,7 @@ struct virtrng_info {
        int index;
        bool busy;
        bool hwrng_register_done;
+       bool hwrng_removed;
 };
 
 
@@ -68,6 +69,9 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
        int ret;
        struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
 
+       if (vi->hwrng_removed)
+               return -ENODEV;
+
        if (!vi->busy) {
                vi->busy = true;
                init_completion(&vi->have_data);
@@ -137,6 +141,9 @@ static void remove_common(struct virtio_device *vdev)
 {
        struct virtrng_info *vi = vdev->priv;
 
+       vi->hwrng_removed = true;
+       vi->data_avail = 0;
+       complete(&vi->have_data);
        vdev->config->reset(vdev);
        vi->busy = false;
        if (vi->hwrng_register_done)
index 0300c46..32f7c1b 100644 (file)
@@ -447,7 +447,7 @@ void __init of_at91sam9260_clk_slow_setup(struct device_node *np,
        int i;
 
        num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
-       if (num_parents <= 0 || num_parents > 1)
+       if (num_parents != 2)
                return;
 
        for (i = 0; i < num_parents; ++i) {
index bac2ddf..73a8d0f 100644 (file)
@@ -22,7 +22,7 @@ static struct clk_onecell_data clk_data = {
        .clk_num = ARRAY_SIZE(clk),
 };
 
-static int __init efm32gg_cmu_init(struct device_node *np)
+static void __init efm32gg_cmu_init(struct device_node *np)
 {
        int i;
        void __iomem *base;
@@ -33,7 +33,7 @@ static int __init efm32gg_cmu_init(struct device_node *np)
        base = of_iomap(np, 0);
        if (!base) {
                pr_warn("Failed to map address range for efm32gg,cmu node\n");
-               return -EADDRNOTAVAIL;
+               return;
        }
 
        clk[clk_HFXO] = clk_register_fixed_rate(NULL, "HFXO", NULL,
@@ -76,6 +76,6 @@ static int __init efm32gg_cmu_init(struct device_node *np)
        clk[clk_HFPERCLKDAC0] = clk_register_gate(NULL, "HFPERCLK.DAC0",
                        "HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL);
 
-       return of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+       of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
 }
 CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init);
index b76fa69..bacc06f 100644 (file)
@@ -1467,6 +1467,7 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even
 static void clk_change_rate(struct clk *clk)
 {
        struct clk *child;
+       struct hlist_node *tmp;
        unsigned long old_rate;
        unsigned long best_parent_rate = 0;
        bool skip_set_rate = false;
@@ -1502,7 +1503,11 @@ static void clk_change_rate(struct clk *clk)
        if (clk->notifier_count && old_rate != clk->rate)
                __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
 
-       hlist_for_each_entry(child, &clk->children, child_node) {
+       /*
+        * Use safe iteration, as change_rate can actually swap parents
+        * for certain clock types.
+        */
+       hlist_for_each_entry_safe(child, tmp, &clk->children, child_node) {
                /* Skip children who will be reparented to another clock */
                if (child->new_parent && child->new_parent != clk)
                        continue;
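
The clk.c hunk switches to hlist_for_each_entry_safe() because recursing into clk_change_rate() for a child can reparent it, unlinking it from the list being walked; the _safe variant caches the next node before visiting the current one. A userspace analogue on a plain singly linked list follows, where visit() unlinks and frees the node (loosely standing in for the reparenting); everything here is illustrative, not kernel code.

#include <stdio.h>
#include <stdlib.h>

struct node {
        int val;
        struct node *next;
};

static struct node *head;

static void push(int val)
{
        struct node *n = malloc(sizeof(*n));

        if (!n)
                exit(1);
        n->val = val;
        n->next = head;
        head = n;
}

/* visit() may remove the node from the list it is on, so the caller must
 * not rely on n->next still being valid afterwards. */
static void visit(struct node *n)
{
        struct node **pp;

        printf("visiting %d\n", n->val);
        for (pp = &head; *pp; pp = &(*pp)->next) {
                if (*pp == n) {
                        *pp = n->next;
                        free(n);
                        return;
                }
        }
}

int main(void)
{
        push(3); push(2); push(1);
        /* Safe iteration: remember ->next before visit() can free the node. */
        for (struct node *n = head, *tmp; n; n = tmp) {
                tmp = n->next;
                visit(n);
        }
        return 0;
}
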
index 4032e51..3b83b7d 100644 (file)
@@ -1095,7 +1095,7 @@ static struct clk_branch prng_clk = {
 };
 
 static const struct freq_tbl clk_tbl_sdc[] = {
-       {    144000, P_PXO,   5, 18,625 },
+       {    200000, P_PXO,   2, 2, 125 },
        {    400000, P_PLL8,  4, 1, 240 },
        {  16000000, P_PLL8,  4, 1,   6 },
        {  17070000, P_PLL8,  1, 2,  45 },
index 0d8c6c5..b22a2d2 100644 (file)
@@ -545,7 +545,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
        GATE(PCLK_PWM, "pclk_pwm", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 0, GFLAGS),
        GATE(PCLK_TIMER, "pclk_timer", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 1, GFLAGS),
        GATE(PCLK_I2C0, "pclk_i2c0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 2, GFLAGS),
-       GATE(PCLK_I2C1, "pclk_i2c1", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 3, GFLAGS),
+       GATE(PCLK_I2C2, "pclk_i2c2", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 3, GFLAGS),
        GATE(0, "pclk_ddrupctl0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 14, GFLAGS),
        GATE(0, "pclk_publ0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 15, GFLAGS),
        GATE(0, "pclk_ddrupctl1", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 0, GFLAGS),
@@ -603,7 +603,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
        GATE(PCLK_I2C4, "pclk_i2c4", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 15, GFLAGS),
        GATE(PCLK_UART3, "pclk_uart3", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 11, GFLAGS),
        GATE(PCLK_UART4, "pclk_uart4", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 12, GFLAGS),
-       GATE(PCLK_I2C2, "pclk_i2c2", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 13, GFLAGS),
+       GATE(PCLK_I2C1, "pclk_i2c1", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 13, GFLAGS),
        GATE(PCLK_I2C3, "pclk_i2c3", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 14, GFLAGS),
        GATE(PCLK_SARADC, "pclk_saradc", "pclk_peri", 0, RK3288_CLKGATE_CON(7), 1, GFLAGS),
        GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 0, RK3288_CLKGATE_CON(7), 2, GFLAGS),
index 4a65b41..af29359 100644 (file)
@@ -139,9 +139,13 @@ static long atl_clk_round_rate(struct clk_hw *hw, unsigned long rate,
 static int atl_clk_set_rate(struct clk_hw *hw, unsigned long rate,
                            unsigned long parent_rate)
 {
-       struct dra7_atl_desc *cdesc = to_atl_desc(hw);
+       struct dra7_atl_desc *cdesc;
        u32 divider;
 
+       if (!hw || !rate)
+               return -EINVAL;
+
+       cdesc = to_atl_desc(hw);
        divider = ((parent_rate + rate / 2) / rate) - 1;
        if (divider > DRA7_ATL_DIVIDER_MASK)
                divider = DRA7_ATL_DIVIDER_MASK;
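
Besides adding the NULL/zero-rate guard, the code above computes the ATL divider by rounding parent_rate / rate to the nearest integer and clamping it to the register field (DRA7_ATL_DIVIDER_MASK). A tiny sketch of that arithmetic follows; the 0x1f field width is assumed here purely for illustration.

#include <stdio.h>

#define ATL_DIVIDER_MASK 0x1f   /* assumed field width, for illustration */

/* Round parent_rate/rate to the nearest integer, convert to the register
 * encoding (value = divider - 1) and clamp to the field. */
static unsigned int atl_divider(unsigned long parent_rate, unsigned long rate)
{
        unsigned int divider;

        if (!rate)              /* the driver instead returns -EINVAL first */
                return 0;

        divider = ((parent_rate + rate / 2) / rate) - 1;
        if (divider > ATL_DIVIDER_MASK)
                divider = ATL_DIVIDER_MASK;
        return divider;
}

int main(void)
{
        printf("%u\n", atl_divider(49152000, 1536000)); /* ratio 32, encoded 31 */
        printf("%u\n", atl_divider(49152000, 44100));   /* clamped to 31 */
        return 0;
}
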
index e6aa10d..a837f70 100644 (file)
@@ -211,11 +211,16 @@ static long ti_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
 static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
                                   unsigned long parent_rate)
 {
-       struct clk_divider *divider = to_clk_divider(hw);
+       struct clk_divider *divider;
        unsigned int div, value;
        unsigned long flags = 0;
        u32 val;
 
+       if (!hw || !rate)
+               return -EINVAL;
+
+       divider = to_clk_divider(hw);
+
        div = DIV_ROUND_UP(parent_rate, rate);
        value = _get_val(divider, div);
 
index f7a32d2..773bcde 100644 (file)
@@ -60,7 +60,7 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev,
                goto out;
        }
 
-       freq_table = kcalloc(sizeof(*freq_table), (max_opps + 1), GFP_ATOMIC);
+       freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC);
        if (!freq_table) {
                ret = -ENOMEM;
                goto out;
index 6a9d89c..ae2ab14 100644 (file)
@@ -362,8 +362,9 @@ static void jz4740_dma_chan_irq(struct jz4740_dmaengine_chan *chan)
                        vchan_cyclic_callback(&chan->desc->vdesc);
                } else {
                        if (chan->next_sg == chan->desc->num_sgs) {
-                               chan->desc = NULL;
+                               list_del(&chan->desc->vdesc.node);
                                vchan_cookie_complete(&chan->desc->vdesc);
+                               chan->desc = NULL;
                        }
                }
        }
index a56bb35..c846a96 100644 (file)
@@ -22,7 +22,7 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
                        unsigned long map_size, unsigned long desc_size,
                        u32 desc_ver)
 {
-       int node, prev;
+       int node, prev, num_rsv;
        int status;
        u32 fdt_val32;
        u64 fdt_val64;
@@ -73,6 +73,14 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
                prev = node;
        }
 
+       /*
+        * Delete all memory reserve map entries. When booting via UEFI,
+        * the kernel will use the UEFI memory map to find reserved regions.
+        */
+       num_rsv = fdt_num_mem_rsv(fdt);
+       while (num_rsv-- > 0)
+               fdt_del_mem_rsv(fdt, num_rsv);
+
        node = fdt_subnode_offset(fdt, 0, "chosen");
        if (node < 0) {
                node = fdt_add_subnode(fdt, 0, "chosen");
index a2cc6be..b792194 100644 (file)
@@ -67,6 +67,7 @@ static int ast_detect_chip(struct drm_device *dev)
 {
        struct ast_private *ast = dev->dev_private;
        uint32_t data, jreg;
+       ast_open_key(ast);
 
        if (dev->pdev->device == PCI_CHIP_AST1180) {
                ast->chip = AST1100;
@@ -104,7 +105,7 @@ static int ast_detect_chip(struct drm_device *dev)
                        }
                        ast->vga2_clone = false;
                } else {
-                       ast->chip = 2000;
+                       ast->chip = AST2000;
                        DRM_INFO("AST 2000 detected\n");
                }
        }
index 9d7346b..6b7efcf 100644 (file)
@@ -250,6 +250,7 @@ static void bochs_connector_init(struct drm_device *dev)
                           DRM_MODE_CONNECTOR_VIRTUAL);
        drm_connector_helper_add(connector,
                                 &bochs_connector_connector_helper_funcs);
+       drm_connector_register(connector);
 }
 
 
index e1c5c32..c7c5a9d 100644 (file)
@@ -555,6 +555,7 @@ static struct drm_connector *cirrus_vga_init(struct drm_device *dev)
 
        drm_connector_helper_add(connector, &cirrus_vga_connector_helper_funcs);
 
+       drm_connector_register(connector);
        return connector;
 }
 
index 2e7f03a..9933c26 100644 (file)
@@ -1336,12 +1336,17 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
        intel_power_domains_init_hw(dev_priv);
 
+       /*
+        * We enable some interrupt sources in our postinstall hooks, so mark
+        * interrupts as enabled _before_ actually enabling them to avoid
+        * special cases in our ordering checks.
+        */
+       dev_priv->pm._irqs_disabled = false;
+
        ret = drm_irq_install(dev, dev->pdev->irq);
        if (ret)
                goto cleanup_gem_stolen;
 
-       dev_priv->pm._irqs_disabled = false;
-
        /* Important: The output setup functions called by modeset_init need
         * working irqs for e.g. gmbus and dp aux transfers. */
        intel_modeset_init(dev);
index 7a830ea..3524306 100644 (file)
@@ -184,6 +184,7 @@ enum hpd_pin {
                if ((1 << (domain)) & (mask))
 
 struct drm_i915_private;
+struct i915_mm_struct;
 struct i915_mmu_object;
 
 enum intel_dpll_id {
@@ -1506,9 +1507,8 @@ struct drm_i915_private {
        struct i915_gtt gtt; /* VM representing the global address space */
 
        struct i915_gem_mm mm;
-#if defined(CONFIG_MMU_NOTIFIER)
-       DECLARE_HASHTABLE(mmu_notifiers, 7);
-#endif
+       DECLARE_HASHTABLE(mm_structs, 7);
+       struct mutex mm_lock;
 
        /* Kernel Modesetting */
 
@@ -1814,8 +1814,8 @@ struct drm_i915_gem_object {
                        unsigned workers :4;
 #define I915_GEM_USERPTR_MAX_WORKERS 15
 
-                       struct mm_struct *mm;
-                       struct i915_mmu_object *mn;
+                       struct i915_mm_struct *mm;
+                       struct i915_mmu_object *mmu_object;
                        struct work_struct *work;
                } userptr;
        };
index ba7f5c6..ad55b06 100644 (file)
@@ -1590,10 +1590,13 @@ unlock:
 out:
        switch (ret) {
        case -EIO:
-               /* If this -EIO is due to a gpu hang, give the reset code a
-                * chance to clean up the mess. Otherwise return the proper
-                * SIGBUS. */
-               if (i915_terminally_wedged(&dev_priv->gpu_error)) {
+               /*
+                * We eat errors when the gpu is terminally wedged to avoid
+                * userspace unduly crashing (gl has no provisions for mmaps to
+                * fail). But any other -EIO isn't ours (e.g. swap in failure)
+                * and so needs to be reported.
+                */
+               if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
                        ret = VM_FAULT_SIGBUS;
                        break;
                }
index fe69fc8..d384139 100644 (file)
 #include <linux/mempolicy.h>
 #include <linux/swap.h>
 
+struct i915_mm_struct {
+       struct mm_struct *mm;
+       struct drm_device *dev;
+       struct i915_mmu_notifier *mn;
+       struct hlist_node node;
+       struct kref kref;
+       struct work_struct work;
+};
+
 #if defined(CONFIG_MMU_NOTIFIER)
 #include <linux/interval_tree.h>
 
@@ -41,16 +50,12 @@ struct i915_mmu_notifier {
        struct mmu_notifier mn;
        struct rb_root objects;
        struct list_head linear;
-       struct drm_device *dev;
-       struct mm_struct *mm;
-       struct work_struct work;
-       unsigned long count;
        unsigned long serial;
        bool has_linear;
 };
 
 struct i915_mmu_object {
-       struct i915_mmu_notifier *mmu;
+       struct i915_mmu_notifier *mn;
        struct interval_tree_node it;
        struct list_head link;
        struct drm_i915_gem_object *obj;
@@ -96,18 +101,18 @@ static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
                                      unsigned long start,
                                      unsigned long end)
 {
-       struct i915_mmu_object *mmu;
+       struct i915_mmu_object *mo;
        unsigned long serial;
 
 restart:
        serial = mn->serial;
-       list_for_each_entry(mmu, &mn->linear, link) {
+       list_for_each_entry(mo, &mn->linear, link) {
                struct drm_i915_gem_object *obj;
 
-               if (mmu->it.last < start || mmu->it.start > end)
+               if (mo->it.last < start || mo->it.start > end)
                        continue;
 
-               obj = mmu->obj;
+               obj = mo->obj;
                drm_gem_object_reference(&obj->base);
                spin_unlock(&mn->lock);
 
@@ -160,130 +165,47 @@ static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
 };
 
 static struct i915_mmu_notifier *
-__i915_mmu_notifier_lookup(struct drm_device *dev, struct mm_struct *mm)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct i915_mmu_notifier *mmu;
-
-       /* Protected by dev->struct_mutex */
-       hash_for_each_possible(dev_priv->mmu_notifiers, mmu, node, (unsigned long)mm)
-               if (mmu->mm == mm)
-                       return mmu;
-
-       return NULL;
-}
-
-static struct i915_mmu_notifier *
-i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
+i915_mmu_notifier_create(struct mm_struct *mm)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct i915_mmu_notifier *mmu;
+       struct i915_mmu_notifier *mn;
        int ret;
 
-       lockdep_assert_held(&dev->struct_mutex);
-
-       mmu = __i915_mmu_notifier_lookup(dev, mm);
-       if (mmu)
-               return mmu;
-
-       mmu = kmalloc(sizeof(*mmu), GFP_KERNEL);
-       if (mmu == NULL)
+       mn = kmalloc(sizeof(*mn), GFP_KERNEL);
+       if (mn == NULL)
                return ERR_PTR(-ENOMEM);
 
-       spin_lock_init(&mmu->lock);
-       mmu->dev = dev;
-       mmu->mn.ops = &i915_gem_userptr_notifier;
-       mmu->mm = mm;
-       mmu->objects = RB_ROOT;
-       mmu->count = 0;
-       mmu->serial = 1;
-       INIT_LIST_HEAD(&mmu->linear);
-       mmu->has_linear = false;
-
-       /* Protected by mmap_sem (write-lock) */
-       ret = __mmu_notifier_register(&mmu->mn, mm);
+       spin_lock_init(&mn->lock);
+       mn->mn.ops = &i915_gem_userptr_notifier;
+       mn->objects = RB_ROOT;
+       mn->serial = 1;
+       INIT_LIST_HEAD(&mn->linear);
+       mn->has_linear = false;
+
+        /* Protected by mmap_sem (write-lock) */
+       ret = __mmu_notifier_register(&mn->mn, mm);
        if (ret) {
-               kfree(mmu);
+               kfree(mn);
                return ERR_PTR(ret);
        }
 
-       /* Protected by dev->struct_mutex */
-       hash_add(dev_priv->mmu_notifiers, &mmu->node, (unsigned long)mm);
-       return mmu;
+       return mn;
 }
 
-static void
-__i915_mmu_notifier_destroy_worker(struct work_struct *work)
+static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mn)
 {
-       struct i915_mmu_notifier *mmu = container_of(work, typeof(*mmu), work);
-       mmu_notifier_unregister(&mmu->mn, mmu->mm);
-       kfree(mmu);
-}
-
-static void
-__i915_mmu_notifier_destroy(struct i915_mmu_notifier *mmu)
-{
-       lockdep_assert_held(&mmu->dev->struct_mutex);
-
-       /* Protected by dev->struct_mutex */
-       hash_del(&mmu->node);
-
-       /* Our lock ordering is: mmap_sem, mmu_notifier_scru, struct_mutex.
-        * We enter the function holding struct_mutex, therefore we need
-        * to drop our mutex prior to calling mmu_notifier_unregister in
-        * order to prevent lock inversion (and system-wide deadlock)
-        * between the mmap_sem and struct-mutex. Hence we defer the
-        * unregistration to a workqueue where we hold no locks.
-        */
-       INIT_WORK(&mmu->work, __i915_mmu_notifier_destroy_worker);
-       schedule_work(&mmu->work);
-}
-
-static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
-{
-       if (++mmu->serial == 0)
-               mmu->serial = 1;
-}
-
-static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mmu)
-{
-       struct i915_mmu_object *mn;
-
-       list_for_each_entry(mn, &mmu->linear, link)
-               if (mn->is_linear)
-                       return true;
-
-       return false;
-}
-
-static void
-i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
-                     struct i915_mmu_object *mn)
-{
-       lockdep_assert_held(&mmu->dev->struct_mutex);
-
-       spin_lock(&mmu->lock);
-       list_del(&mn->link);
-       if (mn->is_linear)
-               mmu->has_linear = i915_mmu_notifier_has_linear(mmu);
-       else
-               interval_tree_remove(&mn->it, &mmu->objects);
-       __i915_mmu_notifier_update_serial(mmu);
-       spin_unlock(&mmu->lock);
-
-       /* Protected against _add() by dev->struct_mutex */
-       if (--mmu->count == 0)
-               __i915_mmu_notifier_destroy(mmu);
+       if (++mn->serial == 0)
+               mn->serial = 1;
 }
 
 static int
-i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
-                     struct i915_mmu_object *mn)
+i915_mmu_notifier_add(struct drm_device *dev,
+                     struct i915_mmu_notifier *mn,
+                     struct i915_mmu_object *mo)
 {
        struct interval_tree_node *it;
        int ret;
 
-       ret = i915_mutex_lock_interruptible(mmu->dev);
+       ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;
 
@@ -291,11 +213,11 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
         * remove the objects from the interval tree) before we do
         * the check for overlapping objects.
         */
-       i915_gem_retire_requests(mmu->dev);
+       i915_gem_retire_requests(dev);
 
-       spin_lock(&mmu->lock);
-       it = interval_tree_iter_first(&mmu->objects,
-                                     mn->it.start, mn->it.last);
+       spin_lock(&mn->lock);
+       it = interval_tree_iter_first(&mn->objects,
+                                     mo->it.start, mo->it.last);
        if (it) {
                struct drm_i915_gem_object *obj;
 
@@ -312,86 +234,122 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
 
                obj = container_of(it, struct i915_mmu_object, it)->obj;
                if (!obj->userptr.workers)
-                       mmu->has_linear = mn->is_linear = true;
+                       mn->has_linear = mo->is_linear = true;
                else
                        ret = -EAGAIN;
        } else
-               interval_tree_insert(&mn->it, &mmu->objects);
+               interval_tree_insert(&mo->it, &mn->objects);
 
        if (ret == 0) {
-               list_add(&mn->link, &mmu->linear);
-               __i915_mmu_notifier_update_serial(mmu);
+               list_add(&mo->link, &mn->linear);
+               __i915_mmu_notifier_update_serial(mn);
        }
-       spin_unlock(&mmu->lock);
-       mutex_unlock(&mmu->dev->struct_mutex);
+       spin_unlock(&mn->lock);
+       mutex_unlock(&dev->struct_mutex);
 
        return ret;
 }
 
+static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
+{
+       struct i915_mmu_object *mo;
+
+       list_for_each_entry(mo, &mn->linear, link)
+               if (mo->is_linear)
+                       return true;
+
+       return false;
+}
+
+static void
+i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
+                     struct i915_mmu_object *mo)
+{
+       spin_lock(&mn->lock);
+       list_del(&mo->link);
+       if (mo->is_linear)
+               mn->has_linear = i915_mmu_notifier_has_linear(mn);
+       else
+               interval_tree_remove(&mo->it, &mn->objects);
+       __i915_mmu_notifier_update_serial(mn);
+       spin_unlock(&mn->lock);
+}
+
 static void
 i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
 {
-       struct i915_mmu_object *mn;
+       struct i915_mmu_object *mo;
 
-       mn = obj->userptr.mn;
-       if (mn == NULL)
+       mo = obj->userptr.mmu_object;
+       if (mo == NULL)
                return;
 
-       i915_mmu_notifier_del(mn->mmu, mn);
-       obj->userptr.mn = NULL;
+       i915_mmu_notifier_del(mo->mn, mo);
+       kfree(mo);
+
+       obj->userptr.mmu_object = NULL;
+}
+
+static struct i915_mmu_notifier *
+i915_mmu_notifier_find(struct i915_mm_struct *mm)
+{
+       if (mm->mn == NULL) {
+               down_write(&mm->mm->mmap_sem);
+               mutex_lock(&to_i915(mm->dev)->mm_lock);
+               if (mm->mn == NULL)
+                       mm->mn = i915_mmu_notifier_create(mm->mm);
+               mutex_unlock(&to_i915(mm->dev)->mm_lock);
+               up_write(&mm->mm->mmap_sem);
+       }
+       return mm->mn;
 }
 
 static int
 i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
                                    unsigned flags)
 {
-       struct i915_mmu_notifier *mmu;
-       struct i915_mmu_object *mn;
+       struct i915_mmu_notifier *mn;
+       struct i915_mmu_object *mo;
        int ret;
 
        if (flags & I915_USERPTR_UNSYNCHRONIZED)
                return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
 
-       down_write(&obj->userptr.mm->mmap_sem);
-       ret = i915_mutex_lock_interruptible(obj->base.dev);
-       if (ret == 0) {
-               mmu = i915_mmu_notifier_get(obj->base.dev, obj->userptr.mm);
-               if (!IS_ERR(mmu))
-                       mmu->count++; /* preemptive add to act as a refcount */
-               else
-                       ret = PTR_ERR(mmu);
-               mutex_unlock(&obj->base.dev->struct_mutex);
-       }
-       up_write(&obj->userptr.mm->mmap_sem);
-       if (ret)
-               return ret;
+       if (WARN_ON(obj->userptr.mm == NULL))
+               return -EINVAL;
 
-       mn = kzalloc(sizeof(*mn), GFP_KERNEL);
-       if (mn == NULL) {
-               ret = -ENOMEM;
-               goto destroy_mmu;
-       }
+       mn = i915_mmu_notifier_find(obj->userptr.mm);
+       if (IS_ERR(mn))
+               return PTR_ERR(mn);
 
-       mn->mmu = mmu;
-       mn->it.start = obj->userptr.ptr;
-       mn->it.last = mn->it.start + obj->base.size - 1;
-       mn->obj = obj;
+       mo = kzalloc(sizeof(*mo), GFP_KERNEL);
+       if (mo == NULL)
+               return -ENOMEM;
 
-       ret = i915_mmu_notifier_add(mmu, mn);
-       if (ret)
-               goto free_mn;
+       mo->mn = mn;
+       mo->it.start = obj->userptr.ptr;
+       mo->it.last = mo->it.start + obj->base.size - 1;
+       mo->obj = obj;
 
-       obj->userptr.mn = mn;
+       ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
+       if (ret) {
+               kfree(mo);
+               return ret;
+       }
+
+       obj->userptr.mmu_object = mo;
        return 0;
+}
+
+static void
+i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
+                      struct mm_struct *mm)
+{
+       if (mn == NULL)
+               return;
 
-free_mn:
+       mmu_notifier_unregister(&mn->mn, mm);
        kfree(mn);
-destroy_mmu:
-       mutex_lock(&obj->base.dev->struct_mutex);
-       if (--mmu->count == 0)
-               __i915_mmu_notifier_destroy(mmu);
-       mutex_unlock(&obj->base.dev->struct_mutex);
-       return ret;
 }
 
 #else
@@ -413,15 +371,114 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
 
        return 0;
 }
+
+static void
+i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
+                      struct mm_struct *mm)
+{
+}
+
 #endif
 
+static struct i915_mm_struct *
+__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
+{
+       struct i915_mm_struct *mm;
+
+       /* Protected by dev_priv->mm_lock */
+       hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
+               if (mm->mm == real)
+                       return mm;
+
+       return NULL;
+}
+
+static int
+i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+       struct i915_mm_struct *mm;
+       int ret = 0;
+
+       /* During release of the GEM object we hold the struct_mutex. This
+        * precludes us from calling mmput() at that time as that may be
+        * the last reference and so call exit_mmap(). exit_mmap() will
+        * attempt to reap the vma, and if we were holding a GTT mmap
+        * would then call drm_gem_vm_close() and attempt to reacquire
+        * the struct mutex. So in order to avoid that recursion, we have
+        * to defer releasing the mm reference until after we drop the
+        * struct_mutex, i.e. we need to schedule a worker to do the clean
+        * up.
+        */
+       mutex_lock(&dev_priv->mm_lock);
+       mm = __i915_mm_struct_find(dev_priv, current->mm);
+       if (mm == NULL) {
+               mm = kmalloc(sizeof(*mm), GFP_KERNEL);
+               if (mm == NULL) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               kref_init(&mm->kref);
+               mm->dev = obj->base.dev;
+
+               mm->mm = current->mm;
+               atomic_inc(&current->mm->mm_count);
+
+               mm->mn = NULL;
+
+               /* Protected by dev_priv->mm_lock */
+               hash_add(dev_priv->mm_structs,
+                        &mm->node, (unsigned long)mm->mm);
+       } else
+               kref_get(&mm->kref);
+
+       obj->userptr.mm = mm;
+out:
+       mutex_unlock(&dev_priv->mm_lock);
+       return ret;
+}
+
+static void
+__i915_mm_struct_free__worker(struct work_struct *work)
+{
+       struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
+       i915_mmu_notifier_free(mm->mn, mm->mm);
+       mmdrop(mm->mm);
+       kfree(mm);
+}
+
+static void
+__i915_mm_struct_free(struct kref *kref)
+{
+       struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);
+
+       /* Protected by dev_priv->mm_lock */
+       hash_del(&mm->node);
+       mutex_unlock(&to_i915(mm->dev)->mm_lock);
+
+       INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
+       schedule_work(&mm->work);
+}
+
+static void
+i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
+{
+       if (obj->userptr.mm == NULL)
+               return;
+
+       kref_put_mutex(&obj->userptr.mm->kref,
+                      __i915_mm_struct_free,
+                      &to_i915(obj->base.dev)->mm_lock);
+       obj->userptr.mm = NULL;
+}
+
 struct get_pages_work {
        struct work_struct work;
        struct drm_i915_gem_object *obj;
        struct task_struct *task;
 };
 
-
 #if IS_ENABLED(CONFIG_SWIOTLB)
 #define swiotlb_active() swiotlb_nr_tbl()
 #else
@@ -479,7 +536,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
        if (pvec == NULL)
                pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
        if (pvec != NULL) {
-               struct mm_struct *mm = obj->userptr.mm;
+               struct mm_struct *mm = obj->userptr.mm->mm;
 
                down_read(&mm->mmap_sem);
                while (pinned < num_pages) {
@@ -545,7 +602,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 
        pvec = NULL;
        pinned = 0;
-       if (obj->userptr.mm == current->mm) {
+       if (obj->userptr.mm->mm == current->mm) {
                pvec = kmalloc(num_pages*sizeof(struct page *),
                               GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
                if (pvec == NULL) {
@@ -651,17 +708,13 @@ static void
 i915_gem_userptr_release(struct drm_i915_gem_object *obj)
 {
        i915_gem_userptr_release__mmu_notifier(obj);
-
-       if (obj->userptr.mm) {
-               mmput(obj->userptr.mm);
-               obj->userptr.mm = NULL;
-       }
+       i915_gem_userptr_release__mm_struct(obj);
 }
 
 static int
 i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
 {
-       if (obj->userptr.mn)
+       if (obj->userptr.mmu_object)
                return 0;
 
        return i915_gem_userptr_init__mmu_notifier(obj, 0);
@@ -736,7 +789,6 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
                return -ENODEV;
        }
 
-       /* Allocate the new object */
        obj = i915_gem_object_alloc(dev);
        if (obj == NULL)
                return -ENOMEM;
@@ -754,8 +806,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
         * at binding. This means that we need to hook into the mmu_notifier
         * in order to detect if the mmu is destroyed.
         */
-       ret = -ENOMEM;
-       if ((obj->userptr.mm = get_task_mm(current)))
+       ret = i915_gem_userptr_init__mm_struct(obj);
+       if (ret == 0)
                ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
        if (ret == 0)
                ret = drm_gem_handle_create(file, &obj->base, &handle);
@@ -772,9 +824,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 int
 i915_gem_init_userptr(struct drm_device *dev)
 {
-#if defined(CONFIG_MMU_NOTIFIER)
        struct drm_i915_private *dev_priv = to_i915(dev);
-       hash_init(dev_priv->mmu_notifiers);
-#endif
+       mutex_init(&dev_priv->mm_lock);
+       hash_init(dev_priv->mm_structs);
        return 0;
 }
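
Editor's note: the i915_gem_userptr.c hunks above replace per-object mmu-notifier bookkeeping with a per-process i915_mm_struct that is looked up in a hash table under dev_priv->mm_lock, reference counted with a kref, and torn down from a workqueue so mmu_notifier_unregister() and mmdrop() run with no locks held (avoiding the struct_mutex vs mmap_sem inversion described in the removed comment). Below is a minimal, hedged sketch of that lookup/refcount/deferred-free pattern; my_mm, my_lock and the helper names are illustrative, not driver symbols.

#include <linux/hashtable.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(my_lock);		/* plays the role of dev_priv->mm_lock */

struct my_mm {
	struct kref kref;
	struct hlist_node node;		/* entry in the lookup hash table */
	struct mm_struct *mm;
	struct work_struct work;
};

static void my_mm_free_worker(struct work_struct *work)
{
	struct my_mm *p = container_of(work, typeof(*p), work);

	/* Runs with no locks held, so sleeping teardown is safe here. */
	mmdrop(p->mm);
	kfree(p);
}

static void my_mm_release(struct kref *kref)
{
	struct my_mm *p = container_of(kref, typeof(*p), kref);

	/* Called by kref_put_mutex() with my_lock held. */
	hash_del(&p->node);
	mutex_unlock(&my_lock);

	/* Defer the heavy work to a context that holds nothing. */
	INIT_WORK(&p->work, my_mm_free_worker);
	schedule_work(&p->work);
}

static void my_mm_put(struct my_mm *p)
{
	/* Takes my_lock only when this drops the last reference. */
	kref_put_mutex(&p->kref, my_mm_release, &my_lock);
}
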
index e4d7607..f29b44c 100644 (file)
 #define GFX_OP_DESTBUFFER_INFO  ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
 #define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
 #define GFX_OP_DRAWRECT_INFO_I965  ((0x7900<<16)|0x2)
-#define SRC_COPY_BLT_CMD                ((2<<29)|(0x43<<22)|4)
+
+#define COLOR_BLT_CMD                  (2<<29 | 0x40<<22 | (5-2))
+#define SRC_COPY_BLT_CMD               ((2<<29)|(0x43<<22)|4)
 #define XY_SRC_COPY_BLT_CMD            ((2<<29)|(0x53<<22)|6)
 #define XY_MONO_SRC_COPY_IMM_BLT       ((2<<29)|(0x71<<22)|5)
-#define XY_SRC_COPY_BLT_WRITE_ALPHA    (1<<21)
-#define XY_SRC_COPY_BLT_WRITE_RGB      (1<<20)
+#define   BLT_WRITE_A                  (2<<20)
+#define   BLT_WRITE_RGB                        (1<<20)
+#define   BLT_WRITE_RGBA               (BLT_WRITE_RGB | BLT_WRITE_A)
 #define   BLT_DEPTH_8                  (0<<24)
 #define   BLT_DEPTH_16_565             (1<<24)
 #define   BLT_DEPTH_16_1555            (2<<24)
 #define   BLT_DEPTH_32                 (3<<24)
-#define   BLT_ROP_GXCOPY               (0xcc<<16)
+#define   BLT_ROP_SRC_COPY             (0xcc<<16)
+#define   BLT_ROP_COLOR_COPY           (0xf0<<16)
 #define XY_SRC_COPY_BLT_SRC_TILED      (1<<15) /* 965+ only */
 #define XY_SRC_COPY_BLT_DST_TILED      (1<<11) /* 965+ only */
 #define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
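
Editor's note: the blitter opcodes above pack three fields into a single command header dword: a client id in the top bits, an opcode, and a "total DWords minus two" length, which is why COLOR_BLT_CMD spells its length out as (5-2). A hedged sketch of that composition follows; the field layout is inferred from the defines above and the helper macro names are mine, not the driver's.

/* Inferred GEN2-style 2D command header: client in bits 31:29, opcode in
 * bits 28:22, (number of DWords - 2) in the low bits.  Illustrative names.
 */
#define BLT_CLIENT_2D		(2 << 29)
#define BLT_OPCODE(op)		((op) << 22)
#define BLT_LEN(dwords)		((dwords) - 2)

#define MY_COLOR_BLT_CMD	(BLT_CLIENT_2D | BLT_OPCODE(0x40) | BLT_LEN(5))
#define MY_SRC_COPY_BLT_CMD	(BLT_CLIENT_2D | BLT_OPCODE(0x43) | BLT_LEN(6))

/* MY_COLOR_BLT_CMD and MY_SRC_COPY_BLT_CMD expand to the same values as
 * COLOR_BLT_CMD and SRC_COPY_BLT_CMD above.
 */
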
index 81d7681..fdff1d4 100644 (file)
@@ -1631,6 +1631,10 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
 
        pipe_config->adjusted_mode.flags |= flags;
 
+       if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
+           tmp & DP_COLOR_RANGE_16_235)
+               pipe_config->limited_color_range = true;
+
        pipe_config->has_dp_encoder = true;
 
        intel_dp_get_m_n(crtc, pipe_config);
index f9151f6..ca34de7 100644 (file)
@@ -712,7 +712,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
                                  struct intel_crtc_config *pipe_config)
 {
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_device *dev = encoder->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 tmp, flags = 0;
        int dotclock;
 
@@ -734,6 +735,10 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
        if (tmp & HDMI_MODE_SELECT_HDMI)
                pipe_config->has_audio = true;
 
+       if (!HAS_PCH_SPLIT(dev) &&
+           tmp & HDMI_COLOR_RANGE_16_235)
+               pipe_config->limited_color_range = true;
+
        pipe_config->adjusted_mode.flags |= flags;
 
        if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
index 16371a4..47a126a 100644 (file)
@@ -1363,54 +1363,66 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
 
 /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
 #define I830_BATCH_LIMIT (256*1024)
+#define I830_TLB_ENTRIES (2)
+#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
 static int
 i830_dispatch_execbuffer(struct intel_engine_cs *ring,
                                u64 offset, u32 len,
                                unsigned flags)
 {
+       u32 cs_offset = ring->scratch.gtt_offset;
        int ret;
 
-       if (flags & I915_DISPATCH_PINNED) {
-               ret = intel_ring_begin(ring, 4);
-               if (ret)
-                       return ret;
+       ret = intel_ring_begin(ring, 6);
+       if (ret)
+               return ret;
 
-               intel_ring_emit(ring, MI_BATCH_BUFFER);
-               intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
-               intel_ring_emit(ring, offset + len - 8);
-               intel_ring_emit(ring, MI_NOOP);
-               intel_ring_advance(ring);
-       } else {
-               u32 cs_offset = ring->scratch.gtt_offset;
+       /* Evict the invalid PTE TLBs */
+       intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
+       intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
+       intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
+       intel_ring_emit(ring, cs_offset);
+       intel_ring_emit(ring, 0xdeadbeef);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
 
+       if ((flags & I915_DISPATCH_PINNED) == 0) {
                if (len > I830_BATCH_LIMIT)
                        return -ENOSPC;
 
-               ret = intel_ring_begin(ring, 9+3);
+               ret = intel_ring_begin(ring, 6 + 2);
                if (ret)
                        return ret;
-               /* Blit the batch (which has now all relocs applied) to the stable batch
-                * scratch bo area (so that the CS never stumbles over its tlb
-                * invalidation bug) ... */
-               intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
-                               XY_SRC_COPY_BLT_WRITE_ALPHA |
-                               XY_SRC_COPY_BLT_WRITE_RGB);
-               intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
-               intel_ring_emit(ring, 0);
-               intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
+
+               /* Blit the batch (which has now all relocs applied) to the
+                * stable batch scratch bo area (so that the CS never
+                * stumbles over its tlb invalidation bug) ...
+                */
+               intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
+               intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
+               intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
                intel_ring_emit(ring, cs_offset);
-               intel_ring_emit(ring, 0);
                intel_ring_emit(ring, 4096);
                intel_ring_emit(ring, offset);
+
                intel_ring_emit(ring, MI_FLUSH);
+               intel_ring_emit(ring, MI_NOOP);
+               intel_ring_advance(ring);
 
                /* ... and execute it. */
-               intel_ring_emit(ring, MI_BATCH_BUFFER);
-               intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
-               intel_ring_emit(ring, cs_offset + len - 8);
-               intel_ring_advance(ring);
+               offset = cs_offset;
        }
 
+       ret = intel_ring_begin(ring, 4);
+       if (ret)
+               return ret;
+
+       intel_ring_emit(ring, MI_BATCH_BUFFER);
+       intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+       intel_ring_emit(ring, offset + len - 8);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
+
        return 0;
 }
 
@@ -2200,7 +2212,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 
        /* Workaround batchbuffer to combat CS tlb bug. */
        if (HAS_BROKEN_CS_TLB(dev)) {
-               obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
+               obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
                if (obj == NULL) {
                        DRM_ERROR("Failed to allocate batch bo\n");
                        return -ENOMEM;
index c69d3ce..c14341c 100644 (file)
@@ -854,6 +854,10 @@ intel_enable_tv(struct intel_encoder *encoder)
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
+       /* Prevents vblank waits from timing out in intel_tv_detect_type() */
+       intel_wait_for_vblank(encoder->base.dev,
+                             to_intel_crtc(encoder->base.crtc)->pipe);
+
        I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
 }
 
index a125a7e..c6c9b02 100644 (file)
@@ -258,28 +258,30 @@ static void set_hdmi_pdev(struct drm_device *dev,
        priv->hdmi_pdev = pdev;
 }
 
+#ifdef CONFIG_OF
+static int get_gpio(struct device *dev, struct device_node *of_node, const char *name)
+{
+       int gpio = of_get_named_gpio(of_node, name, 0);
+       if (gpio < 0) {
+               char name2[32];
+               snprintf(name2, sizeof(name2), "%s-gpio", name);
+               gpio = of_get_named_gpio(of_node, name2, 0);
+               if (gpio < 0) {
+                       dev_err(dev, "failed to get gpio: %s (%d)\n",
+                                       name, gpio);
+                       gpio = -1;
+               }
+       }
+       return gpio;
+}
+#endif
+
 static int hdmi_bind(struct device *dev, struct device *master, void *data)
 {
        static struct hdmi_platform_config config = {};
 #ifdef CONFIG_OF
        struct device_node *of_node = dev->of_node;
 
-       int get_gpio(const char *name)
-       {
-               int gpio = of_get_named_gpio(of_node, name, 0);
-               if (gpio < 0) {
-                       char name2[32];
-                       snprintf(name2, sizeof(name2), "%s-gpio", name);
-                       gpio = of_get_named_gpio(of_node, name2, 0);
-                       if (gpio < 0) {
-                               dev_err(dev, "failed to get gpio: %s (%d)\n",
-                                               name, gpio);
-                               gpio = -1;
-                       }
-               }
-               return gpio;
-       }
-
        if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8074")) {
                static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
                static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
@@ -312,12 +314,12 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
        }
 
        config.mmio_name     = "core_physical";
-       config.ddc_clk_gpio  = get_gpio("qcom,hdmi-tx-ddc-clk");
-       config.ddc_data_gpio = get_gpio("qcom,hdmi-tx-ddc-data");
-       config.hpd_gpio      = get_gpio("qcom,hdmi-tx-hpd");
-       config.mux_en_gpio   = get_gpio("qcom,hdmi-tx-mux-en");
-       config.mux_sel_gpio  = get_gpio("qcom,hdmi-tx-mux-sel");
-       config.mux_lpm_gpio  = get_gpio("qcom,hdmi-tx-mux-lpm");
+       config.ddc_clk_gpio  = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk");
+       config.ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data");
+       config.hpd_gpio      = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd");
+       config.mux_en_gpio   = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en");
+       config.mux_sel_gpio  = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel");
+       config.mux_lpm_gpio  = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm");
 
 #else
        static const char *hpd_clk_names[] = {
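
Editor's note: the msm hdmi_bind() hunk above removes a function that was declared inside the caller, a GCC-only nested-function extension that let get_gpio() capture dev and of_node from the surrounding scope, and replaces it with a file-scope static taking both as parameters. A minimal illustration of that transformation (not driver code) follows.

/* Before (GCC nested-function extension), the helper captured "base":
 *
 *	int outer(int base)
 *	{
 *		int helper(int idx) { return base + idx; }
 *		return helper(3);
 *	}
 *
 * After: the same helper at file scope, with its context passed explicitly.
 */
static int helper(int base, int idx)
{
	return base + idx;
}

int outer(int base)
{
	return helper(base, 3);
}
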
index 902d768..f408b69 100644 (file)
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#ifdef CONFIG_COMMON_CLK
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
+#endif
 
 #include "hdmi.h"
 
 struct hdmi_phy_8960 {
        struct hdmi_phy base;
        struct hdmi *hdmi;
+#ifdef CONFIG_COMMON_CLK
        struct clk_hw pll_hw;
        struct clk *pll;
        unsigned long pixclk;
+#endif
 };
 #define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base)
+
+#ifdef CONFIG_COMMON_CLK
 #define clk_to_phy(x) container_of(x, struct hdmi_phy_8960, pll_hw)
 
 /*
@@ -374,7 +380,7 @@ static struct clk_init_data pll_init = {
        .parent_names = hdmi_pll_parents,
        .num_parents = ARRAY_SIZE(hdmi_pll_parents),
 };
-
+#endif
 
 /*
  * HDMI Phy:
@@ -480,12 +486,15 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
 {
        struct hdmi_phy_8960 *phy_8960;
        struct hdmi_phy *phy = NULL;
-       int ret, i;
+       int ret;
+#ifdef CONFIG_COMMON_CLK
+       int i;
 
        /* sanity check: */
        for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++)
                if (WARN_ON(freqtbl[i].rate < freqtbl[i+1].rate))
                        return ERR_PTR(-EINVAL);
+#endif
 
        phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL);
        if (!phy_8960) {
@@ -499,6 +508,7 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
 
        phy_8960->hdmi = hdmi;
 
+#ifdef CONFIG_COMMON_CLK
        phy_8960->pll_hw.init = &pll_init;
        phy_8960->pll = devm_clk_register(hdmi->dev->dev, &phy_8960->pll_hw);
        if (IS_ERR(phy_8960->pll)) {
@@ -506,6 +516,7 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
                phy_8960->pll = NULL;
                goto fail;
        }
+#endif
 
        return phy;
 
index 26ee80d..fcf9568 100644 (file)
@@ -52,7 +52,7 @@ module_param(reglog, bool, 0600);
 #define reglog 0
 #endif
 
-static char *vram;
+static char *vram = "16m";
 MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
 module_param(vram, charp, 0);
 
index 0a44459..05a278b 100644 (file)
@@ -200,7 +200,6 @@ nvc0_bar_init(struct nouveau_object *object)
 
        nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
        nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
-       nv_mask(priv, 0x100c80, 0x00000001, 0x00000000);
 
        nv_wr32(priv, 0x001704, 0x80000000 | priv->bar[1].mem->addr >> 12);
        if (priv->bar[0].mem)
index b19a2b3..32f28dc 100644 (file)
@@ -60,6 +60,7 @@ nvc0_fb_init(struct nouveau_object *object)
 
        if (priv->r100c10_page)
                nv_wr32(priv, 0x100c10, priv->r100c10 >> 8);
+       nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
        return 0;
 }
 
index b54b582..d5d6528 100644 (file)
@@ -98,6 +98,7 @@ static int
 gf100_ltc_init(struct nouveau_object *object)
 {
        struct nvkm_ltc_priv *priv = (void *)object;
+       u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
        int ret;
 
        ret = nvkm_ltc_init(priv);
@@ -107,6 +108,7 @@ gf100_ltc_init(struct nouveau_object *object)
        nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
        nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
        nv_wr32(priv, 0x17e8d4, priv->tag_base);
+       nv_mask(priv, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
        return 0;
 }
 
index ea71656..b39b5d0 100644 (file)
@@ -28,6 +28,7 @@ static int
 gk104_ltc_init(struct nouveau_object *object)
 {
        struct nvkm_ltc_priv *priv = (void *)object;
+       u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
        int ret;
 
        ret = nvkm_ltc_init(priv);
@@ -37,6 +38,7 @@ gk104_ltc_init(struct nouveau_object *object)
        nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
        nv_wr32(priv, 0x17e000, priv->ltc_nr);
        nv_wr32(priv, 0x17e8d4, priv->tag_base);
+       nv_mask(priv, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
        return 0;
 }
 
index 4761b2e..a4de642 100644 (file)
@@ -98,6 +98,7 @@ static int
 gm107_ltc_init(struct nouveau_object *object)
 {
        struct nvkm_ltc_priv *priv = (void *)object;
+       u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
        int ret;
 
        ret = nvkm_ltc_init(priv);
@@ -106,6 +107,7 @@ gm107_ltc_init(struct nouveau_object *object)
 
        nv_wr32(priv, 0x17e27c, priv->ltc_nr);
        nv_wr32(priv, 0x17e278, priv->tag_base);
+       nv_mask(priv, 0x17e264, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
        return 0;
 }
 
index 2792069..6224246 100644 (file)
@@ -46,7 +46,6 @@ static struct nouveau_dsm_priv {
        bool dsm_detected;
        bool optimus_detected;
        acpi_handle dhandle;
-       acpi_handle other_handle;
        acpi_handle rom_handle;
 } nouveau_dsm_priv;
 
@@ -222,10 +221,9 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
        if (!dhandle)
                return false;
 
-       if (!acpi_has_method(dhandle, "_DSM")) {
-               nouveau_dsm_priv.other_handle = dhandle;
+       if (!acpi_has_method(dhandle, "_DSM"))
                return false;
-       }
+
        if (acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102,
                           1 << NOUVEAU_DSM_POWER))
                retval |= NOUVEAU_DSM_HAS_MUX;
@@ -301,16 +299,6 @@ static bool nouveau_dsm_detect(void)
                printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
                        acpi_method_name);
                nouveau_dsm_priv.dsm_detected = true;
-               /*
-                * On some systems hotplug events are generated for the device
-                * being switched off when _DSM is executed.  They cause ACPI
-                * hotplug to trigger and attempt to remove the device from
-                * the system, which causes it to break down.  Prevent that from
-                * happening by setting the no_hotplug flag for the involved
-                * ACPI device objects.
-                */
-               acpi_bus_no_hotplug(nouveau_dsm_priv.dhandle);
-               acpi_bus_no_hotplug(nouveau_dsm_priv.other_handle);
                ret = true;
        }
 
index 250a5e8..9c3af96 100644 (file)
@@ -627,6 +627,7 @@ int nouveau_pmops_suspend(struct device *dev)
 
        pci_save_state(pdev);
        pci_disable_device(pdev);
+       pci_ignore_hotplug(pdev);
        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
 }
index 18d55d4..c7592ec 100644 (file)
@@ -108,7 +108,16 @@ void
 nouveau_vga_fini(struct nouveau_drm *drm)
 {
        struct drm_device *dev = drm->dev;
+       bool runtime = false;
+
+       if (nouveau_runtime_pm == 1)
+               runtime = true;
+       if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
+               runtime = true;
+
        vga_switcheroo_unregister_client(dev->pdev);
+       if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
+               vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
        vga_client_register(dev->pdev, NULL, NULL, NULL);
 }
 
index b1e11f8..ac14b67 100644 (file)
@@ -405,16 +405,13 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
        u8 msg[DP_DPCD_SIZE];
        int ret;
 
-       char dpcd_hex_dump[DP_DPCD_SIZE * 3];
-
        ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
                               DP_DPCD_SIZE);
        if (ret > 0) {
                memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
 
-               hex_dump_to_buffer(dig_connector->dpcd, sizeof(dig_connector->dpcd),
-                                  32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
-               DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
+               DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
+                             dig_connector->dpcd);
 
                radeon_dp_probe_oui(radeon_connector);
 
index 192278b..c4ffa54 100644 (file)
@@ -489,13 +489,6 @@ int cik_sdma_resume(struct radeon_device *rdev)
 {
        int r;
 
-       /* Reset dma */
-       WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
-       RREG32(SRBM_SOFT_RESET);
-       udelay(50);
-       WREG32(SRBM_SOFT_RESET, 0);
-       RREG32(SRBM_SOFT_RESET);
-
        r = cik_sdma_load_microcode(rdev);
        if (r)
                return r;
index 8b58e11..67cb472 100644 (file)
@@ -33,6 +33,8 @@
 #define KV_MINIMUM_ENGINE_CLOCK         800
 #define SMC_RAM_END                     0x40000
 
+static int kv_enable_nb_dpm(struct radeon_device *rdev,
+                           bool enable);
 static void kv_init_graphics_levels(struct radeon_device *rdev);
 static int kv_calculate_ds_divider(struct radeon_device *rdev);
 static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
@@ -1295,6 +1297,9 @@ void kv_dpm_disable(struct radeon_device *rdev)
 {
        kv_smc_bapm_enable(rdev, false);
 
+       if (rdev->family == CHIP_MULLINS)
+               kv_enable_nb_dpm(rdev, false);
+
        /* powerup blocks */
        kv_dpm_powergate_acp(rdev, false);
        kv_dpm_powergate_samu(rdev, false);
@@ -1769,15 +1774,24 @@ static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
        return ret;
 }
 
-static int kv_enable_nb_dpm(struct radeon_device *rdev)
+static int kv_enable_nb_dpm(struct radeon_device *rdev,
+                           bool enable)
 {
        struct kv_power_info *pi = kv_get_pi(rdev);
        int ret = 0;
 
-       if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
-               ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
-               if (ret == 0)
-                       pi->nb_dpm_enabled = true;
+       if (enable) {
+               if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
+                       ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
+                       if (ret == 0)
+                               pi->nb_dpm_enabled = true;
+               }
+       } else {
+               if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
+                       ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Disable);
+                       if (ret == 0)
+                               pi->nb_dpm_enabled = false;
+               }
        }
 
        return ret;
@@ -1864,7 +1878,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
                        }
                        kv_update_sclk_t(rdev);
                        if (rdev->family == CHIP_MULLINS)
-                               kv_enable_nb_dpm(rdev);
+                               kv_enable_nb_dpm(rdev, true);
                }
        } else {
                if (pi->enable_dpm) {
@@ -1889,7 +1903,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
                        }
                        kv_update_acp_boot_level(rdev);
                        kv_update_sclk_t(rdev);
-                       kv_enable_nb_dpm(rdev);
+                       kv_enable_nb_dpm(rdev, true);
                }
        }
 
index 8a3e622..f26f0a9 100644 (file)
@@ -191,12 +191,6 @@ int cayman_dma_resume(struct radeon_device *rdev)
        u32 reg_offset, wb_offset;
        int i, r;
 
-       /* Reset dma */
-       WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
-       RREG32(SRBM_SOFT_RESET);
-       udelay(50);
-       WREG32(SRBM_SOFT_RESET, 0);
-
        for (i = 0; i < 2; i++) {
                if (i == 0) {
                        ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
index 4c5ec44..b0098e7 100644 (file)
@@ -821,6 +821,20 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
                return RREG32(RADEON_CRTC2_CRNT_FRAME);
 }
 
+/**
+ * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
+ * @rdev: radeon device structure
+ * @ring: ring buffer struct for emitting packets
+ */
+static void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+       radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+       radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
+                               RADEON_HDP_READ_BUFFER_INVALIDATE);
+       radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+       radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
+}
+
 /* Whoever calls radeon_fence_emit should call ring_lock and ask
  * for enough space (today callers are ib schedule and buffer move) */
 void r100_fence_ring_emit(struct radeon_device *rdev,
@@ -1056,20 +1070,6 @@ void r100_gfx_set_wptr(struct radeon_device *rdev,
        (void)RREG32(RADEON_CP_RB_WPTR);
 }
 
-/**
- * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
- * rdev: radeon device structure
- * ring: ring buffer struct for emitting packets
- */
-void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
-{
-       radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
-       radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
-                               RADEON_HDP_READ_BUFFER_INVALIDATE);
-       radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
-       radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
-}
-
 static void r100_cp_load_microcode(struct radeon_device *rdev)
 {
        const __be32 *fw_data;
index e616eb5..3cfb500 100644 (file)
@@ -2769,8 +2769,8 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
        radeon_ring_write(ring, lower_32_bits(addr));
        radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
 
-       /* PFP_SYNC_ME packet only exists on 7xx+ */
-       if (emit_wait && (rdev->family >= CHIP_RV770)) {
+       /* PFP_SYNC_ME packet only exists on 7xx+, only enable it on eg+ */
+       if (emit_wait && (rdev->family >= CHIP_CEDAR)) {
                /* Prevent the PFP from running ahead of the semaphore wait */
                radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
                radeon_ring_write(ring, 0x0);
index 51fd985..a908daa 100644 (file)
@@ -124,15 +124,6 @@ int r600_dma_resume(struct radeon_device *rdev)
        u32 rb_bufsz;
        int r;
 
-       /* Reset dma */
-       if (rdev->family >= CHIP_RV770)
-               WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
-       else
-               WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
-       RREG32(SRBM_SOFT_RESET);
-       udelay(50);
-       WREG32(SRBM_SOFT_RESET, 0);
-
        WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
        WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
 
index 0c4a7d8..31e1052 100644 (file)
 #define R6XX_MAX_PIPES                         8
 #define R6XX_MAX_PIPES_MASK                    0xff
 
-/* PTE flags */
-#define PTE_VALID                              (1 << 0)
-#define PTE_SYSTEM                             (1 << 1)
-#define PTE_SNOOPED                            (1 << 2)
-#define PTE_READABLE                           (1 << 5)
-#define PTE_WRITEABLE                          (1 << 6)
-
 /* tiling bits */
 #define     ARRAY_LINEAR_GENERAL              0x00000000
 #define     ARRAY_LINEAR_ALIGNED              0x00000001
index eeeeabe..2dd5847 100644 (file)
@@ -185,7 +185,6 @@ static struct radeon_asic_ring r100_gfx_ring = {
        .get_rptr = &r100_gfx_get_rptr,
        .get_wptr = &r100_gfx_get_wptr,
        .set_wptr = &r100_gfx_set_wptr,
-       .hdp_flush = &r100_ring_hdp_flush,
 };
 
 static struct radeon_asic r100_asic = {
@@ -332,7 +331,6 @@ static struct radeon_asic_ring r300_gfx_ring = {
        .get_rptr = &r100_gfx_get_rptr,
        .get_wptr = &r100_gfx_get_wptr,
        .set_wptr = &r100_gfx_set_wptr,
-       .hdp_flush = &r100_ring_hdp_flush,
 };
 
 static struct radeon_asic r300_asic = {
index 275a5dc..7756bc1 100644 (file)
@@ -148,8 +148,7 @@ u32 r100_gfx_get_wptr(struct radeon_device *rdev,
                      struct radeon_ring *ring);
 void r100_gfx_set_wptr(struct radeon_device *rdev,
                       struct radeon_ring *ring);
-void r100_ring_hdp_flush(struct radeon_device *rdev,
-                        struct radeon_ring *ring);
+
 /*
  * r200,rv250,rs300,rv280
  */
index 92b2d8d..e74c7e3 100644 (file)
@@ -447,6 +447,13 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
                }
        }
 
+       /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
+       if ((dev->pdev->device == 0x9805) &&
+           (dev->pdev->subsystem_vendor == 0x1734) &&
+           (dev->pdev->subsystem_device == 0x11bd)) {
+               if (*connector_type == DRM_MODE_CONNECTOR_VGA)
+                       return false;
+       }
 
        return true;
 }
@@ -2281,19 +2288,31 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        rdev->pm.int_thermal_type = THERMAL_TYPE_KV;
-               } else if ((controller->ucType ==
-                           ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
-                          (controller->ucType ==
-                           ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) ||
-                          (controller->ucType ==
-                           ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
-                       DRM_INFO("Special thermal controller config\n");
+               } else if (controller->ucType ==
+                          ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
+                       DRM_INFO("External GPIO thermal controller %s fan control\n",
+                                (controller->ucFanParameters &
+                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+                       rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
+               } else if (controller->ucType ==
+                          ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
+                       DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
+                                (controller->ucFanParameters &
+                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+                       rdev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
+               } else if (controller->ucType ==
+                          ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
+                       DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
+                                (controller->ucFanParameters &
+                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+                       rdev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
                } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
                        DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
                                 pp_lib_thermal_controller_names[controller->ucType],
                                 controller->ucI2cAddress >> 1,
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+                       rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
                        i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
                        rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
                        if (rdev->pm.i2c_bus) {
index a9fb0d0..8bc7d0b 100644 (file)
@@ -33,7 +33,6 @@ static struct radeon_atpx_priv {
        bool atpx_detected;
        /* handle for device - and atpx */
        acpi_handle dhandle;
-       acpi_handle other_handle;
        struct radeon_atpx atpx;
 } radeon_atpx_priv;
 
@@ -453,10 +452,9 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
                return false;
 
        status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
-       if (ACPI_FAILURE(status)) {
-               radeon_atpx_priv.other_handle = dhandle;
+       if (ACPI_FAILURE(status))
                return false;
-       }
+
        radeon_atpx_priv.dhandle = dhandle;
        radeon_atpx_priv.atpx.handle = atpx_handle;
        return true;
@@ -540,16 +538,6 @@ static bool radeon_atpx_detect(void)
                printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
                       acpi_method_name);
                radeon_atpx_priv.atpx_detected = true;
-               /*
-                * On some systems hotplug events are generated for the device
-                * being switched off when ATPX is executed.  They cause ACPI
-                * hotplug to trigger and attempt to remove the device from
-                * the system, which causes it to break down.  Prevent that from
-                * happening by setting the no_hotplug flag for the involved
-                * ACPI device objects.
-                */
-               acpi_bus_no_hotplug(radeon_atpx_priv.dhandle);
-               acpi_bus_no_hotplug(radeon_atpx_priv.other_handle);
                return true;
        }
        return false;
index 6a219bc..75223dd 100644 (file)
@@ -1393,7 +1393,7 @@ int radeon_device_init(struct radeon_device *rdev,
 
        r = radeon_init(rdev);
        if (r)
-               return r;
+               goto failed;
 
        r = radeon_ib_ring_tests(rdev);
        if (r)
@@ -1413,7 +1413,7 @@ int radeon_device_init(struct radeon_device *rdev,
                radeon_agp_disable(rdev);
                r = radeon_init(rdev);
                if (r)
-                       return r;
+                       goto failed;
        }
 
        if ((radeon_testing & 1)) {
@@ -1435,6 +1435,11 @@ int radeon_device_init(struct radeon_device *rdev,
                        DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
        }
        return 0;
+
+failed:
+       if (runtime)
+               vga_switcheroo_fini_domain_pm_ops(rdev->dev);
+       return r;
 }
 
 static void radeon_debugfs_remove_files(struct radeon_device *rdev);
@@ -1455,6 +1460,8 @@ void radeon_device_fini(struct radeon_device *rdev)
        radeon_bo_evict_vram(rdev);
        radeon_fini(rdev);
        vga_switcheroo_unregister_client(rdev->pdev);
+       if (rdev->flags & RADEON_IS_PX)
+               vga_switcheroo_fini_domain_pm_ops(rdev->dev);
        vga_client_register(rdev->pdev, NULL, NULL, NULL);
        if (rdev->rio_mem)
                pci_iounmap(rdev->pdev, rdev->rio_mem);
index 8df8889..4126fd0 100644 (file)
@@ -83,7 +83,7 @@
  *            CIK: 1D and linear tiling modes contain valid PIPE_CONFIG
  *   2.39.0 - Add INFO query for number of active CUs
  *   2.40.0 - Add RADEON_GEM_GTT_WC/UC, flush HDP cache before submitting
- *            CS to GPU
+ *            CS to GPU on >= r600
  */
 #define KMS_DRIVER_MAJOR       2
 #define KMS_DRIVER_MINOR       40
@@ -440,6 +440,7 @@ static int radeon_pmops_runtime_suspend(struct device *dev)
        ret = radeon_suspend_kms(drm_dev, false, false);
        pci_save_state(pdev);
        pci_disable_device(pdev);
+       pci_ignore_hotplug(pdev);
        pci_set_power_state(pdev, PCI_D3cold);
        drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
 
index 56d9fd6..abd6753 100644 (file)
@@ -34,7 +34,7 @@
 int radeon_semaphore_create(struct radeon_device *rdev,
                            struct radeon_semaphore **semaphore)
 {
-       uint32_t *cpu_addr;
+       uint64_t *cpu_addr;
        int i, r;
 
        *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
index 6c1fc33..c5799f1 100644 (file)
@@ -221,9 +221,9 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
        entry = (lower_32_bits(addr) & PAGE_MASK) |
                ((upper_32_bits(addr) & 0xff) << 4);
        if (flags & RADEON_GART_PAGE_READ)
-               addr |= RS400_PTE_READABLE;
+               entry |= RS400_PTE_READABLE;
        if (flags & RADEON_GART_PAGE_WRITE)
-               addr |= RS400_PTE_WRITEABLE;
+               entry |= RS400_PTE_WRITEABLE;
        if (!(flags & RADEON_GART_PAGE_SNOOP))
                entry |= RS400_PTE_UNSNOOPED;
        entry = cpu_to_le32(entry);
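
Editor's note: the rs400_gart_set_page() fix above corrects a classic slip: the permission flags were OR'd into addr after addr had already been folded into entry, so the READABLE/WRITEABLE bits never reached the page table. A standalone sketch of the corrected construction follows; the flag values and function name are placeholders, and only the "accumulate into entry, not addr" point mirrors the patch.

#include <stdint.h>

#define MY_PAGE_READ		(1u << 0)	/* caller request flags (placeholders) */
#define MY_PAGE_WRITE		(1u << 1)
#define MY_PTE_READABLE		(1u << 2)	/* PTE bits (placeholders) */
#define MY_PTE_WRITEABLE	(1u << 3)

static uint32_t build_gart_pte(uint64_t addr, unsigned int flags)
{
	/* Fold the DMA address into the 32-bit entry first (4 KiB pages,
	 * high address bits packed into bits 4..11, as in the hunk above).
	 */
	uint32_t entry = (uint32_t)(addr & ~0xfffULL) |
			 ((uint32_t)((addr >> 32) & 0xff) << 4);

	/* ...then accumulate the permission bits into entry.  The old code
	 * OR'd them into addr, which is never read again, so the bits were
	 * silently dropped.
	 */
	if (flags & MY_PAGE_READ)
		entry |= MY_PTE_READABLE;
	if (flags & MY_PAGE_WRITE)
		entry |= MY_PTE_WRITEABLE;

	return entry;
}
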
index ef93156..b22968c 100644 (file)
@@ -298,7 +298,6 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
        hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD2(HDMI_IFRAME_SLOT_AVI));
 
        val = frame[0xC];
-       val |= frame[0xD] << 8;
        hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD3(HDMI_IFRAME_SLOT_AVI));
 
        /* Enable transmission slot for AVI infoframe
index 6866448..37ac7b5 100644 (file)
@@ -660,6 +660,12 @@ int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *
 }
 EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_ops);
 
+void vga_switcheroo_fini_domain_pm_ops(struct device *dev)
+{
+       dev->pm_domain = NULL;
+}
+EXPORT_SYMBOL(vga_switcheroo_fini_domain_pm_ops);
+
 static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
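
Editor's note: vga_switcheroo_init_domain_pm_ops() points dev->pm_domain at a structure owned by the GPU driver; the new vga_switcheroo_fini_domain_pm_ops() above simply clears that pointer, and the radeon/nouveau hunks in this series call it on teardown and on failed init so the PM core is never left referencing an unloaded module. Below is a hedged sketch of keeping the two calls balanced; the probe/teardown skeleton and my_* names are illustrative, only the two vga_switcheroo calls come from the patch.

static struct dev_pm_domain my_vga_pm_domain;	/* .ops would be filled in elsewhere */

static int my_gpu_hw_init(struct device *dev);	/* stand-in for the driver's init */

static int my_gpu_register(struct device *dev, bool runtime_pm)
{
	int ret;

	if (runtime_pm)
		vga_switcheroo_init_domain_pm_ops(dev, &my_vga_pm_domain);

	ret = my_gpu_hw_init(dev);
	if (ret && runtime_pm)
		vga_switcheroo_fini_domain_pm_ops(dev);	/* undo on failure */
	return ret;
}

static void my_gpu_unregister(struct device *dev, bool runtime_pm)
{
	if (runtime_pm)
		vga_switcheroo_fini_domain_pm_ops(dev);	/* undo on teardown */
}
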
index d2077f0..7771162 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/poll.h>
 #include <linux/miscdevice.h>
 #include <linux/slab.h>
+#include <linux/screen_info.h>
 
 #include <linux/uaccess.h>
 
@@ -112,10 +113,8 @@ both:
        return 1;
 }
 
-#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
 /* this is only used a cookie - it should not be dereferenced */
 static struct pci_dev *vga_default;
-#endif
 
 static void vga_arb_device_card_gone(struct pci_dev *pdev);
 
@@ -131,7 +130,6 @@ static struct vga_device *vgadev_find(struct pci_dev *pdev)
 }
 
 /* Returns the default VGA device (vgacon's babe) */
-#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
 struct pci_dev *vga_default_device(void)
 {
        return vga_default;
@@ -147,7 +145,6 @@ void vga_set_default_device(struct pci_dev *pdev)
        pci_dev_put(vga_default);
        vga_default = pci_dev_get(pdev);
 }
-#endif
 
 static inline void vga_irq_set_state(struct vga_device *vgadev, bool state)
 {
@@ -583,11 +580,12 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
        /* Deal with VGA default device. Use first enabled one
         * by default if arch doesn't have its own hook
         */
-#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
        if (vga_default == NULL &&
-           ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK))
+           ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK)) {
+               pr_info("vgaarb: setting as boot device: PCI:%s\n",
+                       pci_name(pdev));
                vga_set_default_device(pdev);
-#endif
+       }
 
        vga_arbiter_check_bridge_sharing(vgadev);
 
@@ -621,10 +619,8 @@ static bool vga_arbiter_del_pci_device(struct pci_dev *pdev)
                goto bail;
        }
 
-#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
        if (vga_default == pdev)
                vga_set_default_device(NULL);
-#endif
 
        if (vgadev->decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
                vga_decode_count--;
@@ -1320,6 +1316,38 @@ static int __init vga_arb_device_init(void)
        pr_info("vgaarb: loaded\n");
 
        list_for_each_entry(vgadev, &vga_list, list) {
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+               /* Override I/O based detection done by vga_arbiter_add_pci_device()
+                * as it may take the wrong device (e.g. on Apple systems under EFI).
+                *
+                * Select the device owning the boot framebuffer if there is one.
+                */
+               resource_size_t start, end;
+               int i;
+
+               /* Does firmware framebuffer belong to us? */
+               for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+                       if (!(pci_resource_flags(vgadev->pdev, i) & IORESOURCE_MEM))
+                               continue;
+
+                       start = pci_resource_start(vgadev->pdev, i);
+                       end  = pci_resource_end(vgadev->pdev, i);
+
+                       if (!start || !end)
+                               continue;
+
+                       if (screen_info.lfb_base < start ||
+                           (screen_info.lfb_base + screen_info.lfb_size) >= end)
+                               continue;
+                       if (!vga_default_device())
+                               pr_info("vgaarb: setting as boot device: PCI:%s\n",
+                                       pci_name(vgadev->pdev));
+                       else if (vgadev->pdev != vga_default_device())
+                               pr_info("vgaarb: overriding boot device: PCI:%s\n",
+                                       pci_name(vgadev->pdev));
+                       vga_set_default_device(vgadev->pdev);
+               }
+#endif
                if (vgadev->bridge_has_one_vga)
                        pr_info("vgaarb: bridge control possible %s\n", pci_name(vgadev->pdev));
                else
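
Editor's note: on x86 and ia64 the arbiter now overrides its I/O-based guess by checking whether the firmware framebuffer described by screen_info lies entirely inside one of a VGA device's memory BARs; the matching device becomes the default (boot) VGA device. That containment test reduces to the range check below, written as a standalone sketch with simplified types; the PCI resource iteration stays as in the hunk above.

#include <stdbool.h>
#include <stdint.h>

/* Is [lfb_base, lfb_base + lfb_size) inside the BAR range [bar_start, bar_end]?
 * Mirrors the hunk above: empty BARs are skipped and the whole framebuffer
 * must fit for the device to be picked.
 */
static bool fb_within_bar(uint64_t lfb_base, uint64_t lfb_size,
			  uint64_t bar_start, uint64_t bar_end)
{
	if (!bar_start || !bar_end)
		return false;
	if (lfb_base < bar_start)
		return false;
	if (lfb_base + lfb_size >= bar_end)
		return false;
	return true;
}
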
index 4a7cbfa..fcdbde4 100644 (file)
@@ -93,13 +93,29 @@ static ssize_t show_power_crit(struct device *dev,
 }
 static DEVICE_ATTR(power1_crit, S_IRUGO, show_power_crit, NULL);
 
+static umode_t fam15h_power_is_visible(struct kobject *kobj,
+                                      struct attribute *attr,
+                                      int index)
+{
+       /* power1_input is only reported for Fam15h, Models 00h-0fh */
+       if (attr == &dev_attr_power1_input.attr &&
+          (boot_cpu_data.x86 != 0x15 || boot_cpu_data.x86_model > 0xf))
+               return 0;
+
+       return attr->mode;
+}
+
 static struct attribute *fam15h_power_attrs[] = {
        &dev_attr_power1_input.attr,
        &dev_attr_power1_crit.attr,
        NULL
 };
 
-ATTRIBUTE_GROUPS(fam15h_power);
+static const struct attribute_group fam15h_power_group = {
+       .attrs = fam15h_power_attrs,
+       .is_visible = fam15h_power_is_visible,
+};
+__ATTRIBUTE_GROUPS(fam15h_power);
 
 static bool fam15h_power_is_internal_node0(struct pci_dev *f4)
 {
@@ -216,7 +232,9 @@ static int fam15h_power_probe(struct pci_dev *pdev,
 
 static const struct pci_device_id fam15h_power_id_table[] = {
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
+       { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
+       { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
        {}
 };
 MODULE_DEVICE_TABLE(pci, fam15h_power_id_table);
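
Editor's note: rather than registering power1_input only on the models that support it, the fam15h_power hunk above always lists the attribute and lets sysfs hide it through the group's .is_visible callback: returning 0 suppresses the file, returning attr->mode keeps its declared permissions. A generic, hedged sketch of that mechanism follows; the attribute names and my_hw_has_foo() are made up for illustration.

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t show_foo(struct device *dev, struct device_attribute *attr, char *buf);
static ssize_t show_bar(struct device *dev, struct device_attribute *attr, char *buf);
static bool my_hw_has_foo(void);	/* stand-in for a hardware capability check */

static DEVICE_ATTR(foo, 0444, show_foo, NULL);
static DEVICE_ATTR(bar, 0444, show_bar, NULL);

static umode_t my_attrs_visible(struct kobject *kobj,
				struct attribute *attr, int index)
{
	/* Hide "foo" when the hardware lacks the feature; 0 means invisible. */
	if (attr == &dev_attr_foo.attr && !my_hw_has_foo())
		return 0;

	return attr->mode;	/* otherwise keep the declared mode */
}

static struct attribute *my_attrs[] = {
	&dev_attr_foo.attr,
	&dev_attr_bar.attr,
	NULL
};

static const struct attribute_group my_group = {
	.attrs = my_attrs,
	.is_visible = my_attrs_visible,
};
__ATTRIBUTE_GROUPS(my);
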
index e42964f..ad571ec 100644 (file)
@@ -145,7 +145,7 @@ static int tmp103_probe(struct i2c_client *client,
        }
 
        i2c_set_clientdata(client, regmap);
-       hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
                                                      regmap, tmp103_groups);
        return PTR_ERR_OR_ZERO(hwmon_dev);
 }
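
Editor's note: the tmp103 change swaps hwmon_device_register_with_groups() for its devm_ counterpart, which ties the hwmon device's lifetime to the I2C client so no explicit unregister call is needed when the driver is unbound. A hedged probe sketch of that pattern follows; my_drvdata and my_sensor_groups are stand-ins for the driver's private data and sysfs groups.

static void *my_drvdata;					/* stand-in private data */
static const struct attribute_group *my_sensor_groups[] = { NULL };

static int my_sensor_probe(struct i2c_client *client,
			   const struct i2c_device_id *id)
{
	struct device *dev = &client->dev;
	struct device *hwmon_dev;

	hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
							    my_drvdata,
							    my_sensor_groups);
	/* devm_ means the hwmon device is torn down automatically on unbind. */
	return PTR_ERR_OR_ZERO(hwmon_dev);
}
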
index a077cc8..19100fd 100644 (file)
@@ -571,7 +571,7 @@ static int bma180_probe(struct i2c_client *client,
        trig->ops = &bma180_trigger_ops;
        iio_trigger_set_drvdata(trig, indio_dev);
        data->trig = trig;
-       indio_dev->trig = trig;
+       indio_dev->trig = iio_trigger_get(trig);
 
        ret = iio_trigger_register(trig);
        if (ret)
index c55b81f..d10bd0c 100644 (file)
@@ -472,7 +472,7 @@ static int ad_sd_probe_trigger(struct iio_dev *indio_dev)
                goto error_free_irq;
 
        /* select default trigger */
-       indio_dev->trig = sigma_delta->trig;
+       indio_dev->trig = iio_trigger_get(sigma_delta->trig);
 
        return 0;
 
index 772e869..7eadaf1 100644 (file)
@@ -196,6 +196,7 @@ struct at91_adc_state {
        bool                    done;
        int                     irq;
        u16                     last_value;
+       int                     chnb;
        struct mutex            lock;
        u8                      num_channels;
        void __iomem            *reg_base;
@@ -274,7 +275,7 @@ void handle_adc_eoc_trigger(int irq, struct iio_dev *idev)
                disable_irq_nosync(irq);
                iio_trigger_poll(idev->trig);
        } else {
-               st->last_value = at91_adc_readl(st, AT91_ADC_LCDR);
+               st->last_value = at91_adc_readl(st, AT91_ADC_CHAN(st, st->chnb));
                st->done = true;
                wake_up_interruptible(&st->wq_data_avail);
        }
@@ -351,7 +352,7 @@ static irqreturn_t at91_adc_rl_interrupt(int irq, void *private)
        unsigned int reg;
 
        status &= at91_adc_readl(st, AT91_ADC_IMR);
-       if (status & st->registers->drdy_mask)
+       if (status & GENMASK(st->num_channels - 1, 0))
                handle_adc_eoc_trigger(irq, idev);
 
        if (status & AT91RL_ADC_IER_PEN) {
@@ -418,7 +419,7 @@ static irqreturn_t at91_adc_9x5_interrupt(int irq, void *private)
                AT91_ADC_IER_YRDY |
                AT91_ADC_IER_PRDY;
 
-       if (status & st->registers->drdy_mask)
+       if (status & GENMASK(st->num_channels - 1, 0))
                handle_adc_eoc_trigger(irq, idev);
 
        if (status & AT91_ADC_IER_PEN) {
@@ -689,9 +690,10 @@ static int at91_adc_read_raw(struct iio_dev *idev,
        case IIO_CHAN_INFO_RAW:
                mutex_lock(&st->lock);
 
+               st->chnb = chan->channel;
                at91_adc_writel(st, AT91_ADC_CHER,
                                AT91_ADC_CH(chan->channel));
-               at91_adc_writel(st, AT91_ADC_IER, st->registers->drdy_mask);
+               at91_adc_writel(st, AT91_ADC_IER, BIT(chan->channel));
                at91_adc_writel(st, AT91_ADC_CR, AT91_ADC_START);
 
                ret = wait_event_interruptible_timeout(st->wq_data_avail,
@@ -708,7 +710,7 @@ static int at91_adc_read_raw(struct iio_dev *idev,
 
                at91_adc_writel(st, AT91_ADC_CHDR,
                                AT91_ADC_CH(chan->channel));
-               at91_adc_writel(st, AT91_ADC_IDR, st->registers->drdy_mask);
+               at91_adc_writel(st, AT91_ADC_IDR, BIT(chan->channel));
 
                st->last_value = 0;
                st->done = false;
index fd2745c..626b397 100644 (file)
@@ -1126,7 +1126,7 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
                                chan->address = XADC_REG_VPVN;
                        } else {
                                chan->scan_index = 15 + reg;
-                               chan->scan_index = XADC_REG_VAUX(reg - 1);
+                               chan->address = XADC_REG_VAUX(reg - 1);
                        }
                        num_channels++;
                        chan++;
index a3109a6..92068cd 100644 (file)
@@ -122,7 +122,8 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
                dev_err(&indio_dev->dev, "Trigger Register Failed\n");
                goto error_free_trig;
        }
-       indio_dev->trig = attrb->trigger = trig;
+       attrb->trigger = trig;
+       indio_dev->trig = iio_trigger_get(trig);
 
        return ret;
 
index 8fc3a97..8d8ca6f 100644 (file)
@@ -49,7 +49,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
                dev_err(&indio_dev->dev, "failed to register iio trigger.\n");
                goto iio_trigger_register_error;
        }
-       indio_dev->trig = sdata->trig;
+       indio_dev->trig = iio_trigger_get(sdata->trig);
 
        return 0;
 
index e3b3c50..eef50e9 100644 (file)
@@ -132,7 +132,7 @@ int itg3200_probe_trigger(struct iio_dev *indio_dev)
                goto error_free_irq;
 
        /* select default trigger */
-       indio_dev->trig = st->trig;
+       indio_dev->trig = iio_trigger_get(st->trig);
 
        return 0;
 
index 03b9372..926fcce 100644 (file)
@@ -135,7 +135,7 @@ int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev)
        ret = iio_trigger_register(st->trig);
        if (ret)
                goto error_free_irq;
-       indio_dev->trig = st->trig;
+       indio_dev->trig = iio_trigger_get(st->trig);
 
        return 0;
 
index c749700..f084610 100644 (file)
@@ -178,7 +178,7 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
                        index = of_property_match_string(np, "io-channel-names",
                                                         name);
                chan = of_iio_channel_get(np, index);
-               if (!IS_ERR(chan))
+               if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
                        break;
                else if (name && index >= 0) {
                        pr_err("ERROR: could not get IIO channel %s:%s(%i)\n",
index a4b6413..68cae86 100644 (file)
@@ -42,7 +42,8 @@
 #define ST_MAGN_FS_AVL_5600MG                  5600
 #define ST_MAGN_FS_AVL_8000MG                  8000
 #define ST_MAGN_FS_AVL_8100MG                  8100
-#define ST_MAGN_FS_AVL_10000MG                 10000
+#define ST_MAGN_FS_AVL_12000MG                 12000
+#define ST_MAGN_FS_AVL_16000MG                 16000
 
 /* CUSTOM VALUES FOR SENSOR 1 */
 #define ST_MAGN_1_WAI_EXP                      0x3c
 #define ST_MAGN_1_FS_AVL_4700_VAL              0x05
 #define ST_MAGN_1_FS_AVL_5600_VAL              0x06
 #define ST_MAGN_1_FS_AVL_8100_VAL              0x07
-#define ST_MAGN_1_FS_AVL_1300_GAIN_XY          1100
-#define ST_MAGN_1_FS_AVL_1900_GAIN_XY          855
-#define ST_MAGN_1_FS_AVL_2500_GAIN_XY          670
-#define ST_MAGN_1_FS_AVL_4000_GAIN_XY          450
-#define ST_MAGN_1_FS_AVL_4700_GAIN_XY          400
-#define ST_MAGN_1_FS_AVL_5600_GAIN_XY          330
-#define ST_MAGN_1_FS_AVL_8100_GAIN_XY          230
-#define ST_MAGN_1_FS_AVL_1300_GAIN_Z           980
-#define ST_MAGN_1_FS_AVL_1900_GAIN_Z           760
-#define ST_MAGN_1_FS_AVL_2500_GAIN_Z           600
-#define ST_MAGN_1_FS_AVL_4000_GAIN_Z           400
-#define ST_MAGN_1_FS_AVL_4700_GAIN_Z           355
-#define ST_MAGN_1_FS_AVL_5600_GAIN_Z           295
-#define ST_MAGN_1_FS_AVL_8100_GAIN_Z           205
+#define ST_MAGN_1_FS_AVL_1300_GAIN_XY          909
+#define ST_MAGN_1_FS_AVL_1900_GAIN_XY          1169
+#define ST_MAGN_1_FS_AVL_2500_GAIN_XY          1492
+#define ST_MAGN_1_FS_AVL_4000_GAIN_XY          2222
+#define ST_MAGN_1_FS_AVL_4700_GAIN_XY          2500
+#define ST_MAGN_1_FS_AVL_5600_GAIN_XY          3030
+#define ST_MAGN_1_FS_AVL_8100_GAIN_XY          4347
+#define ST_MAGN_1_FS_AVL_1300_GAIN_Z           1020
+#define ST_MAGN_1_FS_AVL_1900_GAIN_Z           1315
+#define ST_MAGN_1_FS_AVL_2500_GAIN_Z           1666
+#define ST_MAGN_1_FS_AVL_4000_GAIN_Z           2500
+#define ST_MAGN_1_FS_AVL_4700_GAIN_Z           2816
+#define ST_MAGN_1_FS_AVL_5600_GAIN_Z           3389
+#define ST_MAGN_1_FS_AVL_8100_GAIN_Z           4878
 #define ST_MAGN_1_MULTIREAD_BIT                        false
 
 /* CUSTOM VALUES FOR SENSOR 2 */
 #define ST_MAGN_2_FS_MASK                      0x60
 #define ST_MAGN_2_FS_AVL_4000_VAL              0x00
 #define ST_MAGN_2_FS_AVL_8000_VAL              0x01
-#define ST_MAGN_2_FS_AVL_10000_VAL             0x02
-#define ST_MAGN_2_FS_AVL_4000_GAIN             430
-#define ST_MAGN_2_FS_AVL_8000_GAIN             230
-#define ST_MAGN_2_FS_AVL_10000_GAIN            230
+#define ST_MAGN_2_FS_AVL_12000_VAL             0x02
+#define ST_MAGN_2_FS_AVL_16000_VAL             0x03
+#define ST_MAGN_2_FS_AVL_4000_GAIN             146
+#define ST_MAGN_2_FS_AVL_8000_GAIN             292
+#define ST_MAGN_2_FS_AVL_12000_GAIN            438
+#define ST_MAGN_2_FS_AVL_16000_GAIN            584
 #define ST_MAGN_2_MULTIREAD_BIT                        false
 #define ST_MAGN_2_OUT_X_L_ADDR                 0x28
 #define ST_MAGN_2_OUT_Y_L_ADDR                 0x2a
@@ -266,9 +269,14 @@ static const struct st_sensors st_magn_sensors[] = {
                                        .gain = ST_MAGN_2_FS_AVL_8000_GAIN,
                                },
                                [2] = {
-                                       .num = ST_MAGN_FS_AVL_10000MG,
-                                       .value = ST_MAGN_2_FS_AVL_10000_VAL,
-                                       .gain = ST_MAGN_2_FS_AVL_10000_GAIN,
+                                       .num = ST_MAGN_FS_AVL_12000MG,
+                                       .value = ST_MAGN_2_FS_AVL_12000_VAL,
+                                       .gain = ST_MAGN_2_FS_AVL_12000_GAIN,
+                               },
+                               [3] = {
+                                       .num = ST_MAGN_FS_AVL_16000MG,
+                                       .value = ST_MAGN_2_FS_AVL_16000_VAL,
+                                       .gain = ST_MAGN_2_FS_AVL_16000_GAIN,
                                },
                        },
                },
index a3a2e9c..df0c4f6 100644 (file)
@@ -105,6 +105,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
        umem->length    = size;
        umem->offset    = addr & ~PAGE_MASK;
        umem->page_size = PAGE_SIZE;
+       umem->pid       = get_task_pid(current, PIDTYPE_PID);
        /*
         * We ask for writable memory if any access flags other than
         * "remote read" are set.  "Local write" and "remote write"
@@ -198,6 +199,7 @@ out:
        if (ret < 0) {
                if (need_release)
                        __ib_umem_release(context->device, umem, 0);
+               put_pid(umem->pid);
                kfree(umem);
        } else
                current->mm->pinned_vm = locked;
@@ -230,15 +232,19 @@ void ib_umem_release(struct ib_umem *umem)
 {
        struct ib_ucontext *context = umem->context;
        struct mm_struct *mm;
+       struct task_struct *task;
        unsigned long diff;
 
        __ib_umem_release(umem->context->device, umem, 1);
 
-       mm = get_task_mm(current);
-       if (!mm) {
-               kfree(umem);
-               return;
-       }
+       task = get_pid_task(umem->pid, PIDTYPE_PID);
+       put_pid(umem->pid);
+       if (!task)
+               goto out;
+       mm = get_task_mm(task);
+       put_task_struct(task);
+       if (!mm)
+               goto out;
 
        diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
 
@@ -262,9 +268,10 @@ void ib_umem_release(struct ib_umem *umem)
        } else
                down_write(&mm->mmap_sem);
 
-       current->mm->pinned_vm -= diff;
+       mm->pinned_vm -= diff;
        up_write(&mm->mmap_sem);
        mmput(mm);
+out:
        kfree(umem);
 }
 EXPORT_SYMBOL(ib_umem_release);
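
The ib_umem changes record the registering task's pid in ib_umem_get() and resolve it back to the owning mm in ib_umem_release(), so the pinned_vm counter is decremented in the process that pinned the pages even when the release runs from a different context. A condensed sketch of the two sides, using only the calls visible in the hunks; umem->pid is the field added above.

/* registration side: remember which process pinned the pages */
umem->pid = get_task_pid(current, PIDTYPE_PID);

/* release side: pid -> task -> mm; skip the accounting if either is gone */
task = get_pid_task(umem->pid, PIDTYPE_PID);
put_pid(umem->pid);
if (task) {
	mm = get_task_mm(task);
	put_task_struct(task);
	if (mm) {
		mm->pinned_vm -= diff;	/* charge the owner, not current */
		mmput(mm);
	}
}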
index e7bee46..abd9724 100644 (file)
@@ -140,5 +140,9 @@ void ib_copy_path_rec_from_user(struct ib_sa_path_rec *dst,
        dst->packet_life_time   = src->packet_life_time;
        dst->preference         = src->preference;
        dst->packet_life_time_selector = src->packet_life_time_selector;
+
+       memset(dst->smac, 0, sizeof(dst->smac));
+       memset(dst->dmac, 0, sizeof(dst->dmac));
+       dst->vlan_id = 0xffff;
 }
 EXPORT_SYMBOL(ib_copy_path_rec_from_user);
index dc66c45..1da1252 100644 (file)
@@ -54,7 +54,7 @@ static void __ipath_release_user_pages(struct page **p, size_t num_pages,
 
 /* call with current->mm->mmap_sem held */
 static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
-                                 struct page **p, struct vm_area_struct **vma)
+                                 struct page **p)
 {
        unsigned long lock_limit;
        size_t got;
@@ -74,7 +74,7 @@ static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
                ret = get_user_pages(current, current->mm,
                                     start_page + got * PAGE_SIZE,
                                     num_pages - got, 1, 1,
-                                    p + got, vma);
+                                    p + got, NULL);
                if (ret < 0)
                        goto bail_release;
        }
@@ -165,7 +165,7 @@ int ipath_get_user_pages(unsigned long start_page, size_t num_pages,
 
        down_write(&current->mm->mmap_sem);
 
-       ret = __ipath_get_user_pages(start_page, num_pages, p, NULL);
+       ret = __ipath_get_user_pages(start_page, num_pages, p);
 
        up_write(&current->mm->mmap_sem);
 
index af82563..bda5994 100644 (file)
@@ -59,6 +59,7 @@
 
 #define MLX4_IB_FLOW_MAX_PRIO 0xFFF
 #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
+#define MLX4_IB_CARD_REV_A0   0xA0
 
 MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
@@ -119,6 +120,17 @@ static int check_flow_steering_support(struct mlx4_dev *dev)
        return dmfs;
 }
 
+static int num_ib_ports(struct mlx4_dev *dev)
+{
+       int ib_ports = 0;
+       int i;
+
+       mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+               ib_ports++;
+
+       return ib_ports;
+}
+
 static int mlx4_ib_query_device(struct ib_device *ibdev,
                                struct ib_device_attr *props)
 {
@@ -126,6 +138,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
+       int have_ib_ports;
 
        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
@@ -142,6 +155,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 
        memset(props, 0, sizeof *props);
 
+       have_ib_ports = num_ib_ports(dev->dev);
+
        props->fw_ver = dev->dev->caps.fw_ver;
        props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
                IB_DEVICE_PORT_ACTIVE_EVENT             |
@@ -152,13 +167,15 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
                props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
                props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
-       if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
+       if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
                props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
                props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
                props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
-       if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
+       if (dev->dev->caps.max_gso_sz &&
+           (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
+           (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
                props->device_cap_flags |= IB_DEVICE_UD_TSO;
        if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
                props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
@@ -357,7 +374,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
        props->state            = IB_PORT_DOWN;
        props->phys_state       = state_to_phys_state(props->state);
        props->active_mtu       = IB_MTU_256;
-       spin_lock(&iboe->lock);
+       spin_lock_bh(&iboe->lock);
        ndev = iboe->netdevs[port - 1];
        if (!ndev)
                goto out_unlock;
@@ -369,7 +386,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
                                        IB_PORT_ACTIVE : IB_PORT_DOWN;
        props->phys_state       = state_to_phys_state(props->state);
 out_unlock:
-       spin_unlock(&iboe->lock);
+       spin_unlock_bh(&iboe->lock);
 out:
        mlx4_free_cmd_mailbox(mdev->dev, mailbox);
        return err;
@@ -811,11 +828,11 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
        if (!mqp->port)
                return 0;
 
-       spin_lock(&mdev->iboe.lock);
+       spin_lock_bh(&mdev->iboe.lock);
        ndev = mdev->iboe.netdevs[mqp->port - 1];
        if (ndev)
                dev_hold(ndev);
-       spin_unlock(&mdev->iboe.lock);
+       spin_unlock_bh(&mdev->iboe.lock);
 
        if (ndev) {
                ret = 1;
@@ -1292,11 +1309,11 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
        mutex_lock(&mqp->mutex);
        ge = find_gid_entry(mqp, gid->raw);
        if (ge) {
-               spin_lock(&mdev->iboe.lock);
+               spin_lock_bh(&mdev->iboe.lock);
                ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
                if (ndev)
                        dev_hold(ndev);
-               spin_unlock(&mdev->iboe.lock);
+               spin_unlock_bh(&mdev->iboe.lock);
                if (ndev)
                        dev_put(ndev);
                list_del(&ge->list);
@@ -1417,6 +1434,9 @@ static void update_gids_task(struct work_struct *work)
        int err;
        struct mlx4_dev *dev = gw->dev->dev;
 
+       if (!gw->dev->ib_active)
+               return;
+
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
                pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
@@ -1447,6 +1467,9 @@ static void reset_gids_task(struct work_struct *work)
        int err;
        struct mlx4_dev *dev = gw->dev->dev;
 
+       if (!gw->dev->ib_active)
+               return;
+
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
                pr_warn("reset gid table failed\n");
@@ -1581,7 +1604,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
                return 0;
 
        iboe = &ibdev->iboe;
-       spin_lock(&iboe->lock);
+       spin_lock_bh(&iboe->lock);
 
        for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
                if ((netif_is_bond_master(real_dev) &&
@@ -1591,7 +1614,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
                        update_gid_table(ibdev, port, gid,
                                         event == NETDEV_DOWN, 0);
 
-       spin_unlock(&iboe->lock);
+       spin_unlock_bh(&iboe->lock);
        return 0;
 
 }
@@ -1664,13 +1687,21 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
        new_smac = mlx4_mac_to_u64(dev->dev_addr);
        read_unlock(&dev_base_lock);
 
+       atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
+
+       /* no need to update QP1 or register the MAC in non-SRIOV */
+       if (!mlx4_is_mfunc(ibdev->dev))
+               return;
+
        mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
        qp = ibdev->qp1_proxy[port - 1];
        if (qp) {
                int new_smac_index;
-               u64 old_smac = qp->pri.smac;
+               u64 old_smac;
                struct mlx4_update_qp_params update_params;
 
+               mutex_lock(&qp->mutex);
+               old_smac = qp->pri.smac;
                if (new_smac == old_smac)
                        goto unlock;
 
@@ -1680,22 +1711,25 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
                        goto unlock;
 
                update_params.smac_index = new_smac_index;
-               if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC,
+               if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
                                   &update_params)) {
                        release_mac = new_smac;
                        goto unlock;
                }
-
+               /* if old port was zero, no mac was yet registered for this QP */
+               if (qp->pri.smac_port)
+                       release_mac = old_smac;
                qp->pri.smac = new_smac;
+               qp->pri.smac_port = port;
                qp->pri.smac_index = new_smac_index;
-
-               release_mac = old_smac;
        }
 
 unlock:
-       mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
        if (release_mac != MLX4_IB_INVALID_MAC)
                mlx4_unregister_mac(ibdev->dev, port, release_mac);
+       if (qp)
+               mutex_unlock(&qp->mutex);
+       mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
 }
 
 static void mlx4_ib_get_dev_addr(struct net_device *dev,
@@ -1706,6 +1740,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
        struct inet6_dev *in6_dev;
        union ib_gid  *pgid;
        struct inet6_ifaddr *ifp;
+       union ib_gid default_gid;
 #endif
        union ib_gid gid;
 
@@ -1726,12 +1761,15 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
                in_dev_put(in_dev);
        }
 #if IS_ENABLED(CONFIG_IPV6)
+       mlx4_make_default_gid(dev, &default_gid);
        /* IPv6 gids */
        in6_dev = in6_dev_get(dev);
        if (in6_dev) {
                read_lock_bh(&in6_dev->lock);
                list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
                        pgid = (union ib_gid *)&ifp->addr;
+                       if (!memcmp(pgid, &default_gid, sizeof(*pgid)))
+                               continue;
                        update_gid_table(ibdev, port, pgid, 0, 0);
                }
                read_unlock_bh(&in6_dev->lock);
@@ -1753,24 +1791,33 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
        struct  net_device *dev;
        struct mlx4_ib_iboe *iboe = &ibdev->iboe;
        int i;
+       int err = 0;
 
-       for (i = 1; i <= ibdev->num_ports; ++i)
-               if (reset_gid_table(ibdev, i))
-                       return -1;
+       for (i = 1; i <= ibdev->num_ports; ++i) {
+               if (rdma_port_get_link_layer(&ibdev->ib_dev, i) ==
+                   IB_LINK_LAYER_ETHERNET) {
+                       err = reset_gid_table(ibdev, i);
+                       if (err)
+                               goto out;
+               }
+       }
 
        read_lock(&dev_base_lock);
-       spin_lock(&iboe->lock);
+       spin_lock_bh(&iboe->lock);
 
        for_each_netdev(&init_net, dev) {
                u8 port = mlx4_ib_get_dev_port(dev, ibdev);
-               if (port)
+               /* port will be non-zero only for ETH ports */
+               if (port) {
+                       mlx4_ib_set_default_gid(ibdev, dev, port);
                        mlx4_ib_get_dev_addr(dev, ibdev, port);
+               }
        }
 
-       spin_unlock(&iboe->lock);
+       spin_unlock_bh(&iboe->lock);
        read_unlock(&dev_base_lock);
-
-       return 0;
+out:
+       return err;
 }
 
 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
@@ -1784,7 +1831,7 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
 
        iboe = &ibdev->iboe;
 
-       spin_lock(&iboe->lock);
+       spin_lock_bh(&iboe->lock);
        mlx4_foreach_ib_transport_port(port, ibdev->dev) {
                enum ib_port_state      port_state = IB_PORT_NOP;
                struct net_device *old_master = iboe->masters[port - 1];
@@ -1816,35 +1863,47 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
                        port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
                                                IB_PORT_ACTIVE : IB_PORT_DOWN;
                        mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-               } else {
-                       reset_gid_table(ibdev, port);
-               }
-               /* if using bonding/team and a slave port is down, we don't the bond IP
-                * based gids in the table since flows that select port by gid may get
-                * the down port.
-                */
-               if (curr_master && (port_state == IB_PORT_DOWN)) {
-                       reset_gid_table(ibdev, port);
-                       mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-               }
-               /* if bonding is used it is possible that we add it to masters
-                * only after IP address is assigned to the net bonding
-                * interface.
-               */
-               if (curr_master && (old_master != curr_master)) {
-                       reset_gid_table(ibdev, port);
-                       mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-                       mlx4_ib_get_dev_addr(curr_master, ibdev, port);
-               }
+                       if (curr_master) {
+                               /* if using bonding/team and a slave port is down, we
+                                * don't want the bond IP based gids in the table since
+                                * flows that select port by gid may get the down port.
+                               */
+                               if (port_state == IB_PORT_DOWN) {
+                                       reset_gid_table(ibdev, port);
+                                       mlx4_ib_set_default_gid(ibdev,
+                                                               curr_netdev,
+                                                               port);
+                               } else {
+                                       /* gids from the upper dev (bond/team)
+                                        * should appear in port's gid table
+                                       */
+                                       mlx4_ib_get_dev_addr(curr_master,
+                                                            ibdev, port);
+                               }
+                       }
+                       /* if bonding is used it is possible that we add it to
+                        * masters only after IP address is assigned to the
+                        * net bonding interface.
+                       */
+                       if (curr_master && (old_master != curr_master)) {
+                               reset_gid_table(ibdev, port);
+                               mlx4_ib_set_default_gid(ibdev,
+                                                       curr_netdev, port);
+                               mlx4_ib_get_dev_addr(curr_master, ibdev, port);
+                       }
 
-               if (!curr_master && (old_master != curr_master)) {
+                       if (!curr_master && (old_master != curr_master)) {
+                               reset_gid_table(ibdev, port);
+                               mlx4_ib_set_default_gid(ibdev,
+                                                       curr_netdev, port);
+                               mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
+                       }
+               } else {
                        reset_gid_table(ibdev, port);
-                       mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-                       mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
                }
        }
 
-       spin_unlock(&iboe->lock);
+       spin_unlock_bh(&iboe->lock);
 
        if (update_qps_port > 0)
                mlx4_ib_update_qps(ibdev, dev, update_qps_port);
@@ -2186,6 +2245,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                        goto err_steer_free_bitmap;
        }
 
+       for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
+               atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
+
        if (ib_register_device(&ibdev->ib_dev, NULL))
                goto err_steer_free_bitmap;
 
@@ -2222,12 +2284,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                        }
                }
 #endif
-               for (i = 1 ; i <= ibdev->num_ports ; ++i)
-                       reset_gid_table(ibdev, i);
-               rtnl_lock();
-               mlx4_ib_scan_netdevs(ibdev, NULL, 0);
-               rtnl_unlock();
-               mlx4_ib_init_gid_table(ibdev);
+               if (mlx4_ib_init_gid_table(ibdev))
+                       goto err_notif;
        }
 
        for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
@@ -2375,6 +2433,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
        struct mlx4_ib_dev *ibdev = ibdev_ptr;
        int p;
 
+       ibdev->ib_active = false;
+       flush_workqueue(wq);
+
        mlx4_ib_close_sriov(ibdev);
        mlx4_ib_mad_cleanup(ibdev);
        ib_unregister_device(&ibdev->ib_dev);
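
mlx4_ib_remove() now clears ib_active and flushes the driver workqueue before any teardown, pairing with the early-return checks added to update_gids_task() and reset_gids_task(): GID work already queued either finishes or sees the flag and bails out, rather than issuing commands against a device that is being removed. A short sketch of that ordering; wq is the driver's existing workqueue.

/* teardown ordering: stop new work from acting, then drain what is queued */
ibdev->ib_active = false;	/* checked at the top of the GID work handlers */
flush_workqueue(wq);		/* queued work has now run (or bailed out) */
/* ...then proceed with SR-IOV close, MAD cleanup and ib_unregister_device() */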
index e8cad39..6eb743f 100644 (file)
@@ -451,6 +451,7 @@ struct mlx4_ib_iboe {
        spinlock_t              lock;
        struct net_device      *netdevs[MLX4_MAX_PORTS];
        struct net_device      *masters[MLX4_MAX_PORTS];
+       atomic64_t              mac[MLX4_MAX_PORTS];
        struct notifier_block   nb;
        struct notifier_block   nb_inet;
        struct notifier_block   nb_inet6;
index 9b0e80e..8f9325c 100644 (file)
@@ -234,14 +234,13 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
                                        0);
                if (IS_ERR(mmr->umem)) {
                        err = PTR_ERR(mmr->umem);
+                       /* Prevent mlx4_ib_dereg_mr from freeing an invalid pointer */
                        mmr->umem = NULL;
                        goto release_mpt_entry;
                }
                n = ib_umem_page_count(mmr->umem);
                shift = ilog2(mmr->umem->page_size);
 
-               mmr->mmr.iova       = virt_addr;
-               mmr->mmr.size       = length;
                err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
                                              virt_addr, length, n, shift,
                                              *pmpt_entry);
@@ -249,6 +248,8 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
                        ib_umem_release(mmr->umem);
                        goto release_mpt_entry;
                }
+               mmr->mmr.iova       = virt_addr;
+               mmr->mmr.size       = length;
 
                err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem);
                if (err) {
@@ -262,6 +263,8 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
         * return a failure. But dereg_mr will free the resources.
         */
        err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry);
+       if (!err && flags & IB_MR_REREG_ACCESS)
+               mmr->mmr.access = mr_access_flags;
 
 release_mpt_entry:
        mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);
index efb9eff..9c5150c 100644 (file)
@@ -964,9 +964,10 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
                                   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
                        pr_warn("modify QP %06x to RESET failed.\n",
                               qp->mqp.qpn);
-               if (qp->pri.smac) {
+               if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
                        mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
                        qp->pri.smac = 0;
+                       qp->pri.smac_port = 0;
                }
                if (qp->alt.smac) {
                        mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
@@ -1325,7 +1326,8 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
                 * If one was already assigned, but the new mac differs,
                 * unregister the old one and register the new one.
                */
-               if (!smac_info->smac || smac_info->smac != smac) {
+               if ((!smac_info->smac && !smac_info->smac_port) ||
+                   smac_info->smac != smac) {
                        /* register candidate now, unreg if needed, after success */
                        smac_index = mlx4_register_mac(dev->dev, port, smac);
                        if (smac_index >= 0) {
@@ -1390,21 +1392,13 @@ static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
 static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, u8 *smac,
                                    struct mlx4_qp_context *context)
 {
-       struct net_device *ndev;
        u64 u64_mac;
        int smac_index;
 
-
-       ndev = dev->iboe.netdevs[qp->port - 1];
-       if (ndev) {
-               smac = ndev->dev_addr;
-               u64_mac = mlx4_mac_to_u64(smac);
-       } else {
-               u64_mac = dev->dev->caps.def_mac[qp->port];
-       }
+       u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]);
 
        context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6);
-       if (!qp->pri.smac) {
+       if (!qp->pri.smac && !qp->pri.smac_port) {
                smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac);
                if (smac_index >= 0) {
                        qp->pri.candidate_smac_index = smac_index;
@@ -1432,6 +1426,12 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
        int steer_qp = 0;
        int err = -EINVAL;
 
+       /* APM is not supported under RoCE */
+       if (attr_mask & IB_QP_ALT_PATH &&
+           rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
+           IB_LINK_LAYER_ETHERNET)
+               return -ENOTSUPP;
+
        context = kzalloc(sizeof *context, GFP_KERNEL);
        if (!context)
                return -ENOMEM;
@@ -1682,7 +1682,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                                        MLX4_IB_LINK_TYPE_ETH;
                if (dev->dev->caps.tunnel_offload_mode ==  MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
                        /* set QP to receive both tunneled & non-tunneled packets */
-                       if (!(context->flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)))
+                       if (!(context->flags & cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET)))
                                context->srqn = cpu_to_be32(7 << 28);
                }
        }
@@ -1786,9 +1786,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                        if (qp->flags & MLX4_IB_QP_NETIF)
                                mlx4_ib_steer_qp_reg(dev, qp, 0);
                }
-               if (qp->pri.smac) {
+               if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
                        mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
                        qp->pri.smac = 0;
+                       qp->pri.smac_port = 0;
                }
                if (qp->alt.smac) {
                        mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
@@ -1812,11 +1813,12 @@ out:
        if (err && steer_qp)
                mlx4_ib_steer_qp_reg(dev, qp, 0);
        kfree(context);
-       if (qp->pri.candidate_smac) {
+       if (qp->pri.candidate_smac ||
+           (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) {
                if (err) {
                        mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac);
                } else {
-                       if (qp->pri.smac)
+                       if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port))
                                mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
                        qp->pri.smac = qp->pri.candidate_smac;
                        qp->pri.smac_index = qp->pri.candidate_smac_index;
@@ -2089,6 +2091,16 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
        return 0;
 }
 
+static void mlx4_u64_to_smac(u8 *dst_mac, u64 src_mac)
+{
+       int i;
+
+       for (i = ETH_ALEN; i; i--) {
+               dst_mac[i - 1] = src_mac & 0xff;
+               src_mac >>= 8;
+       }
+}
+
 static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
                            void *wqe, unsigned *mlx_seg_len)
 {
@@ -2203,7 +2215,6 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
        }
 
        if (is_eth) {
-               u8 *smac;
                struct in6_addr in6;
 
                u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
@@ -2216,12 +2227,17 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
                memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4);
                memcpy(&in6, sgid.raw, sizeof(in6));
 
-               if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev))
-                       smac = to_mdev(sqp->qp.ibqp.device)->
-                               iboe.netdevs[sqp->qp.port - 1]->dev_addr;
-               else    /* use the src mac of the tunnel */
-                       smac = ah->av.eth.s_mac;
-               memcpy(sqp->ud_header.eth.smac_h, smac, 6);
+               if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
+                       u64 mac = atomic64_read(&to_mdev(ib_dev)->iboe.mac[sqp->qp.port - 1]);
+                       u8 smac[ETH_ALEN];
+
+                       mlx4_u64_to_smac(smac, mac);
+                       memcpy(sqp->ud_header.eth.smac_h, smac, ETH_ALEN);
+               } else {
+                       /* use the src mac of the tunnel */
+                       memcpy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac, ETH_ALEN);
+               }
+
                if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
                        mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
                if (!is_vlan) {
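
build_mlx_header() now takes the source MAC from the per-port atomic64 cache (iboe.mac[]) through the new mlx4_u64_to_smac() helper instead of dereferencing iboe.netdevs[] directly. A standalone sketch of the byte-order conversion that helper performs, with a made-up MAC value:

#include <stdio.h>

#define ETH_ALEN 6

/* Same loop as mlx4_u64_to_smac(): the low byte of the u64 lands in
 * dst_mac[ETH_ALEN - 1], i.e. the address is stored big-endian in the u64. */
static void u64_to_smac(unsigned char *dst_mac, unsigned long long src_mac)
{
	int i;

	for (i = ETH_ALEN; i; i--) {
		dst_mac[i - 1] = src_mac & 0xff;
		src_mac >>= 8;
	}
}

int main(void)
{
	unsigned char mac[ETH_ALEN];

	u64_to_smac(mac, 0x001122334455ULL);	/* arbitrary example value */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}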
index 40f8536..ac02ce4 100644 (file)
@@ -38,7 +38,7 @@
 #define OCRDMA_VID_PCP_SHIFT   0xD
 
 static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
-                               struct ib_ah_attr *attr, int pdid)
+                       struct ib_ah_attr *attr, union ib_gid *sgid, int pdid)
 {
        int status = 0;
        u16 vlan_tag; bool vlan_enabled = false;
@@ -49,8 +49,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
        memset(&eth, 0, sizeof(eth));
        memset(&grh, 0, sizeof(grh));
 
-       ah->sgid_index = attr->grh.sgid_index;
-
+       /* VLAN */
        vlan_tag = attr->vlan_id;
        if (!vlan_tag || (vlan_tag > 0xFFF))
                vlan_tag = dev->pvid;
@@ -65,15 +64,14 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
                eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
                eth_sz = sizeof(struct ocrdma_eth_basic);
        }
+       /* MAC */
        memcpy(&eth.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
-       memcpy(&eth.dmac[0], attr->dmac, ETH_ALEN);
        status = ocrdma_resolve_dmac(dev, attr, &eth.dmac[0]);
        if (status)
                return status;
-       status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index,
-                       (union ib_gid *)&grh.sgid[0]);
-       if (status)
-               return status;
+       ah->sgid_index = attr->grh.sgid_index;
+       memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid));
+       memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw));
 
        grh.tclass_flow = cpu_to_be32((6 << 28) |
                        (attr->grh.traffic_class << 24) |
@@ -81,8 +79,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
        /* 0x1b is next header value in GRH */
        grh.pdid_hoplimit = cpu_to_be32((pdid << 16) |
                        (0x1b << 8) | attr->grh.hop_limit);
-
-       memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw));
+       /* Eth HDR */
        memcpy(&ah->av->eth_hdr, &eth, eth_sz);
        memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
        if (vlan_enabled)
@@ -98,6 +95,8 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
        struct ocrdma_ah *ah;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
+       union ib_gid sgid;
+       u8 zmac[ETH_ALEN];
 
        if (!(attr->ah_flags & IB_AH_GRH))
                return ERR_PTR(-EINVAL);
@@ -111,7 +110,27 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
        status = ocrdma_alloc_av(dev, ah);
        if (status)
                goto av_err;
-       status = set_av_attr(dev, ah, attr, pd->id);
+
+       status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index, &sgid);
+       if (status) {
+               pr_err("%s(): Failed to query sgid, status = %d\n",
+                     __func__, status);
+               goto av_conf_err;
+       }
+
+       memset(&zmac, 0, ETH_ALEN);
+       if (pd->uctx &&
+           memcmp(attr->dmac, &zmac, ETH_ALEN)) {
+               status = rdma_addr_find_dmac_by_grh(&sgid, &attr->grh.dgid,
+                                        attr->dmac, &attr->vlan_id);
+               if (status) {
+                       pr_err("%s(): Failed to resolve dmac from gid. "
+                               "status = %d\n", __func__, status);
+                       goto av_conf_err;
+               }
+       }
+
+       status = set_av_attr(dev, ah, attr, &sgid, pd->id);
        if (status)
                goto av_conf_err;
 
@@ -145,7 +164,7 @@ int ocrdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
        struct ocrdma_av *av = ah->av;
        struct ocrdma_grh *grh;
        attr->ah_flags |= IB_AH_GRH;
-       if (ah->av->valid & Bit(1)) {
+       if (ah->av->valid & OCRDMA_AV_VALID) {
                grh = (struct ocrdma_grh *)((u8 *)ah->av +
                                sizeof(struct ocrdma_eth_vlan));
                attr->sl = be16_to_cpu(av->eth_hdr.vlan_tag) >> 13;
index acb434d..8f5f257 100644 (file)
@@ -101,7 +101,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
        attr->max_srq_sge = dev->attr.max_srq_sge;
        attr->max_srq_wr = dev->attr.max_rqe;
        attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
-       attr->max_fast_reg_page_list_len = 0;
+       attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
        attr->max_pkeys = 1;
        return 0;
 }
@@ -2846,11 +2846,9 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
        if (cq->first_arm) {
                ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
                cq->first_arm = false;
-               goto skip_defer;
        }
-       cq->deferred_arm = true;
 
-skip_defer:
+       cq->deferred_arm = true;
        cq->deferred_sol = sol_needed;
        spin_unlock_irqrestore(&cq->cq_lock, flags);
 
index 799a0c3..6abd3ed 100644 (file)
@@ -193,6 +193,7 @@ static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
        struct qib_qp_iter *iter;
        loff_t n = *pos;
 
+       rcu_read_lock();
        iter = qib_qp_iter_init(s->private);
        if (!iter)
                return NULL;
@@ -224,7 +225,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
 
 static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
 {
-       /* nothing for now */
+       rcu_read_unlock();
 }
 
 static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr)
index 7fcc150..6ddc026 100644 (file)
@@ -1325,7 +1325,6 @@ int qib_qp_iter_next(struct qib_qp_iter *iter)
        struct qib_qp *pqp = iter->qp;
        struct qib_qp *qp;
 
-       rcu_read_lock();
        for (; n < dev->qp_table_size; n++) {
                if (pqp)
                        qp = rcu_dereference(pqp->next);
@@ -1333,18 +1332,11 @@ int qib_qp_iter_next(struct qib_qp_iter *iter)
                        qp = rcu_dereference(dev->qp_table[n]);
                pqp = qp;
                if (qp) {
-                       if (iter->qp)
-                               atomic_dec(&iter->qp->refcount);
-                       atomic_inc(&qp->refcount);
-                       rcu_read_unlock();
                        iter->qp = qp;
                        iter->n = n;
                        return 0;
                }
        }
-       rcu_read_unlock();
-       if (iter->qp)
-               atomic_dec(&iter->qp->refcount);
        return ret;
 }
 
index 2bc1d2b..74f90b2 100644 (file)
@@ -52,7 +52,7 @@ static void __qib_release_user_pages(struct page **p, size_t num_pages,
  * Call with current->mm->mmap_sem held.
  */
 static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
-                               struct page **p, struct vm_area_struct **vma)
+                               struct page **p)
 {
        unsigned long lock_limit;
        size_t got;
@@ -69,7 +69,7 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
                ret = get_user_pages(current, current->mm,
                                     start_page + got * PAGE_SIZE,
                                     num_pages - got, 1, 1,
-                                    p + got, vma);
+                                    p + got, NULL);
                if (ret < 0)
                        goto bail_release;
        }
@@ -136,7 +136,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
 
        down_write(&current->mm->mmap_sem);
 
-       ret = __qib_get_user_pages(start_page, num_pages, p, NULL);
+       ret = __qib_get_user_pages(start_page, num_pages, p);
 
        up_write(&current->mm->mmap_sem);
 
index 3edce61..d7562be 100644 (file)
@@ -131,6 +131,12 @@ struct ipoib_cb {
        u8                      hwaddr[INFINIBAND_ALEN];
 };
 
+static inline struct ipoib_cb *ipoib_skb_cb(const struct sk_buff *skb)
+{
+       BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct ipoib_cb));
+       return (struct ipoib_cb *)skb->cb;
+}
+
 /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
 struct ipoib_mcast {
        struct ib_sa_mcmember_rec mcmember;
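
The new ipoib_skb_cb() accessor centralizes the skb->cb cast and adds a BUILD_BUG_ON() so the build breaks if struct ipoib_cb ever outgrows the 48-byte control block in struct sk_buff. A self-contained illustration of that compile-time check, with hypothetical structure names and C11 _Static_assert standing in for BUILD_BUG_ON():

/* fake_skb/fake_cb are stand-ins for sk_buff/ipoib_cb; sizes illustrative */
struct fake_skb { unsigned char cb[48]; };
struct fake_cb  { unsigned char hwaddr[20]; };

/* fails to compile if the private block no longer fits in ->cb[] */
_Static_assert(sizeof(struct fake_cb) <= sizeof(((struct fake_skb *)0)->cb),
	       "fake_cb does not fit in fake_skb->cb");

static inline struct fake_cb *fake_skb_cb(struct fake_skb *skb)
{
	return (struct fake_cb *)skb->cb;
}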
index 1310acf..13e6e04 100644 (file)
@@ -716,7 +716,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_neigh *neigh;
-       struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
+       struct ipoib_cb *cb = ipoib_skb_cb(skb);
        struct ipoib_header *header;
        unsigned long flags;
 
@@ -813,7 +813,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
                             const void *daddr, const void *saddr, unsigned len)
 {
        struct ipoib_header *header;
-       struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
+       struct ipoib_cb *cb = ipoib_skb_cb(skb);
 
        header = (struct ipoib_header *) skb_push(skb, sizeof *header);
 
index d4e0057..ffb83b5 100644 (file)
@@ -529,21 +529,13 @@ void ipoib_mcast_join_task(struct work_struct *work)
                          port_attr.state);
                return;
        }
+       priv->local_lid = port_attr.lid;
 
        if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
                ipoib_warn(priv, "ib_query_gid() failed\n");
        else
                memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
 
-       {
-               struct ib_port_attr attr;
-
-               if (!ib_query_port(priv->ca, priv->port, &attr))
-                       priv->local_lid = attr.lid;
-               else
-                       ipoib_warn(priv, "ib_query_port failed\n");
-       }
-
        if (!priv->broadcast) {
                struct ipoib_mcast *broadcast;
 
index 61ee91d..93ce62f 100644 (file)
@@ -344,7 +344,6 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
                     int is_leading)
 {
        struct iscsi_conn *conn = cls_conn->dd_data;
-       struct iscsi_session *session;
        struct iser_conn *ib_conn;
        struct iscsi_endpoint *ep;
        int error;
@@ -363,9 +362,17 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
        }
        ib_conn = ep->dd_data;
 
-       session = conn->session;
-       if (iser_alloc_rx_descriptors(ib_conn, session))
-               return -ENOMEM;
+       mutex_lock(&ib_conn->state_mutex);
+       if (ib_conn->state != ISER_CONN_UP) {
+               error = -EINVAL;
+               iser_err("iser_conn %p state is %d, teardown started\n",
+                        ib_conn, ib_conn->state);
+               goto out;
+       }
+
+       error = iser_alloc_rx_descriptors(ib_conn, conn->session);
+       if (error)
+               goto out;
 
        /* binds the iSER connection retrieved from the previously
         * connected ep_handle to the iSCSI layer connection. exchanges
@@ -375,7 +382,9 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
        conn->dd_data = ib_conn;
        ib_conn->iscsi_conn = conn;
 
-       return 0;
+out:
+       mutex_unlock(&ib_conn->state_mutex);
+       return error;
 }
 
 static int
index c877dad..9f0e0e3 100644 (file)
@@ -69,7 +69,7 @@
 
 #define DRV_NAME       "iser"
 #define PFX            DRV_NAME ": "
-#define DRV_VER                "1.4"
+#define DRV_VER                "1.4.1"
 
 #define iser_dbg(fmt, arg...)                          \
        do {                                            \
index 3ef167f..3bfec4b 100644 (file)
@@ -73,7 +73,7 @@ static int iser_create_device_ib_res(struct iser_device *device)
 {
        struct iser_cq_desc *cq_desc;
        struct ib_device_attr *dev_attr = &device->dev_attr;
-       int ret, i, j;
+       int ret, i;
 
        ret = ib_query_device(device->ib_device, dev_attr);
        if (ret) {
@@ -125,16 +125,20 @@ static int iser_create_device_ib_res(struct iser_device *device)
                                          iser_cq_event_callback,
                                          (void *)&cq_desc[i],
                                          ISER_MAX_RX_CQ_LEN, i);
-               if (IS_ERR(device->rx_cq[i]))
+               if (IS_ERR(device->rx_cq[i])) {
+                       device->rx_cq[i] = NULL;
                        goto cq_err;
+               }
 
                device->tx_cq[i] = ib_create_cq(device->ib_device,
                                          NULL, iser_cq_event_callback,
                                          (void *)&cq_desc[i],
                                          ISER_MAX_TX_CQ_LEN, i);
 
-               if (IS_ERR(device->tx_cq[i]))
+               if (IS_ERR(device->tx_cq[i])) {
+                       device->tx_cq[i] = NULL;
                        goto cq_err;
+               }
 
                if (ib_req_notify_cq(device->rx_cq[i], IB_CQ_NEXT_COMP))
                        goto cq_err;
@@ -160,14 +164,14 @@ static int iser_create_device_ib_res(struct iser_device *device)
 handler_err:
        ib_dereg_mr(device->mr);
 dma_mr_err:
-       for (j = 0; j < device->cqs_used; j++)
-               tasklet_kill(&device->cq_tasklet[j]);
+       for (i = 0; i < device->cqs_used; i++)
+               tasklet_kill(&device->cq_tasklet[i]);
 cq_err:
-       for (j = 0; j < i; j++) {
-               if (device->tx_cq[j])
-                       ib_destroy_cq(device->tx_cq[j]);
-               if (device->rx_cq[j])
-                       ib_destroy_cq(device->rx_cq[j]);
+       for (i = 0; i < device->cqs_used; i++) {
+               if (device->tx_cq[i])
+                       ib_destroy_cq(device->tx_cq[i]);
+               if (device->rx_cq[i])
+                       ib_destroy_cq(device->rx_cq[i]);
        }
        ib_dealloc_pd(device->pd);
 pd_err:
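
The iser_verbs error handling is fixed in two ways: a CQ slot that comes back as an ERR_PTR from ib_create_cq() is reset to NULL before jumping to the shared cleanup, and the cleanup loops reuse the single index over all cqs_used entries so only non-NULL pointers are destroyed. A small standalone illustration of the NULL-sentinel cleanup idiom, with plain malloc/free standing in for the verbs calls:

#include <stdlib.h>
#include <string.h>

#define NR_RES 4

/* returns 0 on success, -1 on failure; nothing is leaked either way */
static int setup_all(void *res[NR_RES])
{
	int i;

	memset(res, 0, NR_RES * sizeof(res[0]));	/* every slot starts NULL */
	for (i = 0; i < NR_RES; i++) {
		res[i] = malloc(64);
		if (!res[i])
			goto err;	/* the failed slot stays NULL */
	}
	return 0;
err:
	for (i = 0; i < NR_RES; i++) {	/* walk every slot, not just 0..i-1 */
		free(res[i]);		/* free(NULL) is a no-op */
		res[i] = NULL;
	}
	return -1;
}

int main(void)
{
	void *res[NR_RES];
	int i, ret = setup_all(res);

	for (i = 0; i < NR_RES; i++)
		free(res[i]);		/* slots are either valid or NULL */
	return ret ? 1 : 0;
}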
index d4c7928..da8ff12 100644 (file)
@@ -586,17 +586,12 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
        init_completion(&isert_conn->conn_wait);
        init_completion(&isert_conn->conn_wait_comp_err);
        kref_init(&isert_conn->conn_kref);
-       kref_get(&isert_conn->conn_kref);
        mutex_init(&isert_conn->conn_mutex);
        spin_lock_init(&isert_conn->conn_lock);
        INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
 
        cma_id->context = isert_conn;
        isert_conn->conn_cm_id = cma_id;
-       isert_conn->responder_resources = event->param.conn.responder_resources;
-       isert_conn->initiator_depth = event->param.conn.initiator_depth;
-       pr_debug("Using responder_resources: %u initiator_depth: %u\n",
-                isert_conn->responder_resources, isert_conn->initiator_depth);
 
        isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
                                        ISER_RX_LOGIN_SIZE, GFP_KERNEL);
@@ -643,6 +638,12 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
                goto out_rsp_dma_map;
        }
 
+       /* Set max inflight RDMA READ requests */
+       isert_conn->initiator_depth = min_t(u8,
+                               event->param.conn.initiator_depth,
+                               device->dev_attr.max_qp_init_rd_atom);
+       pr_debug("Using initiator_depth: %u\n", isert_conn->initiator_depth);
+
        isert_conn->conn_device = device;
        isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
        if (IS_ERR(isert_conn->conn_pd)) {
@@ -746,7 +747,9 @@ isert_connect_release(struct isert_conn *isert_conn)
 static void
 isert_connected_handler(struct rdma_cm_id *cma_id)
 {
-       return;
+       struct isert_conn *isert_conn = cma_id->context;
+
+       kref_get(&isert_conn->conn_kref);
 }
 
 static void
@@ -798,7 +801,6 @@ isert_disconnect_work(struct work_struct *work)
 
 wake_up:
        complete(&isert_conn->conn_wait);
-       isert_put_conn(isert_conn);
 }
 
 static void
@@ -3067,7 +3069,6 @@ isert_rdma_accept(struct isert_conn *isert_conn)
        int ret;
 
        memset(&cp, 0, sizeof(struct rdma_conn_param));
-       cp.responder_resources = isert_conn->responder_resources;
        cp.initiator_depth = isert_conn->initiator_depth;
        cp.retry_count = 7;
        cp.rnr_retry_count = 7;
@@ -3215,7 +3216,7 @@ static void isert_wait_conn(struct iscsi_conn *conn)
        pr_debug("isert_wait_conn: Starting \n");
 
        mutex_lock(&isert_conn->conn_mutex);
-       if (isert_conn->conn_cm_id) {
+       if (isert_conn->conn_cm_id && !isert_conn->disconnect) {
                pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
                rdma_disconnect(isert_conn->conn_cm_id);
        }
@@ -3234,6 +3235,7 @@ static void isert_wait_conn(struct iscsi_conn *conn)
        wait_for_completion(&isert_conn->conn_wait_comp_err);
 
        wait_for_completion(&isert_conn->conn_wait);
+       isert_put_conn(isert_conn);
 }
 
 static void isert_free_conn(struct iscsi_conn *conn)
index 2dd1d0d..6f5d795 100644 (file)
@@ -1791,14 +1791,6 @@ static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = {
        {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "LW25-B7HV"),
-               },
-               .callback = atkbd_deactivate_fixup,
-       },
-       {
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "P1-J273B"),
                },
                .callback = atkbd_deactivate_fixup,
        },
index 180b184..d70b65a 100644 (file)
@@ -33,8 +33,8 @@
 #define CAP1106_REG_SENSOR_CONFIG      0x22
 #define CAP1106_REG_SENSOR_CONFIG2     0x23
 #define CAP1106_REG_SAMPLING_CONFIG    0x24
-#define CAP1106_REG_CALIBRATION                0x25
-#define CAP1106_REG_INT_ENABLE         0x26
+#define CAP1106_REG_CALIBRATION                0x26
+#define CAP1106_REG_INT_ENABLE         0x27
 #define CAP1106_REG_REPEAT_RATE                0x28
 #define CAP1106_REG_MT_CONFIG          0x2a
 #define CAP1106_REG_MT_PATTERN_CONFIG  0x2b
index 8d2e19e..e651fa6 100644 (file)
@@ -332,23 +332,24 @@ static int matrix_keypad_init_gpio(struct platform_device *pdev,
        }
 
        if (pdata->clustered_irq > 0) {
-               err = request_irq(pdata->clustered_irq,
+               err = request_any_context_irq(pdata->clustered_irq,
                                matrix_keypad_interrupt,
                                pdata->clustered_irq_flags,
                                "matrix-keypad", keypad);
-               if (err) {
+               if (err < 0) {
                        dev_err(&pdev->dev,
                                "Unable to acquire clustered interrupt\n");
                        goto err_free_rows;
                }
        } else {
                for (i = 0; i < pdata->num_row_gpios; i++) {
-                       err = request_irq(gpio_to_irq(pdata->row_gpios[i]),
+                       err = request_any_context_irq(
+                                       gpio_to_irq(pdata->row_gpios[i]),
                                        matrix_keypad_interrupt,
                                        IRQF_TRIGGER_RISING |
                                        IRQF_TRIGGER_FALLING,
                                        "matrix-keypad", keypad);
-                       if (err) {
+                       if (err < 0) {
                                dev_err(&pdev->dev,
                                        "Unable to acquire interrupt for GPIO line %i\n",
                                        pdata->row_gpios[i]);
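
Switching from request_irq() to request_any_context_irq() also changes the error check: the latter returns a negative errno on failure but a non-negative context code (IRQC_IS_HARDIRQ or IRQC_IS_NESTED) on success, so only err < 0 indicates failure. A minimal sketch of the adjusted check; the irq variable, handler and name are placeholders.

/* the return value may legitimately be positive, so compare against zero */
err = request_any_context_irq(irq, example_handler,
			      IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
			      "example-keypad", keypad);
if (err < 0)
	return err;	/* genuine failure */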
index a956b98..35a49bf 100644 (file)
@@ -2373,6 +2373,10 @@ int alps_init(struct psmouse *psmouse)
        dev2->keybit[BIT_WORD(BTN_LEFT)] =
                BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT);
 
+       __set_bit(INPUT_PROP_POINTER, dev2->propbit);
+       if (priv->flags & ALPS_DUALPOINT)
+               __set_bit(INPUT_PROP_POINTING_STICK, dev2->propbit);
+
        if (input_register_device(priv->dev2))
                goto init_fail;
 
index da51738..06fc6e7 100644 (file)
@@ -1331,6 +1331,13 @@ static bool elantech_is_signature_valid(const unsigned char *param)
        if (param[1] == 0)
                return true;
 
+       /*
+        * Some models have a revision higher than 20, meaning param[2] may
+        * be 10 or 20; skip the rates check for these.
+        */
+       if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40)
+               return true;
+
        for (i = 0; i < ARRAY_SIZE(rates); i++)
                if (param[2] == rates[i])
                        return false;
@@ -1607,6 +1614,10 @@ int elantech_init(struct psmouse *psmouse)
                tp_dev->keybit[BIT_WORD(BTN_LEFT)] =
                        BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) |
                        BIT_MASK(BTN_RIGHT);
+
+               __set_bit(INPUT_PROP_POINTER, tp_dev->propbit);
+               __set_bit(INPUT_PROP_POINTING_STICK, tp_dev->propbit);
+
                error = input_register_device(etd->tp_dev);
                if (error < 0)
                        goto init_fail_tp_reg;
index cff065f..b4e1f01 100644 (file)
@@ -670,6 +670,8 @@ static void psmouse_apply_defaults(struct psmouse *psmouse)
        __set_bit(REL_X, input_dev->relbit);
        __set_bit(REL_Y, input_dev->relbit);
 
+       __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
        psmouse->set_rate = psmouse_set_rate;
        psmouse->set_resolution = psmouse_set_resolution;
        psmouse->poll = psmouse_poll;
index e8573c6..fd23181 100644 (file)
@@ -629,10 +629,61 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
                         ((buf[0] & 0x04) >> 1) |
                         ((buf[3] & 0x04) >> 2));
 
+               if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
+                       SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) &&
+                   hw->w == 2) {
+                       synaptics_parse_agm(buf, priv, hw);
+                       return 1;
+               }
+
+               hw->x = (((buf[3] & 0x10) << 8) |
+                        ((buf[1] & 0x0f) << 8) |
+                        buf[4]);
+               hw->y = (((buf[3] & 0x20) << 7) |
+                        ((buf[1] & 0xf0) << 4) |
+                        buf[5]);
+               hw->z = buf[2];
+
                hw->left  = (buf[0] & 0x01) ? 1 : 0;
                hw->right = (buf[0] & 0x02) ? 1 : 0;
 
-               if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
+               if (SYN_CAP_FORCEPAD(priv->ext_cap_0c)) {
+                       /*
+                        * ForcePads, like Clickpads, use middle button
+                        * bits to report primary button clicks.
+                        * Unfortunately they report the primary button not
+                        * only when the user presses on the pad above a
+                        * certain threshold, but also when there is more
+                        * than one finger on the touchpad, which interferes
+                        * with our multi-finger gestures.
+                        */
+                       if (hw->z == 0) {
+                               /* No contacts */
+                               priv->press = priv->report_press = false;
+                       } else if (hw->w >= 4 && ((buf[0] ^ buf[3]) & 0x01)) {
+                               /*
+                                * Single-finger touch with pressure above
+                                * the threshold. If the pressure stays long
+                                * enough, we'll start reporting the primary
+                                * button. We rely on the device continuing
+                                * to send data even if the finger does not
+                                * move.
+                                */
+                               if  (!priv->press) {
+                                       priv->press_start = jiffies;
+                                       priv->press = true;
+                               } else if (time_after(jiffies,
+                                               priv->press_start +
+                                                       msecs_to_jiffies(50))) {
+                                       priv->report_press = true;
+                               }
+                       } else {
+                               priv->press = false;
+                       }
+
+                       hw->left = priv->report_press;
+
+               } else if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
                        /*
                         * Clickpad's button is transmitted as middle button,
                         * however, since it is primary button, we will report
@@ -651,21 +702,6 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
                        hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0;
                }
 
-               if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
-                       SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) &&
-                   hw->w == 2) {
-                       synaptics_parse_agm(buf, priv, hw);
-                       return 1;
-               }
-
-               hw->x = (((buf[3] & 0x10) << 8) |
-                        ((buf[1] & 0x0f) << 8) |
-                        buf[4]);
-               hw->y = (((buf[3] & 0x20) << 7) |
-                        ((buf[1] & 0xf0) << 4) |
-                        buf[5]);
-               hw->z = buf[2];
-
                if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) &&
                    ((buf[0] ^ buf[3]) & 0x02)) {
                        switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) {
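The ForcePad hunk above reports a left click only after a single-finger press has been sustained for 50 ms. A minimal standalone sketch of that debounce, in plain C with hypothetical names (forcepad_state, forcepad_update) and a millisecond clock standing in for jiffies:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical state mirroring priv->press / priv->report_press. */
    struct forcepad_state {
            uint64_t press_start_ms;        /* time the press began */
            bool press;                     /* finger above the pressure threshold */
            bool report_press;              /* report it as a left click */
    };

    #define PRESS_DELAY_MS 50               /* same 50 ms threshold as the patch */

    /*
     * Feed one packet's worth of state: "contact" is true while any finger
     * touches the pad, "pressed" is true for a single finger above the
     * pressure threshold.  Returns the left-button state to report.
     */
    static bool forcepad_update(struct forcepad_state *s, bool contact,
                                bool pressed, uint64_t now_ms)
    {
            if (!contact) {
                    /* No contacts: forget any pending press. */
                    s->press = s->report_press = false;
            } else if (pressed) {
                    if (!s->press) {
                            s->press_start_ms = now_ms;     /* start timing */
                            s->press = true;
                    } else if (now_ms - s->press_start_ms >= PRESS_DELAY_MS) {
                            s->report_press = true;         /* held long enough */
                    }
            } else {
                    s->press = false;
            }

            return s->report_press;
    }

    int main(void)
    {
            struct forcepad_state s = { 0 };
            uint64_t t;

            /* Press held from t=0: the click is reported only once t >= 50 ms. */
            for (t = 0; t <= 60; t += 10)
                    printf("t=%2u ms left=%d\n", (unsigned int)t,
                           forcepad_update(&s, true, true, t));
            return 0;
    }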
index e594af0..fb2e076 100644 (file)
  * 2   0x08    image sensor            image sensor tracks 5 fingers, but only
  *                                     reports 2.
  * 2   0x20    report min              query 0x0f gives min coord reported
+ * 2   0x80    forcepad                forcepad is a variant of clickpad that
+ *                                     does not have physical buttons but rather
+ *                                     uses pressure above a certain threshold to
+ *                                     report primary clicks. Forcepads also have
+ *                                     the clickpad bit set.
  */
 #define SYN_CAP_CLICKPAD(ex0c)         ((ex0c) & 0x100000) /* 1-button ClickPad */
 #define SYN_CAP_CLICKPAD2BTN(ex0c)     ((ex0c) & 0x000100) /* 2-button ClickPad */
@@ -86,6 +91,7 @@
 #define SYN_CAP_ADV_GESTURE(ex0c)      ((ex0c) & 0x080000)
 #define SYN_CAP_REDUCED_FILTERING(ex0c)        ((ex0c) & 0x000400)
 #define SYN_CAP_IMAGE_SENSOR(ex0c)     ((ex0c) & 0x000800)
+#define SYN_CAP_FORCEPAD(ex0c)         ((ex0c) & 0x008000)
 
 /* synaptics modes query bits */
 #define SYN_MODE_ABSOLUTE(m)           ((m) & (1 << 7))
@@ -177,6 +183,11 @@ struct synaptics_data {
         */
        struct synaptics_hw_state agm;
        bool agm_pending;                       /* new AGM packet received */
+
+       /* ForcePad handling */
+       unsigned long                           press_start;
+       bool                                    press;
+       bool                                    report_press;
 };
 
 void synaptics_module_init(void);
index e122bda..6bcc018 100644 (file)
@@ -387,6 +387,7 @@ static int synusb_probe(struct usb_interface *intf,
                __set_bit(EV_REL, input_dev->evbit);
                __set_bit(REL_X, input_dev->relbit);
                __set_bit(REL_Y, input_dev->relbit);
+               __set_bit(INPUT_PROP_POINTING_STICK, input_dev->propbit);
                input_set_abs_params(input_dev, ABS_PRESSURE, 0, 127, 0, 0);
        } else {
                input_set_abs_params(input_dev, ABS_X,
@@ -401,6 +402,11 @@ static int synusb_probe(struct usb_interface *intf,
                __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
        }
 
+       if (synusb->flags & SYNUSB_TOUCHSCREEN)
+               __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+       else
+               __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
        __set_bit(BTN_LEFT, input_dev->keybit);
        __set_bit(BTN_RIGHT, input_dev->keybit);
        __set_bit(BTN_MIDDLE, input_dev->keybit);
index ca843b6..30c8b69 100644 (file)
@@ -393,6 +393,9 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
        if ((button_info & 0x0f) >= 3)
                __set_bit(BTN_MIDDLE, psmouse->dev->keybit);
 
+       __set_bit(INPUT_PROP_POINTER, psmouse->dev->propbit);
+       __set_bit(INPUT_PROP_POINTING_STICK, psmouse->dev->propbit);
+
        trackpoint_defaults(psmouse->private);
 
        error = trackpoint_power_on_reset(&psmouse->ps2dev);
index 136b7b2..713e3dd 100644 (file)
@@ -465,6 +465,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
                },
        },
+       {
+               /* Avatar AVIU-145A6 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
+               },
+       },
        { }
 };
 
@@ -608,6 +615,14 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
                },
        },
+       {
+               /* Fujitsu U574 laptop */
+               /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
+               },
+       },
        { }
 };
 
index 3807c3e..f5a98af 100644 (file)
@@ -1254,6 +1254,8 @@ static int __init i8042_create_aux_port(int idx)
        } else {
                snprintf(serio->name, sizeof(serio->name), "i8042 AUX%d port", idx);
                snprintf(serio->phys, sizeof(serio->phys), I8042_MUX_PHYS_DESC, idx + 1);
+               strlcpy(serio->firmware_id, i8042_aux_firmware_id,
+                       sizeof(serio->firmware_id));
        }
 
        port->serio = serio;
index 0cb7ef5..69175b8 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/init.h>
 #include <linux/serio.h>
 #include <linux/tty.h>
+#include <linux/compat.h>
 
 MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
 MODULE_DESCRIPTION("Input device TTY line discipline");
@@ -198,28 +199,55 @@ static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file, u
        return 0;
 }
 
+static void serport_set_type(struct tty_struct *tty, unsigned long type)
+{
+       struct serport *serport = tty->disc_data;
+
+       serport->id.proto = type & 0x000000ff;
+       serport->id.id    = (type & 0x0000ff00) >> 8;
+       serport->id.extra = (type & 0x00ff0000) >> 16;
+}
+
 /*
  * serport_ldisc_ioctl() allows to set the port protocol, and device ID
  */
 
-static int serport_ldisc_ioctl(struct tty_struct * tty, struct file * file, unsigned int cmd, unsigned long arg)
+static int serport_ldisc_ioctl(struct tty_struct *tty, struct file *file,
+                              unsigned int cmd, unsigned long arg)
 {
-       struct serport *serport = (struct serport*) tty->disc_data;
-       unsigned long type;
-
        if (cmd == SPIOCSTYPE) {
+               unsigned long type;
+
                if (get_user(type, (unsigned long __user *) arg))
                        return -EFAULT;
 
-               serport->id.proto = type & 0x000000ff;
-               serport->id.id    = (type & 0x0000ff00) >> 8;
-               serport->id.extra = (type & 0x00ff0000) >> 16;
+               serport_set_type(tty, type);
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
+#ifdef CONFIG_COMPAT
+#define COMPAT_SPIOCSTYPE      _IOW('q', 0x01, compat_ulong_t)
+static long serport_ldisc_compat_ioctl(struct tty_struct *tty,
+                                      struct file *file,
+                                      unsigned int cmd, unsigned long arg)
+{
+       if (cmd == COMPAT_SPIOCSTYPE) {
+               void __user *uarg = compat_ptr(arg);
+               compat_ulong_t compat_type;
+
+               if (get_user(compat_type, (compat_ulong_t __user *)uarg))
+                       return -EFAULT;
 
+               serport_set_type(tty, compat_type);
                return 0;
        }
 
        return -EINVAL;
 }
+#endif
 
 static void serport_ldisc_write_wakeup(struct tty_struct * tty)
 {
@@ -243,6 +271,9 @@ static struct tty_ldisc_ops serport_ldisc = {
        .close =        serport_ldisc_close,
        .read =         serport_ldisc_read,
        .ioctl =        serport_ldisc_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = serport_ldisc_compat_ioctl,
+#endif
        .receive_buf =  serport_ldisc_receive,
        .write_wakeup = serport_ldisc_write_wakeup
 };
index db178ed..aaacf8b 100644 (file)
@@ -837,7 +837,12 @@ static irqreturn_t mxt_process_messages_t44(struct mxt_data *data)
        count = data->msg_buf[0];
 
        if (count == 0) {
-               dev_warn(dev, "Interrupt triggered but zero messages\n");
+               /*
+                * This condition is caused by the CHG line being configured
+                * in Mode 0. It results in unnecessary I2C operations but it
+                * is benign.
+                */
+               dev_dbg(dev, "Interrupt triggered but zero messages\n");
                return IRQ_NONE;
        } else if (count > data->max_reportid) {
                dev_err(dev, "T44 count %d exceeded max report id\n", count);
@@ -1374,11 +1379,16 @@ static int mxt_get_info(struct mxt_data *data)
        return 0;
 }
 
-static void mxt_free_object_table(struct mxt_data *data)
+static void mxt_free_input_device(struct mxt_data *data)
 {
-       input_unregister_device(data->input_dev);
-       data->input_dev = NULL;
+       if (data->input_dev) {
+               input_unregister_device(data->input_dev);
+               data->input_dev = NULL;
+       }
+}
 
+static void mxt_free_object_table(struct mxt_data *data)
+{
        kfree(data->object_table);
        data->object_table = NULL;
        kfree(data->msg_buf);
@@ -1957,11 +1967,13 @@ static int mxt_load_fw(struct device *dev, const char *fn)
                ret = mxt_lookup_bootloader_address(data, 0);
                if (ret)
                        goto release_firmware;
+
+               mxt_free_input_device(data);
+               mxt_free_object_table(data);
        } else {
                enable_irq(data->irq);
        }
 
-       mxt_free_object_table(data);
        reinit_completion(&data->bl_completion);
 
        ret = mxt_check_bootloader(data, MXT_WAITING_BOOTLOAD_CMD, false);
@@ -2210,6 +2222,7 @@ static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id)
        return 0;
 
 err_free_object:
+       mxt_free_input_device(data);
        mxt_free_object_table(data);
 err_free_irq:
        free_irq(client->irq, data);
@@ -2224,7 +2237,7 @@ static int mxt_remove(struct i2c_client *client)
 
        sysfs_remove_group(&client->dev.kobj, &mxt_attr_group);
        free_irq(data->irq, data);
-       input_unregister_device(data->input_dev);
+       mxt_free_input_device(data);
        mxt_free_object_table(data);
        kfree(data);
 
index 16b5211..705ffa1 100644 (file)
@@ -41,7 +41,7 @@
  */
 static int rpu = 8;
 module_param(rpu, int, 0);
-MODULE_PARM_DESC(rpu, "Set internal pull up resitor for pen detect.");
+MODULE_PARM_DESC(rpu, "Set internal pull up resistor for pen detect.");
 
 /*
  * Set current used for pressure measurement.
index 7405353..572a5a6 100644 (file)
@@ -41,7 +41,7 @@
  */
 static int rpu = 8;
 module_param(rpu, int, 0);
-MODULE_PARM_DESC(rpu, "Set internal pull up resitor for pen detect.");
+MODULE_PARM_DESC(rpu, "Set internal pull up resistor for pen detect.");
 
 /*
  * Set current used for pressure measurement.
index ca18d6d..a83cc2a 100644 (file)
 #define ID0_CTTW                       (1 << 14)
 #define ID0_NUMIRPT_SHIFT              16
 #define ID0_NUMIRPT_MASK               0xff
+#define ID0_NUMSIDB_SHIFT              9
+#define ID0_NUMSIDB_MASK               0xf
 #define ID0_NUMSMRG_SHIFT              0
 #define ID0_NUMSMRG_MASK               0xff
 
@@ -524,9 +526,18 @@ static int register_smmu_master(struct arm_smmu_device *smmu,
        master->of_node                 = masterspec->np;
        master->cfg.num_streamids       = masterspec->args_count;
 
-       for (i = 0; i < master->cfg.num_streamids; ++i)
-               master->cfg.streamids[i] = masterspec->args[i];
+       for (i = 0; i < master->cfg.num_streamids; ++i) {
+               u16 streamid = masterspec->args[i];
 
+               if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
+                    (streamid >= smmu->num_mapping_groups)) {
+                       dev_err(dev,
+                               "stream ID for master device %s greater than maximum allowed (%d)\n",
+                               masterspec->np->name, smmu->num_mapping_groups);
+                       return -ERANGE;
+               }
+               master->cfg.streamids[i] = streamid;
+       }
        return insert_smmu_master(smmu, master);
 }
 
@@ -623,7 +634,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 
        if (fsr & FSR_IGN)
                dev_err_ratelimited(smmu->dev,
-                                   "Unexpected context fault (fsr 0x%u)\n",
+                                   "Unexpected context fault (fsr 0x%x)\n",
                                    fsr);
 
        fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
@@ -752,6 +763,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
                        reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
                        break;
                case 39:
+               case 40:
                        reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
                        break;
                case 42:
@@ -773,6 +785,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
                        reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT);
                        break;
                case 39:
+               case 40:
                        reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT);
                        break;
                case 42:
@@ -843,8 +856,11 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
        reg |= TTBCR_EAE |
              (TTBCR_SH_IS << TTBCR_SH0_SHIFT) |
              (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) |
-             (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT) |
-             (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
+             (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT);
+
+       if (!stage1)
+               reg |= (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
+
        writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
 
        /* MAIR0 (stage-1 only) */
@@ -868,10 +884,15 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                                        struct arm_smmu_device *smmu)
 {
-       int irq, ret, start;
+       int irq, start, ret = 0;
+       unsigned long flags;
        struct arm_smmu_domain *smmu_domain = domain->priv;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 
+       spin_lock_irqsave(&smmu_domain->lock, flags);
+       if (smmu_domain->smmu)
+               goto out_unlock;
+
        if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
                /*
                 * We will likely want to change this if/when KVM gets
@@ -890,7 +911,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
        ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
                                      smmu->num_context_banks);
        if (IS_ERR_VALUE(ret))
-               return ret;
+               goto out_unlock;
 
        cfg->cbndx = ret;
        if (smmu->version == 1) {
@@ -900,6 +921,10 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                cfg->irptndx = cfg->cbndx;
        }
 
+       ACCESS_ONCE(smmu_domain->smmu) = smmu;
+       arm_smmu_init_context_bank(smmu_domain);
+       spin_unlock_irqrestore(&smmu_domain->lock, flags);
+
        irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
        ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
                          "arm-smmu-context-fault", domain);
@@ -907,15 +932,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
                        cfg->irptndx, irq);
                cfg->irptndx = INVALID_IRPTNDX;
-               goto out_free_context;
        }
 
-       smmu_domain->smmu = smmu;
-       arm_smmu_init_context_bank(smmu_domain);
        return 0;
 
-out_free_context:
-       __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
+out_unlock:
+       spin_unlock_irqrestore(&smmu_domain->lock, flags);
        return ret;
 }
 
@@ -975,7 +997,6 @@ static void arm_smmu_free_ptes(pmd_t *pmd)
 {
        pgtable_t table = pmd_pgtable(*pmd);
 
-       pgtable_page_dtor(table);
        __free_page(table);
 }
 
@@ -1108,6 +1129,9 @@ static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
        void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
        struct arm_smmu_smr *smrs = cfg->smrs;
 
+       if (!smrs)
+               return;
+
        /* Invalidate the SMRs before freeing back to the allocator */
        for (i = 0; i < cfg->num_streamids; ++i) {
                u8 idx = smrs[i].idx;
@@ -1120,20 +1144,6 @@ static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
        kfree(smrs);
 }
 
-static void arm_smmu_bypass_stream_mapping(struct arm_smmu_device *smmu,
-                                          struct arm_smmu_master_cfg *cfg)
-{
-       int i;
-       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
-
-       for (i = 0; i < cfg->num_streamids; ++i) {
-               u16 sid = cfg->streamids[i];
-
-               writel_relaxed(S2CR_TYPE_BYPASS,
-                              gr0_base + ARM_SMMU_GR0_S2CR(sid));
-       }
-}
-
 static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
                                      struct arm_smmu_master_cfg *cfg)
 {
@@ -1160,23 +1170,30 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
 static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
                                          struct arm_smmu_master_cfg *cfg)
 {
+       int i;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
+       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
 
        /*
         * We *must* clear the S2CR first, because freeing the SMR means
         * that it can be re-allocated immediately.
         */
-       arm_smmu_bypass_stream_mapping(smmu, cfg);
+       for (i = 0; i < cfg->num_streamids; ++i) {
+               u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
+
+               writel_relaxed(S2CR_TYPE_BYPASS,
+                              gr0_base + ARM_SMMU_GR0_S2CR(idx));
+       }
+
        arm_smmu_master_free_smrs(smmu, cfg);
 }
 
 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
-       int ret = -EINVAL;
+       int ret;
        struct arm_smmu_domain *smmu_domain = domain->priv;
-       struct arm_smmu_device *smmu;
+       struct arm_smmu_device *smmu, *dom_smmu;
        struct arm_smmu_master_cfg *cfg;
-       unsigned long flags;
 
        smmu = dev_get_master_dev(dev)->archdata.iommu;
        if (!smmu) {
@@ -1188,20 +1205,22 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
         * Sanity check the domain. We don't support domains across
         * different SMMUs.
         */
-       spin_lock_irqsave(&smmu_domain->lock, flags);
-       if (!smmu_domain->smmu) {
+       dom_smmu = ACCESS_ONCE(smmu_domain->smmu);
+       if (!dom_smmu) {
                /* Now that we have a master, we can finalise the domain */
                ret = arm_smmu_init_domain_context(domain, smmu);
                if (IS_ERR_VALUE(ret))
-                       goto err_unlock;
-       } else if (smmu_domain->smmu != smmu) {
+                       return ret;
+
+               dom_smmu = smmu_domain->smmu;
+       }
+
+       if (dom_smmu != smmu) {
                dev_err(dev,
                        "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
-                       dev_name(smmu_domain->smmu->dev),
-                       dev_name(smmu->dev));
-               goto err_unlock;
+                       dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
+               return -EINVAL;
        }
-       spin_unlock_irqrestore(&smmu_domain->lock, flags);
 
        /* Looks ok, so add the device to the domain */
        cfg = find_smmu_master_cfg(smmu_domain->smmu, dev);
@@ -1209,10 +1228,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
                return -ENODEV;
 
        return arm_smmu_domain_add_master(smmu_domain, cfg);
-
-err_unlock:
-       spin_unlock_irqrestore(&smmu_domain->lock, flags);
-       return ret;
 }
 
 static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
@@ -1247,10 +1262,6 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
                        return -ENOMEM;
 
                arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE);
-               if (!pgtable_page_ctor(table)) {
-                       __free_page(table);
-                       return -ENOMEM;
-               }
                pmd_populate(NULL, pmd, table);
                arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd));
        }
@@ -1626,7 +1637,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 
        /* Mark all SMRn as invalid and all S2CRn as bypass */
        for (i = 0; i < smmu->num_mapping_groups; ++i) {
-               writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(i));
+               writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
                writel_relaxed(S2CR_TYPE_BYPASS,
                        gr0_base + ARM_SMMU_GR0_S2CR(i));
        }
@@ -1761,6 +1772,9 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                dev_notice(smmu->dev,
                           "\tstream matching with %u register groups, mask 0x%x",
                           smmu->num_mapping_groups, mask);
+       } else {
+               smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
+                                          ID0_NUMSIDB_MASK;
        }
 
        /* ID1 */
@@ -1794,11 +1808,16 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
         * Stage-1 output limited by stage-2 input size due to pgd
         * allocation (PTRS_PER_PGD).
         */
+       if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
 #ifdef CONFIG_64BIT
-       smmu->s1_output_size = min_t(unsigned long, VA_BITS, size);
+               smmu->s1_output_size = min_t(unsigned long, VA_BITS, size);
 #else
-       smmu->s1_output_size = min(32UL, size);
+               smmu->s1_output_size = min(32UL, size);
 #endif
+       } else {
+               smmu->s1_output_size = min_t(unsigned long, PHYS_MASK_SHIFT,
+                                            size);
+       }
 
        /* The stage-2 output mask is also applied for bypass */
        size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
@@ -1889,6 +1908,10 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
                smmu->irqs[i] = irq;
        }
 
+       err = arm_smmu_device_cfg_probe(smmu);
+       if (err)
+               return err;
+
        i = 0;
        smmu->masters = RB_ROOT;
        while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
@@ -1905,10 +1928,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
        }
        dev_notice(dev, "registered %d master devices\n", i);
 
-       err = arm_smmu_device_cfg_probe(smmu);
-       if (err)
-               goto out_put_masters;
-
        parse_driver_options(smmu);
 
        if (smmu->version > 1 &&
index 60ab474..06d268a 100644 (file)
@@ -678,8 +678,7 @@ static int __init dmar_acpi_dev_scope_init(void)
                                       andd->device_name);
                                continue;
                        }
-                       acpi_bus_get_device(h, &adev);
-                       if (!adev) {
+                       if (acpi_bus_get_device(h, &adev)) {
                                pr_err("Failed to get device for ACPI object %s\n",
                                       andd->device_name);
                                continue;
index 61d1daf..56feed7 100644 (file)
@@ -984,7 +984,7 @@ static int fsl_pamu_add_device(struct device *dev)
        struct iommu_group *group = ERR_PTR(-ENODEV);
        struct pci_dev *pdev;
        const u32 *prop;
-       int ret, len;
+       int ret = 0, len;
 
        /*
         * For platform devices we allocate a separate group for
@@ -1007,7 +1007,13 @@ static int fsl_pamu_add_device(struct device *dev)
        if (IS_ERR(group))
                return PTR_ERR(group);
 
-       ret = iommu_group_add_device(group, dev);
+       /*
+        * Check if device has already been added to an iommu group.
+        * Group could have already been created for a PCI device in
+        * the iommu_group_get_for_dev path.
+        */
+       if (!dev->iommu_group)
+               ret = iommu_group_add_device(group, dev);
 
        iommu_group_put(group);
        return ret;
index ac4adb3..0639b92 100644 (file)
@@ -678,15 +678,17 @@ static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev)
  */
 struct iommu_group *iommu_group_get_for_dev(struct device *dev)
 {
-       struct iommu_group *group = ERR_PTR(-EIO);
+       struct iommu_group *group;
        int ret;
 
        group = iommu_group_get(dev);
        if (group)
                return group;
 
-       if (dev_is_pci(dev))
-               group = iommu_group_get_for_pci_dev(to_pci_dev(dev));
+       if (!dev_is_pci(dev))
+               return ERR_PTR(-EINVAL);
+
+       group = iommu_group_get_for_pci_dev(to_pci_dev(dev));
 
        if (IS_ERR(group))
                return group;
index f8636a6..5945223 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/slab.h>
 #include <linux/irqdomain.h>
 #include <linux/irqchip/chained_irq.h>
+#include <linux/interrupt.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 
index 85c2985..bbbaf5d 100644 (file)
@@ -220,7 +220,7 @@ static int __init crossbar_of_init(struct device_node *node)
                        of_property_read_u32_index(node,
                                                   "ti,irqs-reserved",
                                                   i, &entry);
-                       if (entry > max) {
+                       if (entry >= max) {
                                pr_err("Invalid reserved entry\n");
                                ret = -EINVAL;
                                goto err_irq_map;
@@ -238,7 +238,7 @@ static int __init crossbar_of_init(struct device_node *node)
                        of_property_read_u32_index(node,
                                                   "ti,irqs-skip",
                                                   i, &entry);
-                       if (entry > max) {
+                       if (entry >= max) {
                                pr_err("Invalid skip entry\n");
                                ret = -EINVAL;
                                goto err_irq_map;
index 57eaa5a..a0698b4 100644 (file)
@@ -36,7 +36,7 @@
 struct gic_chip_data {
        void __iomem            *dist_base;
        void __iomem            **redist_base;
-       void __percpu __iomem   **rdist;
+       void __iomem * __percpu *rdist;
        struct irq_domain       *domain;
        u64                     redist_stride;
        u32                     redist_regions;
@@ -104,7 +104,7 @@ static void gic_redist_wait_for_rwp(void)
 }
 
 /* Low level accessors */
-static u64 gic_read_iar(void)
+static u64 __maybe_unused gic_read_iar(void)
 {
        u64 irqstat;
 
@@ -112,24 +112,24 @@ static u64 gic_read_iar(void)
        return irqstat;
 }
 
-static void gic_write_pmr(u64 val)
+static void __maybe_unused gic_write_pmr(u64 val)
 {
        asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
 }
 
-static void gic_write_ctlr(u64 val)
+static void __maybe_unused gic_write_ctlr(u64 val)
 {
        asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val));
        isb();
 }
 
-static void gic_write_grpen1(u64 val)
+static void __maybe_unused gic_write_grpen1(u64 val)
 {
        asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val));
        isb();
 }
 
-static void gic_write_sgi1r(u64 val)
+static void __maybe_unused gic_write_sgi1r(u64 val)
 {
        asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
 }
@@ -200,19 +200,6 @@ static void gic_poke_irq(struct irq_data *d, u32 offset)
        rwp_wait();
 }
 
-static int gic_peek_irq(struct irq_data *d, u32 offset)
-{
-       u32 mask = 1 << (gic_irq(d) % 32);
-       void __iomem *base;
-
-       if (gic_irq_in_rdist(d))
-               base = gic_data_rdist_sgi_base();
-       else
-               base = gic_data.dist_base;
-
-       return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
-}
-
 static void gic_mask_irq(struct irq_data *d)
 {
        gic_poke_irq(d, GICD_ICENABLER);
@@ -401,6 +388,19 @@ static void gic_cpu_init(void)
 }
 
 #ifdef CONFIG_SMP
+static int gic_peek_irq(struct irq_data *d, u32 offset)
+{
+       u32 mask = 1 << (gic_irq(d) % 32);
+       void __iomem *base;
+
+       if (gic_irq_in_rdist(d))
+               base = gic_data_rdist_sgi_base();
+       else
+               base = gic_data.dist_base;
+
+       return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
+}
+
 static int gic_secondary_init(struct notifier_block *nfb,
                              unsigned long action, void *hcpu)
 {
index 4b959e6..dda6dbc 100644 (file)
@@ -867,7 +867,7 @@ static int gic_routable_irq_domain_xlate(struct irq_domain *d,
        return 0;
 }
 
-const struct irq_domain_ops gic_default_routable_irq_domain_ops = {
+static const struct irq_domain_ops gic_default_routable_irq_domain_ops = {
        .map = gic_routable_irq_domain_map,
        .unmap = gic_routable_irq_domain_unmap,
        .xlate = gic_routable_irq_domain_xlate,
index 1af40ee..7130505 100644 (file)
@@ -895,8 +895,8 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
        struct cache *cache = mg->cache;
 
        if (mg->writeback) {
-               cell_defer(cache, mg->old_ocell, false);
                clear_dirty(cache, mg->old_oblock, mg->cblock);
+               cell_defer(cache, mg->old_ocell, false);
                cleanup_migration(mg);
                return;
 
@@ -951,13 +951,13 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
                }
 
        } else {
+               clear_dirty(cache, mg->new_oblock, mg->cblock);
                if (mg->requeue_holder)
                        cell_defer(cache, mg->new_ocell, true);
                else {
                        bio_endio(mg->new_ocell->holder, 0);
                        cell_defer(cache, mg->new_ocell, false);
                }
-               clear_dirty(cache, mg->new_oblock, mg->cblock);
                cleanup_migration(mg);
        }
 }
index f60bad4..3c89fcb 100644 (file)
@@ -182,7 +182,6 @@ config MEDIA_SUBDRV_AUTOSELECT
        depends on HAS_IOMEM
        select I2C
        select I2C_MUX
-       select SPI
        default y
        help
          By default, a media driver auto-selects all possible ancillary
index 5135a09..12ce19c 100644 (file)
 #define USB_PID_PCTV_400E                              0x020f
 #define USB_PID_PCTV_450E                              0x0222
 #define USB_PID_PCTV_452E                              0x021f
+#define USB_PID_PCTV_78E                               0x025a
+#define USB_PID_PCTV_79E                               0x0262
 #define USB_PID_REALTEK_RTL2831U                       0x2831
 #define USB_PID_REALTEK_RTL2832U                       0x2832
 #define USB_PID_TECHNOTREND_CONNECT_S2_3600            0x3007
index be4bec2..5c90ea6 100644 (file)
@@ -314,6 +314,19 @@ static int af9033_init(struct dvb_frontend *fe)
                        goto err;
        }
 
+       /* feed clock to RF tuner */
+       switch (state->cfg.tuner) {
+       case AF9033_TUNER_IT9135_38:
+       case AF9033_TUNER_IT9135_51:
+       case AF9033_TUNER_IT9135_52:
+       case AF9033_TUNER_IT9135_60:
+       case AF9033_TUNER_IT9135_61:
+       case AF9033_TUNER_IT9135_62:
+               ret = af9033_wr_reg(state, 0x80fba8, 0x00);
+               if (ret < 0)
+                       goto err;
+       }
+
        /* settings for TS interface */
        if (state->cfg.ts_mode == AF9033_TS_MODE_USB) {
                ret = af9033_wr_reg_mask(state, 0x80f9a5, 0x00, 0x01);
index fc2ad58..ded7b67 100644 (file)
@@ -1418,7 +1418,7 @@ static const struct reg_val tuner_init_it9135_60[] = {
        { 0x800068, 0x0a },
        { 0x80006a, 0x03 },
        { 0x800070, 0x0a },
-       { 0x800071, 0x05 },
+       { 0x800071, 0x0a },
        { 0x800072, 0x02 },
        { 0x800075, 0x8c },
        { 0x800076, 0x8c },
@@ -1484,7 +1484,6 @@ static const struct reg_val tuner_init_it9135_60[] = {
        { 0x800104, 0x02 },
        { 0x800105, 0xbe },
        { 0x800106, 0x00 },
-       { 0x800109, 0x02 },
        { 0x800115, 0x0a },
        { 0x800116, 0x03 },
        { 0x80011a, 0xbe },
@@ -1510,7 +1509,6 @@ static const struct reg_val tuner_init_it9135_60[] = {
        { 0x80014b, 0x8c },
        { 0x80014d, 0xac },
        { 0x80014e, 0xc6 },
-       { 0x80014f, 0x03 },
        { 0x800151, 0x1e },
        { 0x800153, 0xbc },
        { 0x800178, 0x09 },
@@ -1522,9 +1520,10 @@ static const struct reg_val tuner_init_it9135_60[] = {
        { 0x80018d, 0x5f },
        { 0x80018f, 0xa0 },
        { 0x800190, 0x5a },
-       { 0x80ed02, 0xff },
-       { 0x80ee42, 0xff },
-       { 0x80ee82, 0xff },
+       { 0x800191, 0x00 },
+       { 0x80ed02, 0x40 },
+       { 0x80ee42, 0x40 },
+       { 0x80ee82, 0x40 },
        { 0x80f000, 0x0f },
        { 0x80f01f, 0x8c },
        { 0x80f020, 0x00 },
@@ -1699,7 +1698,6 @@ static const struct reg_val tuner_init_it9135_61[] = {
        { 0x800104, 0x02 },
        { 0x800105, 0xc8 },
        { 0x800106, 0x00 },
-       { 0x800109, 0x02 },
        { 0x800115, 0x0a },
        { 0x800116, 0x03 },
        { 0x80011a, 0xc6 },
@@ -1725,7 +1723,6 @@ static const struct reg_val tuner_init_it9135_61[] = {
        { 0x80014b, 0x8c },
        { 0x80014d, 0xa8 },
        { 0x80014e, 0xc6 },
-       { 0x80014f, 0x03 },
        { 0x800151, 0x28 },
        { 0x800153, 0xcc },
        { 0x800178, 0x09 },
@@ -1737,9 +1734,10 @@ static const struct reg_val tuner_init_it9135_61[] = {
        { 0x80018d, 0x5f },
        { 0x80018f, 0xfb },
        { 0x800190, 0x5c },
-       { 0x80ed02, 0xff },
-       { 0x80ee42, 0xff },
-       { 0x80ee82, 0xff },
+       { 0x800191, 0x00 },
+       { 0x80ed02, 0x40 },
+       { 0x80ee42, 0x40 },
+       { 0x80ee82, 0x40 },
        { 0x80f000, 0x0f },
        { 0x80f01f, 0x8c },
        { 0x80f020, 0x00 },
index 1eaf975..62acb10 100644 (file)
@@ -1282,19 +1282,12 @@ static int smiapp_set_power(struct v4l2_subdev *subdev, int on)
 
        mutex_lock(&sensor->power_mutex);
 
-       /*
-        * If the power count is modified from 0 to != 0 or from != 0
-        * to 0, update the power state.
-        */
-       if (!sensor->power_count == !on)
-               goto out;
-
-       if (on) {
+       if (on && !sensor->power_count) {
                /* Power on and perform initialisation. */
                ret = smiapp_power_on(sensor);
                if (ret < 0)
                        goto out;
-       } else {
+       } else if (!on && sensor->power_count == 1) {
                smiapp_power_off(sensor);
        }
 
@@ -2572,7 +2565,7 @@ static int smiapp_registered(struct v4l2_subdev *subdev)
 
                this->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
                this->sd.internal_ops = &smiapp_internal_ops;
-               this->sd.owner = NULL;
+               this->sd.owner = THIS_MODULE;
                v4l2_set_subdevdata(&this->sd, client);
 
                rval = media_entity_init(&this->sd.entity,
index 716bdc5..83f5074 100644 (file)
@@ -1091,6 +1091,7 @@ static int cx18_probe(struct pci_dev *pci_dev,
                setup.addr = ADDR_UNSET;
                setup.type = cx->options.tuner;
                setup.mode_mask = T_ANALOG_TV;  /* matches TV tuners */
+               setup.config = NULL;
                if (cx->options.radio > 0)
                        setup.mode_mask |= T_RADIO;
                setup.tuner_callback = (setup.type == TUNER_XC2028) ?
index 6f30d7e..3d83c42 100644 (file)
@@ -396,6 +396,7 @@ struct dvb_frontend *it913x_attach(struct dvb_frontend *fe,
                struct i2c_adapter *i2c_adap, u8 i2c_addr, u8 config)
 {
        struct it913x_state *state = NULL;
+       int ret;
 
        /* allocate memory for the internal state */
        state = kzalloc(sizeof(struct it913x_state), GFP_KERNEL);
@@ -425,6 +426,11 @@ struct dvb_frontend *it913x_attach(struct dvb_frontend *fe,
        state->tuner_type = config;
        state->firmware_ver = 1;
 
+       /* tuner RF initial */
+       ret = it913x_wr_reg(state, PRO_DMOD, 0xec4c, 0x68);
+       if (ret < 0)
+               goto error;
+
        fe->tuner_priv = state;
        memcpy(&fe->ops.tuner_ops, &it913x_tuner_ops,
                        sizeof(struct dvb_tuner_ops));
index 75ec1c6..c82beac 100644 (file)
@@ -1575,6 +1575,10 @@ static const struct usb_device_id af9035_id_table[] = {
                &af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) },
        { DVB_USB_DEVICE(USB_VID_HAUPPAUGE, 0xf900,
                &af9035_props, "Hauppauge WinTV-MiniStick 2", NULL) },
+       { DVB_USB_DEVICE(USB_VID_PCTV, USB_PID_PCTV_78E,
+               &af9035_props, "PCTV 78e", RC_MAP_IT913X_V1) },
+       { DVB_USB_DEVICE(USB_VID_PCTV, USB_PID_PCTV_79E,
+               &af9035_props, "PCTV 79e", RC_MAP_IT913X_V2) },
        { }
 };
 MODULE_DEVICE_TABLE(usb, af9035_id_table);
index a34a11d..63ca984 100644 (file)
@@ -29,7 +29,7 @@ config FUSION_SPI
 config FUSION_FC
        tristate "Fusion MPT ScsiHost drivers for FC"
        depends on PCI && SCSI
-       select SCSI_FC_ATTRS
+       depends on SCSI_FC_ATTRS
        ---help---
          SCSI HOST support for a Fiber Channel host adapters.
 
index 7ffdb58..7e1efd5 100644 (file)
@@ -79,6 +79,11 @@ static void firmware_load(const struct firmware *fw, void *context)
        u32 jedec_id;
        u32 status;
 
+       if (fw == NULL) {
+               dev_err(&spi->dev, "Cannot load firmware, aborting\n");
+               return;
+       }
+
        if (fw->size == 0) {
                dev_err(&spi->dev, "Error: Firmware size is 0!\n");
                return;
index f0f5eab..798ae69 100644 (file)
@@ -175,7 +175,7 @@ MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
                                "the same MAC; 0 for none (default), "
                                "1 for active, 2 for follow");
 module_param(all_slaves_active, int, 0);
-MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface"
+MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface "
                                     "by setting active flag for all slaves; "
                                     "0 for never (default), 1 for always.");
 module_param(resend_igmp, int, 0);
@@ -3659,8 +3659,14 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
                else
                        bond_xmit_slave_id(bond, skb, 0);
        } else {
-               slave_id = bond_rr_gen_slave_id(bond);
-               bond_xmit_slave_id(bond, skb, slave_id % bond->slave_cnt);
+               int slave_cnt = ACCESS_ONCE(bond->slave_cnt);
+
+               if (likely(slave_cnt)) {
+                       slave_id = bond_rr_gen_slave_id(bond);
+                       bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
+               } else {
+                       dev_kfree_skb_any(skb);
+               }
        }
 
        return NETDEV_TX_OK;
@@ -3691,8 +3697,13 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
 static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+       int slave_cnt = ACCESS_ONCE(bond->slave_cnt);
 
-       bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb) % bond->slave_cnt);
+       if (likely(slave_cnt))
+               bond_xmit_slave_id(bond, skb,
+                                  bond_xmit_hash(bond, skb) % slave_cnt);
+       else
+               dev_kfree_skb_any(skb);
 
        return NETDEV_TX_OK;
 }
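Both bonding transmit paths above snapshot bond->slave_cnt once with ACCESS_ONCE() so the zero check and the modulo use the same value even if the last slave is removed concurrently. A hedged standalone sketch of that pattern (names are invented; volatile plays the role of ACCESS_ONCE):

    #include <stdio.h>

    /* Pick a slave index from a hash, or -1 if there are no slaves. */
    static int pick_slave(unsigned int hash, const volatile unsigned int *slave_cnt)
    {
            unsigned int cnt = *slave_cnt;  /* read the count exactly once */

            if (cnt == 0)
                    return -1;              /* no slaves: caller drops the frame */

            return hash % cnt;              /* safe: cnt cannot have changed */
    }

    int main(void)
    {
            volatile unsigned int slaves = 3;

            printf("hash 7 -> slave %d\n", pick_slave(7, &slaves));
            slaves = 0;
            printf("hash 7 -> slave %d (empty bond)\n", pick_slave(7, &slaves));
            return 0;
    }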
index f07fa89..05e1aa0 100644 (file)
@@ -1123,7 +1123,9 @@ static int at91_open(struct net_device *dev)
        struct at91_priv *priv = netdev_priv(dev);
        int err;
 
-       clk_enable(priv->clk);
+       err = clk_prepare_enable(priv->clk);
+       if (err)
+               return err;
 
        /* check or determine and set bittime */
        err = open_candev(dev);
@@ -1149,7 +1151,7 @@ static int at91_open(struct net_device *dev)
  out_close:
        close_candev(dev);
  out:
-       clk_disable(priv->clk);
+       clk_disable_unprepare(priv->clk);
 
        return err;
 }
@@ -1166,7 +1168,7 @@ static int at91_close(struct net_device *dev)
        at91_chip_stop(dev, CAN_STATE_STOPPED);
 
        free_irq(dev->irq, dev);
-       clk_disable(priv->clk);
+       clk_disable_unprepare(priv->clk);
 
        close_candev(dev);
 
index 109cb44..fb279d6 100644 (file)
@@ -97,14 +97,14 @@ static void c_can_hw_raminit_ti(const struct c_can_priv *priv, bool enable)
        ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
        writel(ctrl, priv->raminit_ctrlreg);
        ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance);
-       c_can_hw_raminit_wait_ti(priv, ctrl, mask);
+       c_can_hw_raminit_wait_ti(priv, mask, ctrl);
 
        if (enable) {
                /* Set start bit and wait for the done bit. */
                ctrl |= CAN_RAMINIT_START_MASK(priv->instance);
                writel(ctrl, priv->raminit_ctrlreg);
                ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
-               c_can_hw_raminit_wait_ti(priv, ctrl, mask);
+               c_can_hw_raminit_wait_ti(priv, mask, ctrl);
        }
        spin_unlock(&raminit_lock);
 }
index 944aa5d..6586309 100644 (file)
@@ -62,7 +62,7 @@
 #define FLEXCAN_MCR_BCC                        BIT(16)
 #define FLEXCAN_MCR_LPRIO_EN           BIT(13)
 #define FLEXCAN_MCR_AEN                        BIT(12)
-#define FLEXCAN_MCR_MAXMB(x)           ((x) & 0x1f)
+#define FLEXCAN_MCR_MAXMB(x)           ((x) & 0x7f)
 #define FLEXCAN_MCR_IDAM_A             (0 << 8)
 #define FLEXCAN_MCR_IDAM_B             (1 << 8)
 #define FLEXCAN_MCR_IDAM_C             (2 << 8)
         FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT)
 
 /* FLEXCAN interrupt flag register (IFLAG) bits */
-#define FLEXCAN_TX_BUF_ID              8
+/* Errata ERR005829 step7: Reserve first valid MB */
+#define FLEXCAN_TX_BUF_RESERVED                8
+#define FLEXCAN_TX_BUF_ID              9
 #define FLEXCAN_IFLAG_BUF(x)           BIT(x)
 #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7)
 #define FLEXCAN_IFLAG_RX_FIFO_WARN     BIT(6)
 
 /* FLEXCAN message buffers */
 #define FLEXCAN_MB_CNT_CODE(x)         (((x) & 0xf) << 24)
+#define FLEXCAN_MB_CODE_RX_INACTIVE    (0x0 << 24)
+#define FLEXCAN_MB_CODE_RX_EMPTY       (0x4 << 24)
+#define FLEXCAN_MB_CODE_RX_FULL                (0x2 << 24)
+#define FLEXCAN_MB_CODE_RX_OVERRRUN    (0x6 << 24)
+#define FLEXCAN_MB_CODE_RX_RANSWER     (0xa << 24)
+
+#define FLEXCAN_MB_CODE_TX_INACTIVE    (0x8 << 24)
+#define FLEXCAN_MB_CODE_TX_ABORT       (0x9 << 24)
+#define FLEXCAN_MB_CODE_TX_DATA                (0xc << 24)
+#define FLEXCAN_MB_CODE_TX_TANSWER     (0xe << 24)
+
 #define FLEXCAN_MB_CNT_SRR             BIT(22)
 #define FLEXCAN_MB_CNT_IDE             BIT(21)
 #define FLEXCAN_MB_CNT_RTR             BIT(20)
@@ -298,7 +311,7 @@ static int flexcan_chip_enable(struct flexcan_priv *priv)
        flexcan_write(reg, &regs->mcr);
 
        while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
-               usleep_range(10, 20);
+               udelay(10);
 
        if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
                return -ETIMEDOUT;
@@ -317,7 +330,7 @@ static int flexcan_chip_disable(struct flexcan_priv *priv)
        flexcan_write(reg, &regs->mcr);
 
        while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
-               usleep_range(10, 20);
+               udelay(10);
 
        if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
                return -ETIMEDOUT;
@@ -336,7 +349,7 @@ static int flexcan_chip_freeze(struct flexcan_priv *priv)
        flexcan_write(reg, &regs->mcr);
 
        while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
-               usleep_range(100, 200);
+               udelay(100);
 
        if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
                return -ETIMEDOUT;
@@ -355,7 +368,7 @@ static int flexcan_chip_unfreeze(struct flexcan_priv *priv)
        flexcan_write(reg, &regs->mcr);
 
        while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
-               usleep_range(10, 20);
+               udelay(10);
 
        if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)
                return -ETIMEDOUT;
@@ -370,7 +383,7 @@ static int flexcan_chip_softreset(struct flexcan_priv *priv)
 
        flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
        while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST))
-               usleep_range(10, 20);
+               udelay(10);
 
        if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)
                return -ETIMEDOUT;
@@ -428,6 +441,14 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
        flexcan_write(can_id, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_id);
        flexcan_write(ctrl, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
 
+       /* Errata ERR005829 step8:
+        * Write the INACTIVE (0x8) code twice to the first MB.
+        */
+       flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+                     &regs->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
+       flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+                     &regs->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
+
        return NETDEV_TX_OK;
 }
 
@@ -744,6 +765,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
                stats->tx_bytes += can_get_echo_skb(dev, 0);
                stats->tx_packets++;
                can_led_event(dev, CAN_LED_EVENT_TX);
+               /* after sending an RTR frame the mailbox is in RX mode */
+               flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+                             &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
                flexcan_write((1 << FLEXCAN_TX_BUF_ID), &regs->iflag1);
                netif_wake_queue(dev);
        }
@@ -801,6 +825,7 @@ static int flexcan_chip_start(struct net_device *dev)
        struct flexcan_regs __iomem *regs = priv->base;
        int err;
        u32 reg_mcr, reg_ctrl;
+       int i;
 
        /* enable module */
        err = flexcan_chip_enable(priv);
@@ -867,8 +892,18 @@ static int flexcan_chip_start(struct net_device *dev)
        netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
        flexcan_write(reg_ctrl, &regs->ctrl);
 
-       /* Abort any pending TX, mark Mailbox as INACTIVE */
-       flexcan_write(FLEXCAN_MB_CNT_CODE(0x4),
+       /* clear and invalidate all mailboxes first */
+       for (i = FLEXCAN_TX_BUF_ID; i < ARRAY_SIZE(regs->cantxfg); i++) {
+               flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE,
+                             &regs->cantxfg[i].can_ctrl);
+       }
+
+       /* Errata ERR005829: mark first TX mailbox as INACTIVE */
+       flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+                     &regs->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
+
+       /* mark TX mailbox as INACTIVE */
+       flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
                      &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
 
        /* acceptance mask/acceptance code (accept everything) */
index 7a85590..e5fac36 100644 (file)
@@ -70,6 +70,8 @@ struct peak_pci_chan {
 #define PEAK_PC_104P_DEVICE_ID 0x0006  /* PCAN-PC/104+ cards */
 #define PEAK_PCI_104E_DEVICE_ID        0x0007  /* PCAN-PCI/104 Express cards */
 #define PEAK_MPCIE_DEVICE_ID   0x0008  /* The miniPCIe slot cards */
+#define PEAK_PCIE_OEM_ID       0x0009  /* PCAN-PCI Express OEM */
+#define PEAK_PCIEC34_DEVICE_ID 0x000A  /* PCAN-PCI Express 34 (one channel) */
 
 #define PEAK_PCI_CHAN_MAX      4
 
@@ -87,6 +89,7 @@ static const struct pci_device_id peak_pci_tbl[] = {
        {PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
 #ifdef CONFIG_CAN_PEAK_PCIEC
        {PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+       {PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
 #endif
        {0,}
 };
@@ -653,7 +656,8 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                 * This must be done *before* register_sja1000dev() but
                 * *after* devices linkage
                 */
-               if (pdev->device == PEAK_PCIEC_DEVICE_ID) {
+               if (pdev->device == PEAK_PCIEC_DEVICE_ID ||
+                   pdev->device == PEAK_PCIEC34_DEVICE_ID) {
                        err = peak_pciec_probe(pdev, dev);
                        if (err) {
                                dev_err(&pdev->dev,
index 3fe45c7..8ca49f0 100644 (file)
@@ -2129,6 +2129,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
        int entry = vp->cur_tx % TX_RING_SIZE;
        struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
        unsigned long flags;
+       dma_addr_t dma_addr;
 
        if (vortex_debug > 6) {
                pr_debug("boomerang_start_xmit()\n");
@@ -2163,24 +2164,48 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
 
        if (!skb_shinfo(skb)->nr_frags) {
-               vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
-                                                                               skb->len, PCI_DMA_TODEVICE));
+               dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len,
+                                         PCI_DMA_TODEVICE);
+               if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+                       goto out_dma_err;
+
+               vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
                vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
        } else {
                int i;
 
-               vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
-                                                                               skb_headlen(skb), PCI_DMA_TODEVICE));
+               dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data,
+                                         skb_headlen(skb), PCI_DMA_TODEVICE);
+               if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+                       goto out_dma_err;
+
+               vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
                vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
 
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
+                       dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag,
+                                                   0,
+                                                   frag->size,
+                                                   DMA_TO_DEVICE);
+                       if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) {
+                               for(i = i-1; i >= 0; i--)
+                                       dma_unmap_page(&VORTEX_PCI(vp)->dev,
+                                                      le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
+                                                      le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
+                                                      DMA_TO_DEVICE);
+
+                               pci_unmap_single(VORTEX_PCI(vp),
+                                                le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
+                                                le32_to_cpu(vp->tx_ring[entry].frag[0].length),
+                                                PCI_DMA_TODEVICE);
+
+                               goto out_dma_err;
+                       }
+
                        vp->tx_ring[entry].frag[i+1].addr =
-                                       cpu_to_le32(skb_frag_dma_map(
-                                               &VORTEX_PCI(vp)->dev,
-                                               frag,
-                                               frag->page_offset, frag->size, DMA_TO_DEVICE));
+                                               cpu_to_le32(dma_addr);
 
                        if (i == skb_shinfo(skb)->nr_frags-1)
                                        vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG);
@@ -2189,7 +2214,10 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
                }
        }
 #else
-       vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
+       dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE);
+       if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+               goto out_dma_err;
+       vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
        vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
        vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
 #endif
@@ -2217,7 +2245,11 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
        skb_tx_timestamp(skb);
        iowrite16(DownUnstall, ioaddr + EL3_CMD);
        spin_unlock_irqrestore(&vp->lock, flags);
+out:
        return NETDEV_TX_OK;
+out_dma_err:
+       dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n");
+       goto out;
 }
 
 /* The interrupt handler does all of the Rx thread work and cleans up
index fe5cfea..5919394 100644 (file)
 #define DRV_NAME       "arc_emac"
 #define DRV_VERSION    "1.0"
 
+/**
+ * arc_emac_tx_avail - Return the number of available slots in the tx ring.
+ * @priv: Pointer to ARC EMAC private data structure.
+ *
+ * returns: the number of slots available for transmission in the tx ring.
+ */
+static inline int arc_emac_tx_avail(struct arc_emac_priv *priv)
+{
+       return (priv->txbd_dirty + TX_BD_NUM - priv->txbd_curr - 1) % TX_BD_NUM;
+}
+
 /**
  * arc_emac_adjust_link - Adjust the PHY link duplex.
  * @ndev:      Pointer to the net_device structure.
@@ -180,10 +191,15 @@ static void arc_emac_tx_clean(struct net_device *ndev)
                txbd->info = 0;
 
                *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
-
-               if (netif_queue_stopped(ndev))
-                       netif_wake_queue(ndev);
        }
+
+       /* Ensure that txbd_dirty is visible to tx() before checking
+        * for queue stopped.
+        */
+       smp_mb();
+
+       if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv))
+               netif_wake_queue(ndev);
 }
 
 /**
@@ -298,7 +314,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
        work_done = arc_emac_rx(ndev, budget);
        if (work_done < budget) {
                napi_complete(napi);
-               arc_reg_or(priv, R_ENABLE, RXINT_MASK);
+               arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
        }
 
        return work_done;
@@ -327,9 +343,9 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
        /* Reset all flags except "MDIO complete" */
        arc_reg_set(priv, R_STATUS, status);
 
-       if (status & RXINT_MASK) {
+       if (status & (RXINT_MASK | TXINT_MASK)) {
                if (likely(napi_schedule_prep(&priv->napi))) {
-                       arc_reg_clr(priv, R_ENABLE, RXINT_MASK);
+                       arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
                        __napi_schedule(&priv->napi);
                }
        }
@@ -440,7 +456,7 @@ static int arc_emac_open(struct net_device *ndev)
        arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma);
 
        /* Enable interrupts */
-       arc_reg_set(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
+       arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
 
        /* Set CONTROL */
        arc_reg_set(priv, R_CTRL,
@@ -511,7 +527,7 @@ static int arc_emac_stop(struct net_device *ndev)
        netif_stop_queue(ndev);
 
        /* Disable interrupts */
-       arc_reg_clr(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
+       arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
 
        /* Disable EMAC */
        arc_reg_clr(priv, R_CTRL, EN_MASK);
@@ -574,11 +590,9 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 
        len = max_t(unsigned int, ETH_ZLEN, skb->len);
 
-       /* EMAC still holds this buffer in its possession.
-        * CPU must not modify this buffer descriptor
-        */
-       if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) {
+       if (unlikely(!arc_emac_tx_avail(priv))) {
                netif_stop_queue(ndev);
+               netdev_err(ndev, "BUG! Tx Ring full when queue awake!\n");
                return NETDEV_TX_BUSY;
        }
 
@@ -607,12 +621,19 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
        /* Increment index to point to the next BD */
        *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;
 
-       /* Get "info" of the next BD */
-       info = &priv->txbd[*txbd_curr].info;
+       /* Ensure that tx_clean() sees the new txbd_curr before
+        * checking the queue status. This prevents an unneeded wake
+        * of the queue in tx_clean().
+        */
+       smp_mb();
 
-       /* Check if if Tx BD ring is full - next BD is still owned by EMAC */
-       if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC))
+       if (!arc_emac_tx_avail(priv)) {
                netif_stop_queue(ndev);
+               /* Refresh tx_dirty */
+               smp_mb();
+               if (arc_emac_tx_avail(priv))
+                       netif_start_queue(ndev);
+       }
 
        arc_reg_set(priv, R_STATUS, TXPL_MASK);
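Editor's note: arc_emac_tx_avail() above computes "free slots = (dirty + TX_BD_NUM - curr - 1) % TX_BD_NUM", deliberately keeping one descriptor unused so a full ring never looks identical to an empty one (curr == dirty always means empty). A stand-alone sketch of that index arithmetic (TX_BD_NUM and the walk-through values are made up for illustration):

    #include <assert.h>
    #include <stdio.h>

    #define TX_BD_NUM 8     /* illustrative ring size */

    static unsigned int tx_avail(unsigned int dirty, unsigned int curr)
    {
            /* Same formula as arc_emac_tx_avail(): one slot is sacrificed
             * so that curr == dirty can only mean "empty", never "full".
             */
            return (dirty + TX_BD_NUM - curr - 1) % TX_BD_NUM;
    }

    int main(void)
    {
            unsigned int curr = 0, dirty = 0;

            assert(tx_avail(dirty, curr) == TX_BD_NUM - 1);   /* empty ring */

            /* Producer queues descriptors until the ring is full. */
            while (tx_avail(dirty, curr))
                    curr = (curr + 1) % TX_BD_NUM;
            printf("ring full: curr=%u dirty=%u avail=%u\n",
                   curr, dirty, tx_avail(dirty, curr));

            /* Consumer cleans two descriptors; two slots become available. */
            dirty = (dirty + 2) % TX_BD_NUM;
            printf("after clean: avail=%u\n", tx_avail(dirty, curr));
            return 0;
    }

In the driver, tx() and tx_clean() each call this helper after an smp_mb(), so that the stop/wake decision is made against the other side's latest index.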
 
index 4a7028d..d588136 100644 (file)
@@ -1697,7 +1697,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
                                     hwstat->tx_underruns +
                                     hwstat->tx_excessive_cols +
                                     hwstat->tx_late_cols);
-               nstat->multicast  = hwstat->tx_multicast_pkts;
+               nstat->multicast  = hwstat->rx_multicast_pkts;
                nstat->collisions = hwstat->tx_total_cols;
 
                nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
index 6f4e186..d9b9170 100644 (file)
@@ -534,6 +534,25 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
        while ((processed < to_process) && (processed < budget)) {
                cb = &priv->rx_cbs[priv->rx_read_ptr];
                skb = cb->skb;
+
+               processed++;
+               priv->rx_read_ptr++;
+
+               if (priv->rx_read_ptr == priv->num_rx_bds)
+                       priv->rx_read_ptr = 0;
+
+               /* We do not have a backing SKB, so we do not have a corresponding
+                * DMA mapping for this incoming packet since
+                * bcm_sysport_rx_refill always either has both skb and mapping
+                * or none.
+                */
+               if (unlikely(!skb)) {
+                       netif_err(priv, rx_err, ndev, "out of memory!\n");
+                       ndev->stats.rx_dropped++;
+                       ndev->stats.rx_errors++;
+                       goto refill;
+               }
+
                dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
                                 RX_BUF_LENGTH, DMA_FROM_DEVICE);
 
@@ -543,23 +562,11 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
                status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
                          DESC_STATUS_MASK;
 
-               processed++;
-               priv->rx_read_ptr++;
-               if (priv->rx_read_ptr == priv->num_rx_bds)
-                       priv->rx_read_ptr = 0;
-
                netif_dbg(priv, rx_status, ndev,
                          "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
                          p_index, priv->rx_c_index, priv->rx_read_ptr,
                          len, status);
 
-               if (unlikely(!skb)) {
-                       netif_err(priv, rx_err, ndev, "out of memory!\n");
-                       ndev->stats.rx_dropped++;
-                       ndev->stats.rx_errors++;
-                       goto refill;
-               }
-
                if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
                        netif_err(priv, rx_status, ndev, "fragmented packet!\n");
                        ndev->stats.rx_dropped++;
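Editor's note: both this SYSTEMPORT hunk and the GENET one below restructure the RX loop so the consumer index is advanced and the "no backing skb" case is handled before the buffer is unmapped or parsed; otherwise an early "goto refill" would leave the read pointer stuck on the same descriptor. A small sketch of that loop shape (the ring here is just an array of pointers, purely illustrative):

    #include <stdio.h>

    #define NUM_RX 4

    int main(void)
    {
            const char *ring[NUM_RX] = { "pkt0", NULL /* refill failed earlier */,
                                         "pkt2", "pkt3" };
            unsigned int read_ptr = 0, processed = 0, dropped = 0;
            int budget = NUM_RX;

            while (budget--) {
                    const char *buf = ring[read_ptr];

                    /* Always advance the consumer index first ... */
                    read_ptr = (read_ptr + 1) % NUM_RX;
                    processed++;

                    /* ... so a missing buffer can be skipped without stalling. */
                    if (!buf) {
                            dropped++;
                            continue;       /* cf. "goto refill" in the driver */
                    }
                    printf("received %s\n", buf);
            }
            printf("processed=%u dropped=%u\n", processed, dropped);
            return 0;
    }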
index 3f9d4de..5cc9cae 100644 (file)
@@ -875,6 +875,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
        int last_tx_cn, last_c_index, num_tx_bds;
        struct enet_cb *tx_cb_ptr;
        struct netdev_queue *txq;
+       unsigned int bds_compl;
        unsigned int c_index;
 
        /* Compute how many buffers are transmitted since last xmit call */
@@ -899,7 +900,9 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
        /* Reclaim transmitted buffers */
        while (last_tx_cn-- > 0) {
                tx_cb_ptr = ring->cbs + last_c_index;
+               bds_compl = 0;
                if (tx_cb_ptr->skb) {
+                       bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
                        dev->stats.tx_bytes += tx_cb_ptr->skb->len;
                        dma_unmap_single(&dev->dev,
                                         dma_unmap_addr(tx_cb_ptr, dma_addr),
@@ -916,7 +919,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
                        dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
                }
                dev->stats.tx_packets++;
-               ring->free_bds += 1;
+               ring->free_bds += bds_compl;
 
                last_c_index++;
                last_c_index &= (num_tx_bds - 1);
@@ -1274,12 +1277,29 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
 
        while ((rxpktprocessed < rxpkttoprocess) &&
               (rxpktprocessed < budget)) {
+               cb = &priv->rx_cbs[priv->rx_read_ptr];
+               skb = cb->skb;
+
+               rxpktprocessed++;
+
+               priv->rx_read_ptr++;
+               priv->rx_read_ptr &= (priv->num_rx_bds - 1);
+
+               /* We do not have a backing SKB, so we do not have a
+                * corresponding DMA mapping for this incoming packet since
+                * bcmgenet_rx_refill always either has both skb and mapping or
+                * none.
+                */
+               if (unlikely(!skb)) {
+                       dev->stats.rx_dropped++;
+                       dev->stats.rx_errors++;
+                       goto refill;
+               }
+
                /* Unmap the packet contents such that we can use the
                 * RSV from the 64 bytes descriptor when enabled and save
                 * a 32-bits register read
                 */
-               cb = &priv->rx_cbs[priv->rx_read_ptr];
-               skb = cb->skb;
                dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
                                 priv->rx_buf_len, DMA_FROM_DEVICE);
 
@@ -1307,18 +1327,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
                          __func__, p_index, priv->rx_c_index,
                          priv->rx_read_ptr, dma_length_status);
 
-               rxpktprocessed++;
-
-               priv->rx_read_ptr++;
-               priv->rx_read_ptr &= (priv->num_rx_bds - 1);
-
-               /* out of memory, just drop packets at the hardware level */
-               if (unlikely(!skb)) {
-                       dev->stats.rx_dropped++;
-                       dev->stats.rx_errors++;
-                       goto refill;
-               }
-
                if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
                        netif_err(priv, rx_status, dev,
                                  "dropping fragmented packet!\n");
@@ -1736,13 +1744,63 @@ static void bcmgenet_init_multiq(struct net_device *dev)
        bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
 }
 
+static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
+{
+       int ret = 0;
+       int timeout = 0;
+       u32 reg;
+
+       /* Disable TDMA to stop adding more frames in TX DMA */
+       reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+       reg &= ~DMA_EN;
+       bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
+       /* Check TDMA status register to confirm TDMA is disabled */
+       while (timeout++ < DMA_TIMEOUT_VAL) {
+               reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
+               if (reg & DMA_DISABLED)
+                       break;
+
+               udelay(1);
+       }
+
+       if (timeout == DMA_TIMEOUT_VAL) {
+               netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
+               ret = -ETIMEDOUT;
+       }
+
+       /* Wait 10ms for packet drain in both tx and rx dma */
+       usleep_range(10000, 20000);
+
+       /* Disable RDMA */
+       reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+       reg &= ~DMA_EN;
+       bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+       timeout = 0;
+       /* Check RDMA status register to confirm RDMA is disabled */
+       while (timeout++ < DMA_TIMEOUT_VAL) {
+               reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
+               if (reg & DMA_DISABLED)
+                       break;
+
+               udelay(1);
+       }
+
+       if (timeout == DMA_TIMEOUT_VAL) {
+               netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
+               ret = -ETIMEDOUT;
+       }
+
+       return ret;
+}
+
 static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
 {
        int i;
 
        /* disable DMA */
-       bcmgenet_rdma_writel(priv, 0, DMA_CTRL);
-       bcmgenet_tdma_writel(priv, 0, DMA_CTRL);
+       bcmgenet_dma_teardown(priv);
 
        for (i = 0; i < priv->num_tx_bds; i++) {
                if (priv->tx_cbs[i].skb != NULL) {
@@ -2101,57 +2159,6 @@ err_clk_disable:
        return ret;
 }
 
-static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
-{
-       int ret = 0;
-       int timeout = 0;
-       u32 reg;
-
-       /* Disable TDMA to stop add more frames in TX DMA */
-       reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
-       reg &= ~DMA_EN;
-       bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
-       /* Check TDMA status register to confirm TDMA is disabled */
-       while (timeout++ < DMA_TIMEOUT_VAL) {
-               reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
-               if (reg & DMA_DISABLED)
-                       break;
-
-               udelay(1);
-       }
-
-       if (timeout == DMA_TIMEOUT_VAL) {
-               netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
-               ret = -ETIMEDOUT;
-       }
-
-       /* Wait 10ms for packet drain in both tx and rx dma */
-       usleep_range(10000, 20000);
-
-       /* Disable RDMA */
-       reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
-       reg &= ~DMA_EN;
-       bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
-       timeout = 0;
-       /* Check RDMA status register to confirm RDMA is disabled */
-       while (timeout++ < DMA_TIMEOUT_VAL) {
-               reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
-               if (reg & DMA_DISABLED)
-                       break;
-
-               udelay(1);
-       }
-
-       if (timeout == DMA_TIMEOUT_VAL) {
-               netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
-               ret = -ETIMEDOUT;
-       }
-
-       return ret;
-}
-
 static void bcmgenet_netif_stop(struct net_device *dev)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
index cb77ae9..e7d3a62 100644 (file)
@@ -7914,8 +7914,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        entry = tnapi->tx_prod;
        base_flags = 0;
-       if (skb->ip_summed == CHECKSUM_PARTIAL)
-               base_flags |= TXD_FLAG_TCPUDP_CSUM;
 
        mss = skb_shinfo(skb)->gso_size;
        if (mss) {
@@ -7929,6 +7927,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
 
+               /* HW/FW can not correctly segment packets that have been
+                * vlan encapsulated.
+                */
+               if (skb->protocol == htons(ETH_P_8021Q) ||
+                   skb->protocol == htons(ETH_P_8021AD))
+                       return tg3_tso_bug(tp, tnapi, txq, skb);
+
                if (!skb_is_gso_v6(skb)) {
                        if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                            tg3_flag(tp, TSO_BUG))
@@ -7979,6 +7984,17 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                base_flags |= tsflags << 12;
                        }
                }
+       } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               /* HW/FW can not correctly checksum packets that have been
+                * vlan encapsulated.
+                */
+               if (skb->protocol == htons(ETH_P_8021Q) ||
+                   skb->protocol == htons(ETH_P_8021AD)) {
+                       if (skb_checksum_help(skb))
+                               goto drop;
+               } else  {
+                       base_flags |= TXD_FLAG_TCPUDP_CSUM;
+               }
        }
 
        if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
index 8c34811..e5be511 100644 (file)
@@ -6478,6 +6478,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct port_info *pi;
        bool highdma = false;
        struct adapter *adapter = NULL;
+       void __iomem *regs;
 
        printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
 
@@ -6494,19 +6495,35 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto out_release_regions;
        }
 
+       regs = pci_ioremap_bar(pdev, 0);
+       if (!regs) {
+               dev_err(&pdev->dev, "cannot map device registers\n");
+               err = -ENOMEM;
+               goto out_disable_device;
+       }
+
+       /* We control everything through one PF */
+       func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
+       if (func != ent->driver_data) {
+               iounmap(regs);
+               pci_disable_device(pdev);
+               pci_save_state(pdev);        /* to restore SR-IOV later */
+               goto sriov;
+       }
+
        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                highdma = true;
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
                if (err) {
                        dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
                                "coherent allocations\n");
-                       goto out_disable_device;
+                       goto out_unmap_bar0;
                }
        } else {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "no usable DMA configuration\n");
-                       goto out_disable_device;
+                       goto out_unmap_bar0;
                }
        }
 
@@ -6518,7 +6535,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
        if (!adapter) {
                err = -ENOMEM;
-               goto out_disable_device;
+               goto out_unmap_bar0;
        }
 
        adapter->workq = create_singlethread_workqueue("cxgb4");
@@ -6530,20 +6547,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* PCI device has been enabled */
        adapter->flags |= DEV_ENABLED;
 
-       adapter->regs = pci_ioremap_bar(pdev, 0);
-       if (!adapter->regs) {
-               dev_err(&pdev->dev, "cannot map device registers\n");
-               err = -ENOMEM;
-               goto out_free_adapter;
-       }
-
-       /* We control everything through one PF */
-       func = SOURCEPF_GET(readl(adapter->regs + PL_WHOAMI));
-       if (func != ent->driver_data) {
-               pci_save_state(pdev);        /* to restore SR-IOV later */
-               goto sriov;
-       }
-
+       adapter->regs = regs;
        adapter->pdev = pdev;
        adapter->pdev_dev = &pdev->dev;
        adapter->mbox = func;
@@ -6560,7 +6564,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        err = t4_prep_adapter(adapter);
        if (err)
-               goto out_unmap_bar0;
+               goto out_free_adapter;
+
 
        if (!is_t4(adapter->params.chip)) {
                s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
@@ -6577,14 +6582,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                        dev_err(&pdev->dev,
                                "Incorrect number of egress queues per page\n");
                        err = -EINVAL;
-                       goto out_unmap_bar0;
+                       goto out_free_adapter;
                }
                adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
                pci_resource_len(pdev, 2));
                if (!adapter->bar2) {
                        dev_err(&pdev->dev, "cannot map device bar2 region\n");
                        err = -ENOMEM;
-                       goto out_unmap_bar0;
+                       goto out_free_adapter;
                }
        }
 
@@ -6722,13 +6727,13 @@ sriov:
  out_unmap_bar:
        if (!is_t4(adapter->params.chip))
                iounmap(adapter->bar2);
- out_unmap_bar0:
-       iounmap(adapter->regs);
  out_free_adapter:
        if (adapter->workq)
                destroy_workqueue(adapter->workq);
 
        kfree(adapter);
+ out_unmap_bar0:
+       iounmap(regs);
  out_disable_device:
        pci_disable_pcie_error_reporting(pdev);
        pci_disable_device(pdev);
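Editor's note: the cxgb4 change maps BAR0 before the adapter structure is allocated (so the "whoami" register can be read early), and the error labels are reordered so resources are still released in the reverse of the order they were acquired. A tiny user-space sketch of keeping goto labels in reverse-acquisition order (the resources are just stand-ins for ioremap/kzalloc):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            char *regs = NULL, *adapter = NULL;
            int err = 0;

            regs = malloc(128);                /* acquired first  (cf. pci_ioremap_bar) */
            if (!regs) {
                    err = 1;
                    goto out;
            }

            adapter = malloc(256);             /* acquired second (cf. kzalloc) */
            if (!adapter) {
                    err = 1;
                    goto out_unmap;            /* only the earlier resource exists */
            }
            memset(adapter, 0, 256);

            /* ... normal operation would continue here ... */
            free(adapter);
     out_unmap:
            free(regs);
     out:
            if (err)
                    fprintf(stderr, "init failed\n");
            return err;
    }

Once the BAR mapping is taken before the allocation, "out_unmap_bar0" has to sit below "out_free_adapter", exactly as the hunk rearranges it.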
index 9b33057..70089c2 100644 (file)
@@ -1399,7 +1399,7 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
        const void *mac_addr;
 
        if (!IS_ENABLED(CONFIG_OF) || !np)
-               return NULL;
+               return ERR_PTR(-ENXIO);
 
        pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
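Editor's note: the dm9000 fix returns ERR_PTR(-ENXIO) instead of NULL so the caller can tell "no platform data via DT" apart from an allocation failure. A user-space rendition of the ERR_PTR()/IS_ERR()/PTR_ERR() encoding (simplified; the kernel's real versions live in <linux/err.h>):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* Returns valid data, or an encoded errno when device-tree data is absent. */
    static void *parse_dt(int have_dt)
    {
            if (!have_dt)
                    return ERR_PTR(-ENXIO);
            return malloc(16);
    }

    int main(void)
    {
            void *pdata = parse_dt(0);

            if (IS_ERR(pdata))
                    printf("no DT data, error %ld\n", PTR_ERR(pdata));
            else
                    free(pdata);
            return 0;
    }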
index 65a4a0f..02a2e90 100644 (file)
@@ -2389,6 +2389,22 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
 }
 EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
 
+static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
+{
+       struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
+       int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
+                       + 1;
+       int max_port = min_port +
+               bitmap_weight(actv_ports.ports, dev->caps.num_ports);
+
+       if (port < min_port)
+               port = min_port;
+       else if (port >= max_port)
+               port = max_port - 1;
+
+       return port;
+}
+
 int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2402,6 +2418,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
        if (slave < 0)
                return -EINVAL;
 
+       port = mlx4_slaves_closest_port(dev, slave, port);
        s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
        s_info->mac = mac;
        mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
@@ -2428,6 +2445,7 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
        if (slave < 0)
                return -EINVAL;
 
+       port = mlx4_slaves_closest_port(dev, slave, port);
        vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
 
        if ((0 == vlan) && (0 == qos))
@@ -2455,6 +2473,7 @@ bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
        struct mlx4_priv *priv;
 
        priv = mlx4_priv(dev);
+       port = mlx4_slaves_closest_port(dev, slave, port);
        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
 
        if (MLX4_VGT != vp_oper->state.default_vlan) {
@@ -2482,6 +2501,7 @@ int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
        if (slave < 0)
                return -EINVAL;
 
+       port = mlx4_slaves_closest_port(dev, slave, port);
        s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
        s_info->spoofchk = setting;
 
@@ -2535,6 +2555,7 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat
        if (slave < 0)
                return -EINVAL;
 
+       port = mlx4_slaves_closest_port(dev, slave, port);
        switch (link_state) {
        case IFLA_VF_LINK_STATE_AUTO:
                /* get current link state */
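Editor's note: mlx4_slaves_closest_port() above clamps a caller-supplied port number into the range of ports that are actually active for the given slave, derived from a port bitmap. A user-space sketch of the same clamping, with the GCC/Clang builtins __builtin_ctz()/__builtin_popcount() standing in for find_first_bit()/bitmap_weight() (the bitmap value is invented for the example):

    #include <stdio.h>

    /* Clamp 'port' into [min_port, max_port - 1] derived from an active-port mask.
     * Assumes at least one port is active (mask != 0).
     */
    static int closest_port(unsigned int active_ports, int port)
    {
            int min_port = __builtin_ctz(active_ports) + 1;       /* first set bit, 1-based */
            int max_port = min_port + __builtin_popcount(active_ports);

            if (port < min_port)
                    port = min_port;
            else if (port >= max_port)
                    port = max_port - 1;
            return port;
    }

    int main(void)
    {
            unsigned int active = 0x6;      /* ports 2 and 3 active (bits 1 and 2) */
            int p;

            for (p = 1; p <= 4; p++)
                    printf("requested %d -> using %d\n", p, closest_port(active, p));
            return 0;
    }

Each mlx4_set_vf_*() entry point then indexes vf_admin/vf_oper with a port number that is guaranteed to be valid for that slave.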
index e22f24f..35ff292 100644 (file)
@@ -487,6 +487,9 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;
 
+       if (pause->autoneg)
+               return -EINVAL;
+
        priv->prof->tx_pause = pause->tx_pause != 0;
        priv->prof->rx_pause = pause->rx_pause != 0;
        err = mlx4_SET_PORT_general(mdev->dev, priv->port,
index 7d717ec..193a6ad 100644 (file)
@@ -298,6 +298,7 @@ static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
                            MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
 }
 
+/* Must protect against concurrent access */
 int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
                       struct mlx4_mpt_entry ***mpt_entry)
 {
@@ -305,13 +306,10 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
        int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);
        struct mlx4_cmd_mailbox *mailbox = NULL;
 
-       /* Make sure that at this point we have single-threaded access only */
-
        if (mmr->enabled != MLX4_MPT_EN_HW)
                return -EINVAL;
 
        err = mlx4_HW2SW_MPT(dev, NULL, key);
-
        if (err) {
                mlx4_warn(dev, "HW2SW_MPT failed (%d).", err);
                mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n");
@@ -333,7 +331,6 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
                                   0, MLX4_CMD_QUERY_MPT,
                                   MLX4_CMD_TIME_CLASS_B,
                                   MLX4_CMD_WRAPPED);
-
                if (err)
                        goto free_mailbox;
 
@@ -378,9 +375,10 @@ int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
                err = mlx4_SW2HW_MPT(dev, mailbox, key);
        }
 
-       mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
-       if (!err)
+       if (!err) {
+               mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
                mmr->enabled = MLX4_MPT_EN_HW;
+       }
        return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt);
@@ -400,11 +398,12 @@ EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt);
 int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry,
                         u32 pdn)
 {
-       u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags);
+       u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags) & ~MLX4_MPT_PD_MASK;
        /* The wrapper function will put the slave's id here */
        if (mlx4_is_mfunc(dev))
                pd_flags &= ~MLX4_MPT_PD_VF_MASK;
-       mpt_entry->pd_flags = cpu_to_be32((pd_flags &  ~MLX4_MPT_PD_MASK) |
+
+       mpt_entry->pd_flags = cpu_to_be32(pd_flags |
                                          (pdn & MLX4_MPT_PD_MASK)
                                          | MLX4_MPT_PD_FLAG_EN_INV);
        return 0;
@@ -600,14 +599,18 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
 {
        int err;
 
-       mpt_entry->start       = cpu_to_be64(mr->iova);
-       mpt_entry->length      = cpu_to_be64(mr->size);
-       mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
+       mpt_entry->start       = cpu_to_be64(iova);
+       mpt_entry->length      = cpu_to_be64(size);
+       mpt_entry->entity_size = cpu_to_be32(page_shift);
 
        err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
        if (err)
                return err;
 
+       mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK |
+                                          MLX4_MPT_PD_FLAG_EN_INV);
+       mpt_entry->flags    &= cpu_to_be32(MLX4_MPT_FLAG_FREE |
+                                          MLX4_MPT_FLAG_SW_OWNS);
        if (mr->mtt.order < 0) {
                mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
                mpt_entry->mtt_addr = 0;
@@ -617,6 +620,14 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
                if (mr->mtt.page_shift == 0)
                        mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
        }
+       if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
+               /* fast register MR in free state */
+               mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
+               mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
+                                                  MLX4_MPT_PD_FLAG_RAE);
+       } else {
+               mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
+       }
        mr->enabled = MLX4_MPT_EN_SW;
 
        return 0;
index 9ba0c1c..94eeb2c 100644 (file)
@@ -103,7 +103,8 @@ static int find_index(struct mlx4_dev *dev,
        int i;
 
        for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
-               if ((mac & MLX4_MAC_MASK) ==
+               if (table->refs[i] &&
+                   (MLX4_MAC_MASK & mac) ==
                    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
                        return i;
        }
@@ -165,12 +166,14 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
 
        mutex_lock(&table->mutex);
        for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
-               if (free < 0 && !table->entries[i]) {
-                       free = i;
+               if (!table->refs[i]) {
+                       if (free < 0)
+                               free = i;
                        continue;
                }
 
-               if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
+               if ((MLX4_MAC_MASK & mac) ==
+                    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
                        /* MAC already registered, increment ref count */
                        err = i;
                        ++table->refs[i];
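Editor's note: both port.c hunks stop trusting the raw table contents and key off the per-entry reference count instead: an entry with refs == 0 is free even if a stale MAC value is still stored there, and only entries with refs > 0 may satisfy a lookup. A compact sketch of that refcounted-table idea (the table layout and sizes are illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    #define TBL_SIZE 4
    #define MAC_MASK 0xffffffffffffULL

    struct mac_table {
            uint64_t entries[TBL_SIZE];
            int      refs[TBL_SIZE];
    };

    /* Register a MAC: reuse a live matching entry, else take the first free slot. */
    static int register_mac(struct mac_table *t, uint64_t mac)
    {
            int i, free_idx = -1;

            for (i = 0; i < TBL_SIZE; i++) {
                    if (!t->refs[i]) {              /* free slot, stale data ignored */
                            if (free_idx < 0)
                                    free_idx = i;
                            continue;
                    }
                    if ((mac & MAC_MASK) == (t->entries[i] & MAC_MASK)) {
                            ++t->refs[i];           /* already registered */
                            return i;
                    }
            }
            if (free_idx < 0)
                    return -1;                      /* table full */
            t->entries[free_idx] = mac;
            t->refs[free_idx] = 1;
            return free_idx;
    }

    int main(void)
    {
            struct mac_table t = { .entries = { 0xdeadbeef }, .refs = { 0 } };

            printf("slot %d\n", register_mac(&t, 0x001122334455ULL));
            printf("slot %d\n", register_mac(&t, 0x001122334455ULL)); /* refcount bump */
            printf("slot %d\n", register_mac(&t, 0x00deadbeefULL));
            return 0;
    }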
index 0dc31d8..2301365 100644 (file)
@@ -390,13 +390,14 @@ err_icm:
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
 
 #define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
-int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
                   enum mlx4_update_qp_attr attr,
                   struct mlx4_update_qp_params *params)
 {
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_update_qp_context *cmd;
        u64 pri_addr_path_mask = 0;
+       u64 qp_mask = 0;
        int err = 0;
 
        mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -413,9 +414,16 @@ int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
                cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
        }
 
+       if (attr & MLX4_UPDATE_QP_VSD) {
+               qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD;
+               if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE)
+                       cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN);
+       }
+
        cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
+       cmd->qp_mask = cpu_to_be64(qp_mask);
 
-       err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
+       err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0,
                       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
                       MLX4_CMD_NATIVE);
 
index 1089367..5d2498d 100644 (file)
@@ -702,11 +702,13 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
        struct mlx4_qp_context  *qpc = inbox->buf + 8;
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_priv *priv;
+       u32 qp_type;
        int port;
 
        port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
        priv = mlx4_priv(dev);
        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+       qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
 
        if (MLX4_VGT != vp_oper->state.default_vlan) {
                /* the reserved QPs (special, proxy, tunnel)
@@ -715,8 +717,20 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
                if (mlx4_is_qp_reserved(dev, qpn))
                        return 0;
 
-               /* force strip vlan by clear vsd */
-               qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
+               /* force vlan stripping by clearing vsd; an MLX QP here refers to Raw Ethernet */
+               if (qp_type == MLX4_QP_ST_UD ||
+                   (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
+                       if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
+                               *(__be32 *)inbox->buf =
+                                       cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
+                                       MLX4_QP_OPTPAR_VLAN_STRIPPING);
+                               qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
+                       } else {
+                               struct mlx4_update_qp_params params = {.flags = 0};
+
+                               mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+                       }
+               }
 
                if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
                    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
@@ -3998,13 +4012,17 @@ int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
        }
 
        port = (rqp->sched_queue >> 6 & 1) + 1;
-       smac_index = cmd->qp_context.pri_path.grh_mylmc;
-       err = mac_find_smac_ix_in_slave(dev, slave, port,
-                                       smac_index, &mac);
-       if (err) {
-               mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
-                        qpn, smac_index);
-               goto err_mac;
+
+       if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
+               smac_index = cmd->qp_context.pri_path.grh_mylmc;
+               err = mac_find_smac_ix_in_slave(dev, slave, port,
+                                               smac_index, &mac);
+
+               if (err) {
+                       mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
+                                qpn, smac_index);
+                       goto err_mac;
+               }
        }
 
        err = mlx4_cmd(dev, inbox->dma,
@@ -4818,7 +4836,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
 
        upd_context = mailbox->buf;
-       upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);
+       upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
 
        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
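Editor's note: the last resource_tracker.c hunk fixes a classic bit-index-versus-bit-mask slip: MLX4_UPD_QP_MASK_VSD is a bit position, so the value written into qp_mask must be 1ULL << MLX4_UPD_QP_MASK_VSD, not the raw index. A trivial sketch of the difference (the constant value below is made up):

    #include <stdint.h>
    #include <stdio.h>

    #define UPD_QP_MASK_VSD 2          /* a bit *position*, not a mask */

    int main(void)
    {
            uint64_t wrong = UPD_QP_MASK_VSD;          /* 0x2: sets bit 1 instead */
            uint64_t right = 1ULL << UPD_QP_MASK_VSD;  /* 0x4: sets the intended bit 2 */

            printf("wrong=0x%llx right=0x%llx\n",
                   (unsigned long long)wrong, (unsigned long long)right);
            return 0;
    }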
index 979c698..a422930 100644 (file)
@@ -290,9 +290,11 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
                /* Read the hardware TX timestamp if one was recorded */
                if (unlikely(re.s.tstamp)) {
                        struct skb_shared_hwtstamps ts;
+                       u64 ns;
+
                        memset(&ts, 0, sizeof(ts));
                        /* Read the timestamp */
-                       u64 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
+                       ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
                        /* Remove the timestamp from the FIFO */
                        cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
                        /* Tell the kernel about the timestamp */
index 44c8be1..5f7a352 100644 (file)
@@ -7,6 +7,7 @@ config PCH_GBE
        depends on PCI && (X86_32 || COMPILE_TEST)
        select MII
        select PTP_1588_CLOCK_PCH
+       select NET_PTP_CLASSIFY
        ---help---
          This is a gigabit ethernet driver for EG20T PCH.
          EG20T PCH is the platform controller hub that is used in Intel's
index 91652e7..0921302 100644 (file)
@@ -1783,33 +1783,31 @@ static void __rtl8169_set_features(struct net_device *dev,
                                   netdev_features_t features)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
-       netdev_features_t changed = features ^ dev->features;
        void __iomem *ioaddr = tp->mmio_addr;
+       u32 rx_config;
 
-       if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM |
-                        NETIF_F_HW_VLAN_CTAG_RX)))
-               return;
+       rx_config = RTL_R32(RxConfig);
+       if (features & NETIF_F_RXALL)
+               rx_config |= (AcceptErr | AcceptRunt);
+       else
+               rx_config &= ~(AcceptErr | AcceptRunt);
 
-       if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) {
-               if (features & NETIF_F_RXCSUM)
-                       tp->cp_cmd |= RxChkSum;
-               else
-                       tp->cp_cmd &= ~RxChkSum;
+       RTL_W32(RxConfig, rx_config);
 
-               if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
-                       tp->cp_cmd |= RxVlan;
-               else
-                       tp->cp_cmd &= ~RxVlan;
+       if (features & NETIF_F_RXCSUM)
+               tp->cp_cmd |= RxChkSum;
+       else
+               tp->cp_cmd &= ~RxChkSum;
 
-               RTL_W16(CPlusCmd, tp->cp_cmd);
-               RTL_R16(CPlusCmd);
-       }
-       if (changed & NETIF_F_RXALL) {
-               int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
-               if (features & NETIF_F_RXALL)
-                       tmp |= (AcceptErr | AcceptRunt);
-               RTL_W32(RxConfig, tmp);
-       }
+       if (features & NETIF_F_HW_VLAN_CTAG_RX)
+               tp->cp_cmd |= RxVlan;
+       else
+               tp->cp_cmd &= ~RxVlan;
+
+       tp->cp_cmd |= RTL_R16(CPlusCmd) & ~(RxVlan | RxChkSum);
+
+       RTL_W16(CPlusCmd, tp->cp_cmd);
+       RTL_R16(CPlusCmd);
 }
 
 static int rtl8169_set_features(struct net_device *dev,
@@ -1817,8 +1815,11 @@ static int rtl8169_set_features(struct net_device *dev,
 {
        struct rtl8169_private *tp = netdev_priv(dev);
 
+       features &= NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
+
        rtl_lock_work(tp);
-       __rtl8169_set_features(dev, features);
+       if (features ^ dev->features)
+               __rtl8169_set_features(dev, features);
        rtl_unlock_work(tp);
 
        return 0;
@@ -7118,8 +7119,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
        }
 }
 
-static int
-rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
        const unsigned int region = cfg->region;
@@ -7194,7 +7194,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_out_mwi_2;
        }
 
-       tp->cp_cmd = RxChkSum;
+       tp->cp_cmd = 0;
 
        if ((sizeof(dma_addr_t) > 4) &&
            !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
@@ -7235,13 +7235,6 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        pci_set_master(pdev);
 
-       /*
-        * Pretend we are using VLANs; This bypasses a nasty bug where
-        * Interrupts stop flowing on high load on 8110SCd controllers.
-        */
-       if (tp->mac_version == RTL_GIGA_MAC_VER_05)
-               tp->cp_cmd |= RxVlan;
-
        rtl_init_mdio_ops(tp);
        rtl_init_pll_power_ops(tp);
        rtl_init_jumbo_ops(tp);
@@ -7302,8 +7295,14 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
                NETIF_F_HIGHDMA;
 
+       tp->cp_cmd |= RxChkSum | RxVlan;
+
+       /*
+        * Pretend we are using VLANs; This bypasses a nasty bug where
+        * Interrupts stop flowing on high load on 8110SCd controllers.
+        */
        if (tp->mac_version == RTL_GIGA_MAC_VER_05)
-               /* 8110SCd requires hardware Rx VLAN - disallow toggling */
+               /* Disallow toggling */
                dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
 
        if (tp->txd_version == RTL_TD_0)
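Editor's note: the rewritten __rtl8169_set_features() no longer applies only the bits that changed; it recomputes the RxConfig and CPlusCmd register state from the full feature set every time, which is harder to get out of sync. A small sketch of that "recompute from the requested state" style of register update (register and bit names here are invented, not the chip's):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ACCEPT_ERR   0x01
    #define ACCEPT_RUNT  0x02
    #define RX_CHKSUM    0x10
    #define RX_VLAN      0x20

    /* Derive the whole register image from the requested features. */
    static uint32_t compute_reg(uint32_t reg, bool rxall, bool rxcsum, bool rxvlan)
    {
            if (rxall)
                    reg |= ACCEPT_ERR | ACCEPT_RUNT;
            else
                    reg &= ~(ACCEPT_ERR | ACCEPT_RUNT);

            if (rxcsum)
                    reg |= RX_CHKSUM;
            else
                    reg &= ~RX_CHKSUM;

            if (rxvlan)
                    reg |= RX_VLAN;
            else
                    reg &= ~RX_VLAN;

            return reg;
    }

    int main(void)
    {
            uint32_t reg = 0;

            reg = compute_reg(reg, false, true, true);
            printf("reg=0x%02x\n", reg);            /* 0x30 */
            reg = compute_reg(reg, true, true, false);
            printf("reg=0x%02x\n", reg);            /* 0x13 */
            return 0;
    }

The caller (rtl8169_set_features) still masks off unrelated feature bits and skips the work when nothing relevant changed, but the register write itself is always derived from the complete state.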
index 0537381..6859437 100644 (file)
@@ -2933,6 +2933,9 @@ void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
        u32 crc;
        int bit;
 
+       if (!efx_dev_registered(efx))
+               return;
+
        netif_addr_lock_bh(net_dev);
 
        efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
index 23c89ab..f675396 100644 (file)
@@ -350,14 +350,17 @@ static int vnet_walk_rx_one(struct vnet_port *port,
        if (IS_ERR(desc))
                return PTR_ERR(desc);
 
+       if (desc->hdr.state != VIO_DESC_READY)
+               return 1;
+
+       rmb();
+
        viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
               desc->hdr.state, desc->hdr.ack,
               desc->size, desc->ncookies,
               desc->cookies[0].cookie_addr,
               desc->cookies[0].cookie_size);
 
-       if (desc->hdr.state != VIO_DESC_READY)
-               return 1;
        err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies);
        if (err == -ECONNRESET)
                return err;
index 999fb72..e2a0028 100644 (file)
@@ -699,6 +699,28 @@ static void cpsw_rx_handler(void *token, int len, int status)
        cpsw_dual_emac_src_port_detect(status, priv, ndev, skb);
 
        if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
+               bool ndev_status = false;
+               struct cpsw_slave *slave = priv->slaves;
+               int n;
+
+               if (priv->data.dual_emac) {
+                       /* In dual EMAC mode, check all slave interfaces */
+                       for (n = priv->data.slaves; n; n--, slave++)
+                               if (netif_running(slave->ndev))
+                                       ndev_status = true;
+               }
+
+               if (ndev_status && (status >= 0)) {
+                       /* The packet received is for the interface which
+                        * is already down while the other interface is up
+                        * and running. Instead of freeing the skb, which
+                        * would shrink the number of rx descriptors in the
+                        * DMA engine, requeue it back to cpdma.
+                        */
+                       new_skb = skb;
+                       goto requeue;
+               }
+
                /* the interface is going down, skbs are purged */
                dev_kfree_skb_any(skb);
                return;
@@ -717,6 +739,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
                new_skb = skb;
        }
 
+requeue:
        ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data,
                        skb_tailroom(new_skb), 0);
        if (WARN_ON(ret < 0))
@@ -2311,10 +2334,19 @@ static int cpsw_suspend(struct device *dev)
        struct net_device       *ndev = platform_get_drvdata(pdev);
        struct cpsw_priv        *priv = netdev_priv(ndev);
 
-       if (netif_running(ndev))
-               cpsw_ndo_stop(ndev);
+       if (priv->data.dual_emac) {
+               int i;
 
-       for_each_slave(priv, soft_reset_slave);
+               for (i = 0; i < priv->data.slaves; i++) {
+                       if (netif_running(priv->slaves[i].ndev))
+                               cpsw_ndo_stop(priv->slaves[i].ndev);
+                       soft_reset_slave(priv->slaves + i);
+               }
+       } else {
+               if (netif_running(ndev))
+                       cpsw_ndo_stop(ndev);
+               for_each_slave(priv, soft_reset_slave);
+       }
 
        pm_runtime_put_sync(&pdev->dev);
 
@@ -2328,14 +2360,24 @@ static int cpsw_resume(struct device *dev)
 {
        struct platform_device  *pdev = to_platform_device(dev);
        struct net_device       *ndev = platform_get_drvdata(pdev);
+       struct cpsw_priv        *priv = netdev_priv(ndev);
 
        pm_runtime_get_sync(&pdev->dev);
 
        /* Select default pin state */
        pinctrl_pm_select_default_state(&pdev->dev);
 
-       if (netif_running(ndev))
-               cpsw_ndo_open(ndev);
+       if (priv->data.dual_emac) {
+               int i;
+
+               for (i = 0; i < priv->data.slaves; i++) {
+                       if (netif_running(priv->slaves[i].ndev))
+                               cpsw_ndo_open(priv->slaves[i].ndev);
+               }
+       } else {
+               if (netif_running(ndev))
+                       cpsw_ndo_open(ndev);
+       }
        return 0;
 }
 
index a969555..726edab 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/netpoll.h>
 
 #define MACVLAN_HASH_SIZE      (1 << BITS_PER_BYTE)
+#define MACVLAN_BC_QUEUE_LEN   1000
 
 struct macvlan_port {
        struct net_device       *dev;
@@ -248,7 +249,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
                goto err;
 
        spin_lock(&port->bc_queue.lock);
-       if (skb_queue_len(&port->bc_queue) < skb->dev->tx_queue_len) {
+       if (skb_queue_len(&port->bc_queue) < MACVLAN_BC_QUEUE_LEN) {
                __skb_queue_tail(&port->bc_queue, nskb);
                err = 0;
        }
@@ -806,6 +807,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
                                             features,
                                             mask);
        features |= ALWAYS_ON_FEATURES;
+       features &= ~NETIF_F_NETNS_LOCAL;
 
        return features;
 }
index fd0ea7c..011dbda 100644 (file)
@@ -592,8 +592,7 @@ static struct phy_driver ksphy_driver[] = {
        .phy_id         = PHY_ID_KSZ9031,
        .phy_id_mask    = 0x00fffff0,
        .name           = "Micrel KSZ9031 Gigabit PHY",
-       .features       = (PHY_GBIT_FEATURES | SUPPORTED_Pause
-                               | SUPPORTED_Asym_Pause),
+       .features       = (PHY_GBIT_FEATURES | SUPPORTED_Pause),
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
        .config_init    = ksz9031_config_init,
        .config_aneg    = genphy_config_aneg,
index 87f7104..74760e8 100644 (file)
@@ -2019,7 +2019,7 @@ static int rtl8153_enable(struct r8152 *tp)
        return rtl_enable(tp);
 }
 
-static void rtl8152_disable(struct r8152 *tp)
+static void rtl_disable(struct r8152 *tp)
 {
        u32 ocp_data;
        int i;
@@ -2232,6 +2232,13 @@ static inline void r8152b_enable_aldps(struct r8152 *tp)
                                            LINKENA | DIS_SDSAVE);
 }
 
+static void rtl8152_disable(struct r8152 *tp)
+{
+       r8152b_disable_aldps(tp);
+       rtl_disable(tp);
+       r8152b_enable_aldps(tp);
+}
+
 static void r8152b_hw_phy_cfg(struct r8152 *tp)
 {
        u16 data;
@@ -2242,11 +2249,8 @@ static void r8152b_hw_phy_cfg(struct r8152 *tp)
                r8152_mdio_write(tp, MII_BMCR, data);
        }
 
-       r8152b_disable_aldps(tp);
-
        rtl_clear_bp(tp);
 
-       r8152b_enable_aldps(tp);
        set_bit(PHY_RESET, &tp->flags);
 }
 
@@ -2255,9 +2259,6 @@ static void r8152b_exit_oob(struct r8152 *tp)
        u32 ocp_data;
        int i;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
-               return;
-
        ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
        ocp_data &= ~RCR_ACPT_ALL;
        ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
@@ -2347,7 +2348,7 @@ static void r8152b_enter_oob(struct r8152 *tp)
        ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB);
        ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB);
 
-       rtl8152_disable(tp);
+       rtl_disable(tp);
 
        for (i = 0; i < 1000; i++) {
                ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
@@ -2485,9 +2486,6 @@ static void r8153_first_init(struct r8152 *tp)
        u32 ocp_data;
        int i;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
-               return;
-
        rxdy_gated_en(tp, true);
        r8153_teredo_off(tp);
 
@@ -2560,7 +2558,7 @@ static void r8153_enter_oob(struct r8152 *tp)
        ocp_data &= ~NOW_IS_OOB;
        ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
 
-       rtl8152_disable(tp);
+       rtl_disable(tp);
 
        for (i = 0; i < 1000; i++) {
                ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
@@ -2624,6 +2622,13 @@ static void r8153_enable_aldps(struct r8152 *tp)
        ocp_reg_write(tp, OCP_POWER_CFG, data);
 }
 
+static void rtl8153_disable(struct r8152 *tp)
+{
+       r8153_disable_aldps(tp);
+       rtl_disable(tp);
+       r8153_enable_aldps(tp);
+}
+
 static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
 {
        u16 bmcr, anar, gbcr;
@@ -2714,6 +2719,16 @@ out:
        return ret;
 }
 
+static void rtl8152_up(struct r8152 *tp)
+{
+       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+               return;
+
+       r8152b_disable_aldps(tp);
+       r8152b_exit_oob(tp);
+       r8152b_enable_aldps(tp);
+}
+
 static void rtl8152_down(struct r8152 *tp)
 {
        if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
@@ -2727,6 +2742,16 @@ static void rtl8152_down(struct r8152 *tp)
        r8152b_enable_aldps(tp);
 }
 
+static void rtl8153_up(struct r8152 *tp)
+{
+       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+               return;
+
+       r8153_disable_aldps(tp);
+       r8153_first_init(tp);
+       r8153_enable_aldps(tp);
+}
+
 static void rtl8153_down(struct r8152 *tp)
 {
        if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
@@ -2946,6 +2971,8 @@ static void r8152b_init(struct r8152 *tp)
        if (test_bit(RTL8152_UNPLUG, &tp->flags))
                return;
 
+       r8152b_disable_aldps(tp);
+
        if (tp->version == RTL_VER_01) {
                ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE);
                ocp_data &= ~LED_MODE_MASK;
@@ -2984,6 +3011,7 @@ static void r8153_init(struct r8152 *tp)
        if (test_bit(RTL8152_UNPLUG, &tp->flags))
                return;
 
+       r8153_disable_aldps(tp);
        r8153_u1u2en(tp, false);
 
        for (i = 0; i < 500; i++) {
@@ -3392,7 +3420,7 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
                        ops->init               = r8152b_init;
                        ops->enable             = rtl8152_enable;
                        ops->disable            = rtl8152_disable;
-                       ops->up                 = r8152b_exit_oob;
+                       ops->up                 = rtl8152_up;
                        ops->down               = rtl8152_down;
                        ops->unload             = rtl8152_unload;
                        ret = 0;
@@ -3400,8 +3428,8 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
                case PRODUCT_ID_RTL8153:
                        ops->init               = r8153_init;
                        ops->enable             = rtl8153_enable;
-                       ops->disable            = rtl8152_disable;
-                       ops->up                 = r8153_first_init;
+                       ops->disable            = rtl8153_disable;
+                       ops->up                 = rtl8153_up;
                        ops->down               = rtl8153_down;
                        ops->unload             = rtl8153_unload;
                        ret = 0;
@@ -3416,8 +3444,8 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
                case PRODUCT_ID_SAMSUNG:
                        ops->init               = r8153_init;
                        ops->enable             = rtl8153_enable;
-                       ops->disable            = rtl8152_disable;
-                       ops->up                 = r8153_first_init;
+                       ops->disable            = rtl8153_disable;
+                       ops->up                 = rtl8153_up;
                        ops->down               = rtl8153_down;
                        ops->unload             = rtl8153_unload;
                        ret = 0;
index 733be51..6ad4447 100644 (file)
@@ -57,7 +57,7 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
                                 struct ath9k_beacon_state *bs)
 {
        struct ath_common *common = ath9k_hw_common(ah);
-       int dtim_intval, sleepduration;
+       int dtim_intval;
        u64 tsf;
 
        /* No need to configure beacon if we are not associated */
@@ -75,7 +75,6 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
         * last beacon we received (which may be none).
         */
        dtim_intval = conf->intval * conf->dtim_period;
-       sleepduration = ah->hw->conf.listen_interval * conf->intval;
 
        /*
         * Pull nexttbtt forward to reflect the current
@@ -113,7 +112,7 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
         */
 
        bs->bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
-                                                 sleepduration));
+                                                 conf->intval));
        if (bs->bs_sleepduration > bs->bs_dtimperiod)
                bs->bs_sleepduration = bs->bs_dtimperiod;
 
index bb86eb2..f0484b1 100644 (file)
@@ -978,7 +978,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
        struct ath_hw *ah = common->ah;
        struct ath_htc_rx_status *rxstatus;
        struct ath_rx_status rx_stats;
-       bool decrypt_error;
+       bool decrypt_error = false;
 
        if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
                ath_err(common, "Corrupted RX frame, dropping (len: %d)\n",
index b8e2561..fe3dc12 100644 (file)
@@ -27,10 +27,17 @@ config BRCMFMAC
          one of the bus interface support. If you choose to build a module,
          it'll be called brcmfmac.ko.
 
+config BRCMFMAC_PROTO_BCDC
+       bool
+
+config BRCMFMAC_PROTO_MSGBUF
+       bool
+
 config BRCMFMAC_SDIO
        bool "SDIO bus interface support for FullMAC driver"
        depends on (MMC = y || MMC = BRCMFMAC)
        depends on BRCMFMAC
+       select BRCMFMAC_PROTO_BCDC
        select FW_LOADER
        default y
        ---help---
@@ -42,6 +49,7 @@ config BRCMFMAC_USB
        bool "USB bus interface support for FullMAC driver"
        depends on (USB = y || USB = BRCMFMAC)
        depends on BRCMFMAC
+       select BRCMFMAC_PROTO_BCDC
        select FW_LOADER
        ---help---
          This option enables the USB bus interface support for Broadcom
@@ -52,6 +60,8 @@ config BRCMFMAC_PCIE
        bool "PCIE bus interface support for FullMAC driver"
        depends on BRCMFMAC
        depends on PCI
+       depends on HAS_DMA
+       select BRCMFMAC_PROTO_MSGBUF
        select FW_LOADER
        ---help---
          This option enables the PCIE bus interface support for Broadcom
index c35adf4..90a977f 100644 (file)
@@ -30,16 +30,18 @@ brcmfmac-objs += \
                fwsignal.o \
                p2p.o \
                proto.o \
-               bcdc.o \
-               commonring.o \
-               flowring.o \
-               msgbuf.o \
                dhd_common.o \
                dhd_linux.o \
                firmware.o \
                feature.o \
                btcoex.o \
                vendor.o
+brcmfmac-$(CONFIG_BRCMFMAC_PROTO_BCDC) += \
+               bcdc.o
+brcmfmac-$(CONFIG_BRCMFMAC_PROTO_MSGBUF) += \
+               commonring.o \
+               flowring.o \
+               msgbuf.o
 brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
                dhd_sdio.o \
                bcmsdh.o
index 17e8c03..6003179 100644 (file)
 #ifndef BRCMFMAC_BCDC_H
 #define BRCMFMAC_BCDC_H
 
-
+#ifdef CONFIG_BRCMFMAC_PROTO_BCDC
 int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr);
 void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr);
-
+#else
+static inline int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) { return 0; }
+static inline void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) {}
+#endif
 
 #endif /* BRCMFMAC_BCDC_H */
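Editor's note: this bcdc.h change (and the msgbuf.h one further down) shows the usual pattern for optional kernel subsystems: when the Kconfig option is off, the header provides static inline no-op stubs so callers compile unchanged while the real object file simply is not built. A stand-alone illustration of the same pattern (FEATURE_FOO and the function names are invented):

    #include <stdio.h>

    /* Build with -DFEATURE_FOO for the "real" implementation,
     * without it the static inline stubs below are used instead.
     */
    #ifdef FEATURE_FOO
    /* In the kernel this would live in foo.c, compiled only when the option is set. */
    static int foo_attach(void) { puts("foo: attached"); return 0; }
    static void foo_detach(void) { puts("foo: detached"); }
    #else
    /* Option disabled: no-op stubs keep every caller compiling. */
    static inline int foo_attach(void) { return 0; }
    static inline void foo_detach(void) { }
    #endif

    int main(void)
    {
            if (foo_attach())
                    return 1;
            puts("running (foo may or may not be compiled in)");
            foo_detach();
            return 0;
    }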
index 4f1daab..44fc85f 100644 (file)
@@ -185,7 +185,13 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
                  ifevent->action, ifevent->ifidx, ifevent->bssidx,
                  ifevent->flags, ifevent->role);
 
-       if (ifevent->flags & BRCMF_E_IF_FLAG_NOIF) {
+       /* The P2P Device interface event must not be ignored
+        * contrary to what firmware tells us. The only way to
+        * distinguish the P2P Device is by looking at the ifidx
+        * and bssidx received.
+        */
+       if (!(ifevent->ifidx == 0 && ifevent->bssidx == 1) &&
+           (ifevent->flags & BRCMF_E_IF_FLAG_NOIF)) {
                brcmf_dbg(EVENT, "event can be ignored\n");
                return;
        }
@@ -210,12 +216,12 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
                                return;
        }
 
-       if (ifevent->action == BRCMF_E_IF_CHANGE)
+       if (ifp && ifevent->action == BRCMF_E_IF_CHANGE)
                brcmf_fws_reset_interface(ifp);
 
        err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data);
 
-       if (ifevent->action == BRCMF_E_IF_DEL) {
+       if (ifp && ifevent->action == BRCMF_E_IF_DEL) {
                brcmf_fws_del_interface(ifp);
                brcmf_del_if(drvr, ifevent->bssidx);
        }
index dd20b18..cbf033f 100644 (file)
@@ -172,6 +172,8 @@ enum brcmf_fweh_event_code {
 #define BRCMF_E_IF_ROLE_STA                    0
 #define BRCMF_E_IF_ROLE_AP                     1
 #define BRCMF_E_IF_ROLE_WDS                    2
+#define BRCMF_E_IF_ROLE_P2P_GO                 3
+#define BRCMF_E_IF_ROLE_P2P_CLIENT             4
 
 /**
  * definitions for event packet validation.
index f901ae5..77a51b8 100644 (file)
@@ -15,6 +15,7 @@
 #ifndef BRCMFMAC_MSGBUF_H
 #define BRCMFMAC_MSGBUF_H
 
+#ifdef CONFIG_BRCMFMAC_PROTO_MSGBUF
 
 #define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM      20
 #define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM       256
 
 
 int brcmf_proto_msgbuf_rx_trigger(struct device *dev);
+void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid);
 int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr);
 void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr);
-void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid);
-
+#else
+static inline int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
+{
+       return 0;
+}
+static inline void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr) {}
+#endif
 
 #endif /* BRCMFMAC_MSGBUF_H */
index 02fe706..f3a9804 100644 (file)
@@ -497,8 +497,11 @@ brcmf_configure_arp_offload(struct brcmf_if *ifp, bool enable)
 static void
 brcmf_cfg80211_update_proto_addr_mode(struct wireless_dev *wdev)
 {
-       struct net_device *ndev = wdev->netdev;
-       struct brcmf_if *ifp = netdev_priv(ndev);
+       struct brcmf_cfg80211_vif *vif;
+       struct brcmf_if *ifp;
+
+       vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+       ifp = vif->ifp;
 
        if ((wdev->iftype == NL80211_IFTYPE_ADHOC) ||
            (wdev->iftype == NL80211_IFTYPE_AP) ||
@@ -5143,6 +5146,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
 
                ch.band = BRCMU_CHAN_BAND_2G;
                ch.bw = BRCMU_CHAN_BW_40;
+               ch.sb = BRCMU_CHAN_SB_NONE;
                ch.chnum = 0;
                cfg->d11inf.encchspec(&ch);
 
@@ -5176,6 +5180,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
 
                        brcmf_update_bw40_channel_flag(&band->channels[j], &ch);
                }
+               kfree(pbuf);
        }
        return err;
 }
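
The first hunk stops going through wdev->netdev (which may be absent for some interface types) and instead recovers the enclosing vif with container_of() on the embedded wireless_dev. A standalone illustration of that recovery step; the struct names here are invented, not the driver's:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct wdev { int iftype; };
struct vif  { int id; struct wdev wdev; };

int main(void)
{
	struct vif v = { .id = 7 };
	struct wdev *w = &v.wdev;
	/* Recover the enclosing vif from the embedded wdev, the same way
	 * the patch recovers brcmf_cfg80211_vif from the wireless_dev.
	 */
	struct vif *back = container_of(w, struct vif, wdev);

	printf("%d\n", back->id);	/* prints 7 */
	return 0;
}
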
index 760c45c..1513dbc 100644 (file)
@@ -40,7 +40,7 @@
 #include "commands.h"
 #include "power.h"
 
-static bool force_cam;
+static bool force_cam = true;
 module_param(force_cam, bool, 0644);
 MODULE_PARM_DESC(force_cam, "force continuously aware mode (no power saving at all)");
 
index d67a37a..d53adc2 100644 (file)
@@ -83,6 +83,8 @@
 #define IWL7260_TX_POWER_VERSION       0xffff /* meaningless */
 #define IWL3160_NVM_VERSION            0x709
 #define IWL3160_TX_POWER_VERSION       0xffff /* meaningless */
+#define IWL3165_NVM_VERSION            0x709
+#define IWL3165_TX_POWER_VERSION       0xffff /* meaningless */
 #define IWL7265_NVM_VERSION            0x0a1d
 #define IWL7265_TX_POWER_VERSION       0xffff /* meaningless */
 
@@ -92,6 +94,9 @@
 #define IWL3160_FW_PRE "iwlwifi-3160-"
 #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode"
 
+#define IWL3165_FW_PRE "iwlwifi-3165-"
+#define IWL3165_MODULE_FIRMWARE(api) IWL3165_FW_PRE __stringify(api) ".ucode"
+
 #define IWL7265_FW_PRE "iwlwifi-7265-"
 #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
 
@@ -213,6 +218,16 @@ static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = {
        {0},
 };
 
+const struct iwl_cfg iwl3165_2ac_cfg = {
+       .name = "Intel(R) Dual Band Wireless AC 3165",
+       .fw_name_pre = IWL3165_FW_PRE,
+       IWL_DEVICE_7000,
+       .ht_params = &iwl7000_ht_params,
+       .nvm_ver = IWL3165_NVM_VERSION,
+       .nvm_calib_ver = IWL3165_TX_POWER_VERSION,
+       .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
+};
+
 const struct iwl_cfg iwl7265_2ac_cfg = {
        .name = "Intel(R) Dual Band Wireless AC 7265",
        .fw_name_pre = IWL7265_FW_PRE,
@@ -245,4 +260,5 @@ const struct iwl_cfg iwl7265_n_cfg = {
 
 MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
 MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
+MODULE_FIRMWARE(IWL3165_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
 MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
index 8da596d..3d7cc37 100644 (file)
@@ -120,6 +120,8 @@ enum iwl_led_mode {
 #define IWL_LONG_WD_TIMEOUT    10000
 #define IWL_MAX_WD_TIMEOUT     120000
 
+#define IWL_DEFAULT_MAX_TX_POWER 22
+
 /* Antenna presence definitions */
 #define        ANT_NONE        0x0
 #define        ANT_A           BIT(0)
@@ -335,6 +337,7 @@ extern const struct iwl_cfg iwl7260_n_cfg;
 extern const struct iwl_cfg iwl3160_2ac_cfg;
 extern const struct iwl_cfg iwl3160_2n_cfg;
 extern const struct iwl_cfg iwl3160_n_cfg;
+extern const struct iwl_cfg iwl3165_2ac_cfg;
 extern const struct iwl_cfg iwl7265_2ac_cfg;
 extern const struct iwl_cfg iwl7265_2n_cfg;
 extern const struct iwl_cfg iwl7265_n_cfg;
index 018af29..354255f 100644 (file)
@@ -146,8 +146,6 @@ static const u8 iwl_nvm_channels_family_8000[] = {
 #define LAST_2GHZ_HT_PLUS              9
 #define LAST_5GHZ_HT                   161
 
-#define DEFAULT_MAX_TX_POWER 16
-
 /* rate data (static) */
 static struct ieee80211_rate iwl_cfg80211_rates[] = {
        { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, },
@@ -295,7 +293,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
                 * Default value - highest tx power value.  max_power
                 * is not used in mvm, and is used for backwards compatibility
                 */
-               channel->max_power = DEFAULT_MAX_TX_POWER;
+               channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
                is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
                IWL_DEBUG_EEPROM(dev,
                                 "Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
index 2291bbc..ce71625 100644 (file)
@@ -585,8 +585,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
        lockdep_assert_held(&mvm->mutex);
 
        if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
-               u32 mode;
-
                switch (mvm->bt_force_ant_mode) {
                case BT_FORCE_ANT_BT:
                        mode = BT_COEX_BT;
@@ -756,7 +754,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
        struct iwl_bt_iterator_data *data = _data;
        struct iwl_mvm *mvm = data->mvm;
        struct ieee80211_chanctx_conf *chanctx_conf;
-       enum ieee80211_smps_mode smps_mode;
+       /* default smps_mode is AUTOMATIC - only used for client modes */
+       enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
        u32 bt_activity_grading;
        int ave_rssi;
 
@@ -764,8 +763,6 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
 
        switch (vif->type) {
        case NL80211_IFTYPE_STATION:
-               /* default smps_mode for BSS / P2P client is AUTOMATIC */
-               smps_mode = IEEE80211_SMPS_AUTOMATIC;
                break;
        case NL80211_IFTYPE_AP:
                if (!mvmvif->ap_ibss_active)
@@ -797,7 +794,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
        else if (bt_activity_grading >= BT_LOW_TRAFFIC)
                smps_mode = IEEE80211_SMPS_DYNAMIC;
 
-       /* relax SMPS contraints for next association */
+       /* relax SMPS constraints for next association */
        if (!vif->bss_conf.assoc)
                smps_mode = IEEE80211_SMPS_AUTOMATIC;
 
index 2e90ff7..87e517b 100644 (file)
@@ -74,8 +74,7 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
 
        switch (param) {
        case MVM_DEBUGFS_PM_KEEP_ALIVE: {
-               struct ieee80211_hw *hw = mvm->hw;
-               int dtimper = hw->conf.ps_dtim_period ?: 1;
+               int dtimper = vif->bss_conf.dtim_period ?: 1;
                int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
 
                IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val);
index 95f5b32..9a922f3 100644 (file)
@@ -1563,14 +1563,14 @@ enum iwl_sf_scenario {
 
 /**
  * Smart Fifo configuration command.
- * @state: smart fifo state, types listed in iwl_sf_sate.
+ * @state: smart fifo state, types listed in enum %iwl_sf_sate.
  * @watermark: Minimum allowed availabe free space in RXF for transient state.
  * @long_delay_timeouts: aging and idle timer values for each scenario
  * in long delay state.
  * @full_on_timeouts: timer values for each scenario in full on state.
  */
 struct iwl_sf_cfg_cmd {
-       enum iwl_sf_state state;
+       __le32 state;
        __le32 watermark[SF_TRANSIENT_STATES_NUMBER];
        __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
        __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
index 0e523e2..8242e68 100644 (file)
@@ -721,11 +721,6 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
            !force_assoc_off) {
                u32 dtim_offs;
 
-               /* Allow beacons to pass through as long as we are not
-                * associated, or we do not have dtim period information.
-                */
-               cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
-
                /*
                 * The DTIM count counts down, so when it is N that means N
                 * more beacon intervals happen until the DTIM TBTT. Therefore
@@ -759,6 +754,11 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
                ctxt_sta->is_assoc = cpu_to_le32(1);
        } else {
                ctxt_sta->is_assoc = cpu_to_le32(0);
+
+               /* Allow beacons to pass through as long as we are not
+                * associated, or we do not have dtim period information.
+                */
+               cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
        }
 
        ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int);
index 7c87965..cdc272d 100644 (file)
@@ -396,12 +396,14 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        else
                hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
-       /* TODO: enable that only for firmwares that don't crash */
-       /* hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; */
-       hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
-       hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
-       /* we create the 802.11 header and zero length SSID IE. */
-       hw->wiphy->max_sched_scan_ie_len = SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
+       if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10) {
+               hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+               hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
+               hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
+               /* we create the 802.11 header and zero length SSID IE. */
+               hw->wiphy->max_sched_scan_ie_len =
+                       SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
+       }
 
        hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
                               NL80211_FEATURE_LOW_PRIORITY_SCAN |
@@ -1524,11 +1526,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
                 */
                iwl_mvm_remove_time_event(mvm, mvmvif,
                                          &mvmvif->time_event_data);
-       } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
-                             BSS_CHANGED_QOS)) {
-               ret = iwl_mvm_power_update_mac(mvm);
-               if (ret)
-                       IWL_ERR(mvm, "failed to update power mode\n");
        }
 
        if (changes & BSS_CHANGED_BEACON_INFO) {
@@ -1536,6 +1533,12 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
                WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
        }
 
+       if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS)) {
+               ret = iwl_mvm_power_update_mac(mvm);
+               if (ret)
+                       IWL_ERR(mvm, "failed to update power mode\n");
+       }
+
        if (changes & BSS_CHANGED_TXPOWER) {
                IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
                                bss_conf->txpower);
index 2b2d108..d9769a2 100644 (file)
@@ -281,7 +281,6 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
                                    struct ieee80211_vif *vif,
                                    struct iwl_mac_power_cmd *cmd)
 {
-       struct ieee80211_hw *hw = mvm->hw;
        struct ieee80211_chanctx_conf *chanctx_conf;
        struct ieee80211_channel *chan;
        int dtimper, dtimper_msec;
@@ -292,7 +291,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
 
        cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
                                                            mvmvif->color));
-       dtimper = hw->conf.ps_dtim_period ?: 1;
+       dtimper = vif->bss_conf.dtim_period;
 
        /*
         * Regardless of power management state the driver must set
@@ -885,7 +884,7 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
        iwl_mvm_power_build_cmd(mvm, vif, &cmd);
        if (enable) {
                /* configure skip over dtim up to 300 msec */
-               int dtimper = mvm->hw->conf.ps_dtim_period ?: 1;
+               int dtimper = vif->bss_conf.dtim_period ?: 1;
                int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
 
                if (WARN_ON(!dtimper_msec))
index 4b98987..bf5cd8c 100644 (file)
@@ -149,13 +149,13 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
            le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]);
        energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >>
                                                IWL_RX_INFO_ENERGY_ANT_A_POS;
-       energy_a = energy_a ? -energy_a : -256;
+       energy_a = energy_a ? -energy_a : S8_MIN;
        energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >>
                                                IWL_RX_INFO_ENERGY_ANT_B_POS;
-       energy_b = energy_b ? -energy_b : -256;
+       energy_b = energy_b ? -energy_b : S8_MIN;
        energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >>
                                                IWL_RX_INFO_ENERGY_ANT_C_POS;
-       energy_c = energy_c ? -energy_c : -256;
+       energy_c = energy_c ? -energy_c : S8_MIN;
        max_energy = max(energy_a, energy_b);
        max_energy = max(max_energy, energy_c);
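
The -256 sentinel replaced above cannot be represented in a signed 8-bit signal value, which is presumably why S8_MIN is used instead. A quick self-contained check of that bug class (not driver code; assumes the value ultimately lands in an s8 field):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int8_t energy = (int8_t)-256;	/* out of range: wraps to 0 on common ABIs */

	printf("%d\n", energy);		/* 0, i.e. "strong signal", not a floor */
	printf("%d\n", INT8_MIN);	/* -128, the usable minimum (S8_MIN) */
	return 0;
}
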
 
index 7edfd15..e843b67 100644 (file)
@@ -172,7 +172,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
                             enum iwl_sf_state new_state)
 {
        struct iwl_sf_cfg_cmd sf_cmd = {
-               .state = new_state,
+               .state = cpu_to_le32(new_state),
        };
        struct ieee80211_sta *sta;
        int ret = 0;
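
Together with the __le32 field change in fw-api.h above, the cpu_to_le32() here makes the firmware command's byte order explicit instead of relying on an enum in host order. A standalone sketch of the same boundary conversion using glibc's htole32() (the struct is invented):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fw_cmd {
	uint32_t state_le;		/* always little-endian on the wire */
};

int main(void)
{
	uint32_t state = 2;		/* host-order state value */
	struct fw_cmd cmd;
	unsigned char raw[4];

	cmd.state_le = htole32(state);	/* convert once, at the boundary */

	memcpy(raw, &cmd.state_le, sizeof(raw));
	printf("%02x %02x %02x %02x\n", raw[0], raw[1], raw[2], raw[3]);
	return 0;
}
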
index dbc8707..9ee410b 100644 (file)
@@ -168,10 +168,14 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
 
        /*
         * for data packets, rate info comes from the table inside the fw. This
-        * table is controlled by LINK_QUALITY commands
+        * table is controlled by LINK_QUALITY commands. Exclude ctrl port
+        * frames like EAPOLs which should be treated as mgmt frames. This
+        * avoids them being sent initially in high rates which increases the
+        * chances for completion of the 4-Way handshake.
         */
 
-       if (ieee80211_is_data(fc) && sta) {
+       if (ieee80211_is_data(fc) && sta &&
+           !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) {
                tx_cmd->initial_rate_index = 0;
                tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
                return;
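
The expanded comment explains the intent: control-port (EAPOL) frames should not use the firmware's link-quality rate table, so the 4-way handshake goes out at a more conservative rate. A sketch of that predicate with simplified types (the flag name mirrors mac80211's IEEE80211_TX_CTRL_PORT_CTRL_PROTO, but the structs are not the real ones):

#include <stdbool.h>

#define TX_CTRL_PORT_CTRL_PROTO 0x1

struct tx_info { unsigned int control_flags; };

/* Use the firmware rate table only for ordinary data frames to a known
 * station; control-port frames fall through to the management-style
 * initial-rate path instead.
 */
static bool use_fw_rate_table(bool is_data, bool has_sta,
			      const struct tx_info *info)
{
	return is_data && has_sta &&
	       !(info->control_flags & TX_CTRL_PORT_CTRL_PROTO);
}
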
index f0e722c..073a68b 100644 (file)
@@ -352,11 +352,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)},
        {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B4, 0x8370, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B4, 0x8272, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)},
 
+/* 3165 Series */
+       {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)},
+
 /* 7265 Series */
        {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)},
@@ -378,6 +384,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
index 9dd63b8..e9bf2f4 100644 (file)
@@ -510,7 +510,7 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
 
        WARN_ON(nt->mw[mw_num].virt_addr == NULL);
 
-       if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
+       if (nt->max_qps % mw_max && mw_num + 1 < nt->max_qps / mw_max)
                num_qps_mw = nt->max_qps / mw_max + 1;
        else
                num_qps_mw = nt->max_qps / mw_max;
@@ -576,6 +576,19 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
                return -ENOMEM;
        }
 
+       /*
+        * we must ensure that the memory address allocated is BAR size
+        * aligned in order for the XLAT register to take the value. This
+        * is a requirement of the hardware. It is recommended to setup CMA
+        * for BAR sizes equal or greater than 4MB.
+        */
+       if (!IS_ALIGNED(mw->dma_addr, mw->size)) {
+               dev_err(&pdev->dev, "DMA memory %pad not aligned to BAR size\n",
+                       &mw->dma_addr);
+               ntb_free_mw(nt, num_mw);
+               return -ENOMEM;
+       }
+
        /* Notify HW the memory location of the receive buffer */
        ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);
 
@@ -856,7 +869,7 @@ static int ntb_transport_init_queue(struct ntb_transport *nt,
        qp->client_ready = NTB_LINK_DOWN;
        qp->event_handler = NULL;
 
-       if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
+       if (nt->max_qps % mw_max && mw_num + 1 < nt->max_qps / mw_max)
                num_qps_mw = nt->max_qps / mw_max + 1;
        else
                num_qps_mw = nt->max_qps / mw_max;
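
The comment in the middle hunk states the constraint: the receive buffer must be aligned to the BAR size or the XLAT register will not accept it. A standalone illustration of that alignment guard (IS_ALIGNED is re-defined locally, using the GCC typeof extension like the kernel macro):

#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)

int main(void)
{
	uint64_t bar_size = 4 * 1024 * 1024;	/* 4 MB BAR */
	uint64_t ok_addr  = 0x1000000;		/* 16 MB: 4 MB aligned */
	uint64_t bad_addr = 0x1230000;		/* not 4 MB aligned */

	printf("%d %d\n",
	       IS_ALIGNED(ok_addr, bar_size),	/* 1 */
	       IS_ALIGNED(bad_addr, bar_size));	/* 0 */
	return 0;
}
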
index 9eae983..a0580af 100644 (file)
@@ -913,7 +913,7 @@ static int __init dino_probe(struct parisc_device *dev)
        printk("%s version %s found at 0x%lx\n", name, version, hpa);
 
        if (!request_mem_region(hpa, PAGE_SIZE, name)) {
-               printk(KERN_ERR "DINO: Hey! Someone took my MMIO space (0x%ld)!\n",
+               printk(KERN_ERR "DINO: Hey! Someone took my MMIO space (0x%lx)!\n",
                        hpa);
                return 1;
        }
index 0f54ab6..3651c38 100644 (file)
@@ -278,7 +278,7 @@ pdcspath_hwpath_write(struct pdcspath_entry *entry, const char *buf, size_t coun
 {
        struct hardware_path hwpath;
        unsigned short i;
-       char in[count+1], *temp;
+       char in[64], *temp;
        struct device *dev;
        int ret;
 
@@ -286,8 +286,9 @@ pdcspath_hwpath_write(struct pdcspath_entry *entry, const char *buf, size_t coun
                return -EINVAL;
 
        /* We'll use a local copy of buf */
-       memset(in, 0, count+1);
+       count = min_t(size_t, count, sizeof(in)-1);
        strncpy(in, buf, count);
+       in[count] = '\0';
        
        /* Let's clean up the target. 0xff is a blank pattern */
        memset(&hwpath, 0xff, sizeof(hwpath));
@@ -393,14 +394,15 @@ pdcspath_layer_write(struct pdcspath_entry *entry, const char *buf, size_t count
 {
        unsigned int layers[6]; /* device-specific info (ctlr#, unit#, ...) */
        unsigned short i;
-       char in[count+1], *temp;
+       char in[64], *temp;
 
        if (!entry || !buf || !count)
                return -EINVAL;
 
        /* We'll use a local copy of buf */
-       memset(in, 0, count+1);
+       count = min_t(size_t, count, sizeof(in)-1);
        strncpy(in, buf, count);
+       in[count] = '\0';
        
        /* Let's clean up the target. 0 is a blank pattern */
        memset(&layers, 0, sizeof(layers));
@@ -755,7 +757,7 @@ static ssize_t pdcs_auto_write(struct kobject *kobj,
 {
        struct pdcspath_entry *pathentry;
        unsigned char flags;
-       char in[count+1], *temp;
+       char in[8], *temp;
        char c;
 
        if (!capable(CAP_SYS_ADMIN))
@@ -765,8 +767,9 @@ static ssize_t pdcs_auto_write(struct kobject *kobj,
                return -EINVAL;
 
        /* We'll use a local copy of buf */
-       memset(in, 0, count+1);
+       count = min_t(size_t, count, sizeof(in)-1);
        strncpy(in, buf, count);
+       in[count] = '\0';
 
        /* Current flags are stored in primary boot path entry */
        pathentry = &pdcspath_entry_primary;
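
All three hunks in this file replace a user-sized variable-length array with a fixed buffer, a clamp, and an explicit terminator. A minimal self-contained version of that bounded-copy pattern:

#include <stdio.h>
#include <string.h>

static void handle_write(const char *buf, size_t count)
{
	char in[64];

	/* Clamp to the local buffer and terminate explicitly, since
	 * strncpy() does not NUL-terminate when the source is longer.
	 */
	if (count > sizeof(in) - 1)
		count = sizeof(in) - 1;
	strncpy(in, buf, count);
	in[count] = '\0';

	printf("parsed: %s\n", in);
}

int main(void)
{
	handle_write("0/1/2/3", 7);
	return 0;
}
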
index a568efa..35fc73a 100644 (file)
@@ -49,6 +49,9 @@ struct imx6_pcie {
 
 /* PCIe Port Logic registers (memory-mapped) */
 #define PL_OFFSET 0x700
+#define PCIE_PL_PFLR (PL_OFFSET + 0x08)
+#define PCIE_PL_PFLR_LINK_STATE_MASK           (0x3f << 16)
+#define PCIE_PL_PFLR_FORCE_LINK                        (1 << 15)
 #define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
 #define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
 #define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING        (1 << 29)
@@ -214,6 +217,32 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
 static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
 {
        struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+       u32 val, gpr1, gpr12;
+
+       /*
+        * If the bootloader already enabled the link we need some special
+        * handling to get the core back into a state where it is safe to
+        * touch it for configuration.  As there is no dedicated reset signal
+        * wired up for MX6QDL, we need to manually force LTSSM into "detect"
+        * state before completely disabling LTSSM, which is a prerequisite
+        * for core configuration.
+        *
+        * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we have a strong
+        * indication that the bootloader activated the link.
+        */
+       regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, &gpr1);
+       regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, &gpr12);
+
+       if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) &&
+           (gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) {
+               val = readl(pp->dbi_base + PCIE_PL_PFLR);
+               val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
+               val |= PCIE_PL_PFLR_FORCE_LINK;
+               writel(val, pp->dbi_base + PCIE_PL_PFLR);
+
+               regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+                               IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+       }
 
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
                        IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
@@ -589,6 +618,14 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
        return 0;
 }
 
+static void imx6_pcie_shutdown(struct platform_device *pdev)
+{
+       struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
+
+       /* bring down link, so bootloader gets clean state in case of reboot */
+       imx6_pcie_assert_core_reset(&imx6_pcie->pp);
+}
+
 static const struct of_device_id imx6_pcie_of_match[] = {
        { .compatible = "fsl,imx6q-pcie", },
        {},
@@ -601,6 +638,7 @@ static struct platform_driver imx6_pcie_driver = {
                .owner  = THIS_MODULE,
                .of_match_table = imx6_pcie_of_match,
        },
+       .shutdown = imx6_pcie_shutdown,
 };
 
 /* Freescale PCIe driver does not allow module unload */
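
The long comment in the reset hunk is the heart of this change: if the bootloader left the link up, LTSSM must be forced back into the detect state before it is disabled, and the new shutdown hook hands the bootloader a clean link on reboot. A bit-level sketch of the force-to-detect step, done on a plain variable rather than the device register (masks mirror the PCIE_PL_PFLR_* definitions above):

#include <stdint.h>
#include <stdio.h>

#define PFLR_LINK_STATE_MASK	(0x3f << 16)
#define PFLR_FORCE_LINK		(1 << 15)

int main(void)
{
	uint32_t pflr = 0x00350000;		/* some live link state */

	pflr &= ~PFLR_LINK_STATE_MASK;		/* request state 0: detect */
	pflr |= PFLR_FORCE_LINK;		/* latch the forced state */

	printf("0x%08x\n", pflr);		/* 0x00008000 */
	return 0;
}
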
index 70741c8..6cd5160 100644 (file)
@@ -560,19 +560,15 @@ static void disable_slot(struct acpiphp_slot *slot)
        slot->flags &= (~SLOT_ENABLED);
 }
 
-static bool acpiphp_no_hotplug(struct acpi_device *adev)
-{
-       return adev && adev->flags.no_hotplug;
-}
-
 static bool slot_no_hotplug(struct acpiphp_slot *slot)
 {
-       struct acpiphp_func *func;
+       struct pci_bus *bus = slot->bus;
+       struct pci_dev *dev;
 
-       list_for_each_entry(func, &slot->funcs, sibling)
-               if (acpiphp_no_hotplug(func_to_acpi_device(func)))
+       list_for_each_entry(dev, &bus->devices, bus_list) {
+               if (PCI_SLOT(dev->devfn) == slot->device && dev->ignore_hotplug)
                        return true;
-
+       }
        return false;
 }
 
@@ -645,7 +641,7 @@ static void trim_stale_devices(struct pci_dev *dev)
 
                status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta);
                alive = (ACPI_SUCCESS(status) && device_status_valid(sta))
-                       || acpiphp_no_hotplug(adev);
+                       || dev->ignore_hotplug;
        }
        if (!alive)
                alive = pci_device_is_present(dev);
index 9da84b8..5e01ae3 100644 (file)
@@ -506,6 +506,8 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
 {
        struct controller *ctrl = (struct controller *)dev_id;
        struct pci_dev *pdev = ctrl_dev(ctrl);
+       struct pci_bus *subordinate = pdev->subordinate;
+       struct pci_dev *dev;
        struct slot *slot = ctrl->slot;
        u16 detected, intr_loc;
 
@@ -539,6 +541,16 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
                wake_up(&ctrl->queue);
        }
 
+       if (subordinate) {
+               list_for_each_entry(dev, &subordinate->devices, bus_list) {
+                       if (dev->ignore_hotplug) {
+                               ctrl_dbg(ctrl, "ignoring hotplug event %#06x (%s requested no hotplug)\n",
+                                        intr_loc, pci_name(dev));
+                               return IRQ_HANDLED;
+                       }
+               }
+       }
+
        if (!(intr_loc & ~PCI_EXP_SLTSTA_CC))
                return IRQ_HANDLED;
 
index e246a10..3e36ec8 100644 (file)
@@ -46,7 +46,6 @@ static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
                 */
                if (pci_is_pcie(dev))
                        return;
-               dev_info(&dev->dev, "using default PCI settings\n");
                hpp = &pci_default_type0;
        }
 
@@ -153,7 +152,6 @@ void pci_configure_slot(struct pci_dev *dev)
 {
        struct pci_dev *cdev;
        struct hotplug_params hpp;
-       int ret;
 
        if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL ||
                        (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
@@ -163,9 +161,7 @@ void pci_configure_slot(struct pci_dev *dev)
        pcie_bus_configure_settings(dev->bus);
 
        memset(&hpp, 0, sizeof(hpp));
-       ret = pci_get_hp_params(dev, &hpp);
-       if (ret)
-               dev_warn(&dev->dev, "no hotplug settings from platform\n");
+       pci_get_hp_params(dev, &hpp);
 
        program_hpp_type2(dev, hpp.t2);
        program_hpp_type1(dev, hpp.t1);
index 0dd7427..f833aa2 100644 (file)
@@ -41,9 +41,9 @@ config PHY_MVEBU_SATA
 config PHY_MIPHY365X
        tristate "STMicroelectronics MIPHY365X PHY driver for STiH41x series"
        depends on ARCH_STI
-       depends on GENERIC_PHY
        depends on HAS_IOMEM
        depends on OF
+       select GENERIC_PHY
        help
          Enable this to support the miphy transceiver (for SATA/PCIE)
          that is part of STMicroelectronics STiH41x SoC series.
@@ -214,12 +214,14 @@ config PHY_QCOM_IPQ806X_SATA
 config PHY_ST_SPEAR1310_MIPHY
        tristate "ST SPEAR1310-MIPHY driver"
        select GENERIC_PHY
+       depends on MACH_SPEAR1310 || COMPILE_TEST
        help
          Support for ST SPEAr1310 MIPHY which can be used for PCIe and SATA.
 
 config PHY_ST_SPEAR1340_MIPHY
        tristate "ST SPEAR1340-MIPHY driver"
        select GENERIC_PHY
+       depends on MACH_SPEAR1340 || COMPILE_TEST
        help
          Support for ST SPEAr1340 MIPHY which can be used for PCIe and SATA.
 
index b05302b..392101c 100644 (file)
@@ -542,6 +542,7 @@ static const struct of_device_id exynos5_usbdrd_phy_of_match[] = {
        },
        { },
 };
+MODULE_DEVICE_TABLE(of, exynos5_usbdrd_phy_of_match);
 
 static int exynos5_usbdrd_phy_probe(struct platform_device *pdev)
 {
index e111baf..e0fb7a1 100644 (file)
@@ -163,6 +163,7 @@ enum miphy_sata_gen {
 };
 
 static u8 rx_tx_spd[] = {
+       0, /* GEN0 doesn't exist. */
        TX_SPDSEL_GEN1_VAL | RX_SPDSEL_GEN1_VAL,
        TX_SPDSEL_GEN2_VAL | RX_SPDSEL_GEN2_VAL,
        TX_SPDSEL_GEN3_VAL | RX_SPDSEL_GEN3_VAL
index e1a6623..9cd33a4 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/delay.h>
 #include <linux/usb/otg.h>
 #include <linux/phy/phy.h>
+#include <linux/pm_runtime.h>
 #include <linux/usb/musb-omap.h>
 #include <linux/usb/ulpi.h>
 #include <linux/i2c/twl.h>
@@ -422,37 +423,55 @@ static void twl4030_phy_power(struct twl4030_usb *twl, int on)
        }
 }
 
-static int twl4030_phy_power_off(struct phy *phy)
+static int twl4030_usb_runtime_suspend(struct device *dev)
 {
-       struct twl4030_usb *twl = phy_get_drvdata(phy);
+       struct twl4030_usb *twl = dev_get_drvdata(dev);
 
+       dev_dbg(twl->dev, "%s\n", __func__);
        if (twl->asleep)
                return 0;
 
        twl4030_phy_power(twl, 0);
        twl->asleep = 1;
-       dev_dbg(twl->dev, "%s\n", __func__);
+
        return 0;
 }
 
-static void __twl4030_phy_power_on(struct twl4030_usb *twl)
+static int twl4030_usb_runtime_resume(struct device *dev)
 {
+       struct twl4030_usb *twl = dev_get_drvdata(dev);
+
+       dev_dbg(twl->dev, "%s\n", __func__);
+       if (!twl->asleep)
+               return 0;
+
        twl4030_phy_power(twl, 1);
-       twl4030_i2c_access(twl, 1);
-       twl4030_usb_set_mode(twl, twl->usb_mode);
-       if (twl->usb_mode == T2_USB_MODE_ULPI)
-               twl4030_i2c_access(twl, 0);
+       twl->asleep = 0;
+
+       return 0;
+}
+
+static int twl4030_phy_power_off(struct phy *phy)
+{
+       struct twl4030_usb *twl = phy_get_drvdata(phy);
+
+       dev_dbg(twl->dev, "%s\n", __func__);
+       pm_runtime_mark_last_busy(twl->dev);
+       pm_runtime_put_autosuspend(twl->dev);
+
+       return 0;
 }
 
 static int twl4030_phy_power_on(struct phy *phy)
 {
        struct twl4030_usb *twl = phy_get_drvdata(phy);
 
-       if (!twl->asleep)
-               return 0;
-       __twl4030_phy_power_on(twl);
-       twl->asleep = 0;
        dev_dbg(twl->dev, "%s\n", __func__);
+       pm_runtime_get_sync(twl->dev);
+       twl4030_i2c_access(twl, 1);
+       twl4030_usb_set_mode(twl, twl->usb_mode);
+       if (twl->usb_mode == T2_USB_MODE_ULPI)
+               twl4030_i2c_access(twl, 0);
 
        /*
         * XXX When VBUS gets driven after musb goes to A mode,
@@ -558,9 +577,27 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
                 * USB_LINK_VBUS state.  musb_hdrc won't care until it
                 * starts to handle softconnect right.
                 */
+               if ((status == OMAP_MUSB_VBUS_VALID) ||
+                   (status == OMAP_MUSB_ID_GROUND)) {
+                       if (twl->asleep)
+                               pm_runtime_get_sync(twl->dev);
+               } else {
+                       if (!twl->asleep) {
+                               pm_runtime_mark_last_busy(twl->dev);
+                               pm_runtime_put_autosuspend(twl->dev);
+                       }
+               }
                omap_musb_mailbox(status);
        }
-       sysfs_notify(&twl->dev->kobj, NULL, "vbus");
+
+       /* don't schedule during sleep - irq works right then */
+       if (status == OMAP_MUSB_ID_GROUND && !twl->asleep) {
+               cancel_delayed_work(&twl->id_workaround_work);
+               schedule_delayed_work(&twl->id_workaround_work, HZ);
+       }
+
+       if (irq)
+               sysfs_notify(&twl->dev->kobj, NULL, "vbus");
 
        return IRQ_HANDLED;
 }
@@ -569,29 +606,8 @@ static void twl4030_id_workaround_work(struct work_struct *work)
 {
        struct twl4030_usb *twl = container_of(work, struct twl4030_usb,
                id_workaround_work.work);
-       enum omap_musb_vbus_id_status status;
-       bool status_changed = false;
-
-       status = twl4030_usb_linkstat(twl);
-
-       spin_lock_irq(&twl->lock);
-       if (status >= 0 && status != twl->linkstat) {
-               twl->linkstat = status;
-               status_changed = true;
-       }
-       spin_unlock_irq(&twl->lock);
-
-       if (status_changed) {
-               dev_dbg(twl->dev, "handle missing status change to %d\n",
-                               status);
-               omap_musb_mailbox(status);
-       }
 
-       /* don't schedule during sleep - irq works right then */
-       if (status == OMAP_MUSB_ID_GROUND && !twl->asleep) {
-               cancel_delayed_work(&twl->id_workaround_work);
-               schedule_delayed_work(&twl->id_workaround_work, HZ);
-       }
+       twl4030_usb_irq(0, twl);
 }
 
 static int twl4030_phy_init(struct phy *phy)
@@ -599,22 +615,17 @@ static int twl4030_phy_init(struct phy *phy)
        struct twl4030_usb *twl = phy_get_drvdata(phy);
        enum omap_musb_vbus_id_status status;
 
-       /*
-        * Start in sleep state, we'll get called through set_suspend()
-        * callback when musb is runtime resumed and it's time to start.
-        */
-       __twl4030_phy_power(twl, 0);
-       twl->asleep = 1;
-
+       pm_runtime_get_sync(twl->dev);
        status = twl4030_usb_linkstat(twl);
        twl->linkstat = status;
 
-       if (status == OMAP_MUSB_ID_GROUND || status == OMAP_MUSB_VBUS_VALID) {
+       if (status == OMAP_MUSB_ID_GROUND || status == OMAP_MUSB_VBUS_VALID)
                omap_musb_mailbox(twl->linkstat);
-               twl4030_phy_power_on(phy);
-       }
 
        sysfs_notify(&twl->dev->kobj, NULL, "vbus");
+       pm_runtime_mark_last_busy(twl->dev);
+       pm_runtime_put_autosuspend(twl->dev);
+
        return 0;
 }
 
@@ -650,6 +661,11 @@ static const struct phy_ops ops = {
        .owner          = THIS_MODULE,
 };
 
+static const struct dev_pm_ops twl4030_usb_pm_ops = {
+       SET_RUNTIME_PM_OPS(twl4030_usb_runtime_suspend,
+                          twl4030_usb_runtime_resume, NULL)
+};
+
 static int twl4030_usb_probe(struct platform_device *pdev)
 {
        struct twl4030_usb_data *pdata = dev_get_platdata(&pdev->dev);
@@ -726,6 +742,11 @@ static int twl4030_usb_probe(struct platform_device *pdev)
 
        ATOMIC_INIT_NOTIFIER_HEAD(&twl->phy.notifier);
 
+       pm_runtime_use_autosuspend(&pdev->dev);
+       pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
+       pm_runtime_enable(&pdev->dev);
+       pm_runtime_get_sync(&pdev->dev);
+
        /* Our job is to use irqs and status from the power module
         * to keep the transceiver disabled when nothing's connected.
         *
@@ -744,6 +765,9 @@ static int twl4030_usb_probe(struct platform_device *pdev)
                return status;
        }
 
+       pm_runtime_mark_last_busy(&pdev->dev);
+       pm_runtime_put_autosuspend(twl->dev);
+
        dev_info(&pdev->dev, "Initialized TWL4030 USB module\n");
        return 0;
 }
@@ -753,6 +777,7 @@ static int twl4030_usb_remove(struct platform_device *pdev)
        struct twl4030_usb *twl = platform_get_drvdata(pdev);
        int val;
 
+       pm_runtime_get_sync(twl->dev);
        cancel_delayed_work(&twl->id_workaround_work);
        device_remove_file(twl->dev, &dev_attr_vbus);
 
@@ -772,9 +797,8 @@ static int twl4030_usb_remove(struct platform_device *pdev)
 
        /* disable complete OTG block */
        twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
-
-       if (!twl->asleep)
-               twl4030_phy_power(twl, 0);
+       pm_runtime_mark_last_busy(twl->dev);
+       pm_runtime_put(twl->dev);
 
        return 0;
 }
@@ -792,6 +816,7 @@ static struct platform_driver twl4030_usb_driver = {
        .remove         = twl4030_usb_remove,
        .driver         = {
                .name   = "twl4030_usb",
+               .pm     = &twl4030_usb_pm_ops,
                .owner  = THIS_MODULE,
                .of_match_table = of_match_ptr(twl4030_usb_id_table),
        },
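
This file is converted from hand-rolled power toggling to runtime PM with autosuspend. A structure-only sketch of the probe-side setup; every call below also appears in the patch, but this is not a buildable driver on its own:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_probe(struct platform_device *pdev)
{
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);	/* 2 s idle */
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);	/* keep powered during init */

	/* ... hardware init ... */

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);	/* may suspend after the delay */
	return 0;
}
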
index 9ca59a0..e12e5b0 100644 (file)
@@ -461,6 +461,7 @@ static struct irq_chip byt_irqchip = {
        .irq_mask = byt_irq_mask,
        .irq_unmask = byt_irq_unmask,
        .irq_set_type = byt_irq_type,
+       .flags = IRQCHIP_SKIP_SET_WAKE,
 };
 
 static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
index 337634a..6d77dcd 100644 (file)
@@ -319,7 +319,7 @@ static int pm8607_regulator_dt_init(struct platform_device *pdev,
                                    struct regulator_config *config)
 {
        struct device_node *nproot, *np;
-       nproot = of_node_get(pdev->dev.parent->of_node);
+       nproot = pdev->dev.parent->of_node;
        if (!nproot)
                return -ENODEV;
        nproot = of_get_child_by_name(nproot, "regulators");
index fdb6ea8..0003362 100644 (file)
@@ -422,9 +422,9 @@ static int da9052_regulator_probe(struct platform_device *pdev)
                config.init_data = pdata->regulators[pdev->id];
        } else {
 #ifdef CONFIG_OF
-               struct device_node *nproot, *np;
+               struct device_node *nproot = da9052->dev->of_node;
+               struct device_node *np;
 
-               nproot = of_node_get(da9052->dev->of_node);
                if (!nproot)
                        return -ENODEV;
 
index 9623e9e..3426be8 100644 (file)
@@ -226,7 +226,7 @@ static int max8907_regulator_parse_dt(struct platform_device *pdev)
        struct device_node *np, *regulators;
        int ret;
 
-       np = of_node_get(pdev->dev.parent->of_node);
+       np = pdev->dev.parent->of_node;
        if (!np)
                return 0;
 
index dad2bcd..7770777 100644 (file)
@@ -250,7 +250,7 @@ static int max8925_regulator_dt_init(struct platform_device *pdev,
        struct device_node *nproot, *np;
        int rcount;
 
-       nproot = of_node_get(pdev->dev.parent->of_node);
+       nproot = pdev->dev.parent->of_node;
        if (!nproot)
                return -ENODEV;
        np = of_get_child_by_name(nproot, "regulators");
index 90b4c53..9c31e21 100644 (file)
@@ -917,7 +917,7 @@ static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
        struct max8997_regulator_data *rdata;
        unsigned int i, dvs_voltage_nr = 1, ret;
 
-       pmic_np = of_node_get(iodev->dev->of_node);
+       pmic_np = iodev->dev->of_node;
        if (!pmic_np) {
                dev_err(&pdev->dev, "could not find pmic sub-node\n");
                return -ENODEV;
index a7ce34d..1878e5b 100644 (file)
@@ -1427,7 +1427,6 @@ static void palmas_dt_to_pdata(struct device *dev,
        u32 prop;
        int idx, ret;
 
-       node = of_node_get(node);
        regulators = of_get_child_by_name(node, "regulators");
        if (!regulators) {
                dev_info(dev, "regulator node not found\n");
index fa7db88..e584c99 100644 (file)
@@ -1014,7 +1014,7 @@ static struct tps65910_board *tps65910_parse_dt_reg_data(
        if (!pmic_plat_data)
                return NULL;
 
-       np = of_node_get(pdev->dev.parent->of_node);
+       np = pdev->dev.parent->of_node;
        regulators = of_get_child_by_name(np, "regulators");
        if (!regulators) {
                dev_err(&pdev->dev, "regulator node not found\n");
index 2ead7e7..14ba80b 100644 (file)
@@ -77,7 +77,7 @@ EXPORT_SYMBOL_GPL(dasd_nofcx);
  * strings when running as a module.
  */
 static char *dasd[256];
-module_param_array(dasd, charp, NULL, 0);
+module_param_array(dasd, charp, NULL, S_IRUGO);
 
 /*
  * Single spinlock to protect devmap and servermap structures and lists.
index 18a3358..bd85fb4 100644 (file)
@@ -43,7 +43,7 @@ config SCSI_DMA
 config SCSI_NETLINK
        bool
        default n
-       select NET
+       depends on NET
 
 config SCSI_PROC_FS
        bool "legacy /proc/scsi/ support"
@@ -257,7 +257,7 @@ config SCSI_SPI_ATTRS
 
 config SCSI_FC_ATTRS
        tristate "FiberChannel Transport Attributes"
-       depends on SCSI
+       depends on SCSI && NET
        select SCSI_NETLINK
        help
          If you wish to export transport-specific information about
@@ -585,28 +585,28 @@ config HYPERV_STORAGE
 
 config LIBFC
        tristate "LibFC module"
-       select SCSI_FC_ATTRS
+       depends on SCSI_FC_ATTRS
        select CRC32
        ---help---
          Fibre Channel library module
 
 config LIBFCOE
        tristate "LibFCoE module"
-       select LIBFC
+       depends on LIBFC
        ---help---
          Library for Fibre Channel over Ethernet module
 
 config FCOE
        tristate "FCoE module"
        depends on PCI
-       select LIBFCOE
+       depends on LIBFCOE
        ---help---
          Fibre Channel over Ethernet module
 
 config FCOE_FNIC
        tristate "Cisco FNIC Driver"
        depends on PCI && X86
-       select LIBFCOE
+       depends on LIBFCOE
        help
          This is support for the Cisco PCI-Express FCoE HBA.
 
@@ -816,7 +816,7 @@ config SCSI_IBMVSCSI
 config SCSI_IBMVFC
        tristate "IBM Virtual FC support"
        depends on PPC_PSERIES && SCSI
-       select SCSI_FC_ATTRS
+       depends on SCSI_FC_ATTRS
        help
          This is the IBM POWER Virtual FC Client
 
@@ -1266,7 +1266,7 @@ source "drivers/scsi/qla4xxx/Kconfig"
 config SCSI_LPFC
        tristate "Emulex LightPulse Fibre Channel Support"
        depends on PCI && SCSI
-       select SCSI_FC_ATTRS
+       depends on SCSI_FC_ATTRS
        select CRC_T10DIF
        help
           This lpfc driver supports the Emulex LightPulse
@@ -1676,7 +1676,7 @@ config SCSI_SUNESP
 config ZFCP
        tristate "FCP host bus adapter driver for IBM eServer zSeries"
        depends on S390 && QDIO && SCSI
-       select SCSI_FC_ATTRS
+       depends on SCSI_FC_ATTRS
        help
           If you want to access SCSI devices attached to your IBM eServer
           zSeries by means of Fibre Channel interfaces say Y.
@@ -1704,7 +1704,7 @@ config SCSI_PM8001
 config SCSI_BFA_FC
        tristate "Brocade BFA Fibre Channel Support"
        depends on PCI && SCSI
-       select SCSI_FC_ATTRS
+       depends on SCSI_FC_ATTRS
        help
          This bfa driver supports all Brocade PCIe FC/FCOE host adapters.
 
index f245d54..0978828 100644 (file)
@@ -1,11 +1,12 @@
 config SCSI_BNX2X_FCOE
        tristate "QLogic NetXtreme II FCoE support"
        depends on PCI
+       depends on (IPV6 || IPV6=n)
+       depends on LIBFC
+       depends on LIBFCOE
        select NETDEVICES
        select ETHERNET
        select NET_VENDOR_BROADCOM
-       select LIBFC
-       select LIBFCOE
        select CNIC
        ---help---
        This driver supports FCoE offload for the QLogic NetXtreme II
index 44ce54e..ba30ff8 100644 (file)
@@ -2,6 +2,7 @@ config SCSI_BNX2_ISCSI
        tristate "QLogic NetXtreme II iSCSI support"
        depends on NET
        depends on PCI
+       depends on (IPV6 || IPV6=n)
        select SCSI_ISCSI_ATTRS
        select NETDEVICES
        select ETHERNET
index 4d03b03..7c7e508 100644 (file)
@@ -1,7 +1,7 @@
 config SCSI_CHELSIO_FCOE
        tristate "Chelsio Communications FCoE support"
        depends on PCI && SCSI
-       select SCSI_FC_ATTRS
+       depends on SCSI_FC_ATTRS
        select FW_LOADER
        help
          This driver supports FCoE Offload functionality over
index ea025e4..191b597 100644 (file)
@@ -717,11 +717,21 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
                        return NULL;
                }
 
+               if (data_size > ISCSI_DEF_MAX_RECV_SEG_LEN) {
+                       iscsi_conn_printk(KERN_ERR, conn, "Invalid buffer len of %u for login task. Max len is %u\n", data_size, ISCSI_DEF_MAX_RECV_SEG_LEN);
+                       return NULL;
+               }
+
                task = conn->login_task;
        } else {
                if (session->state != ISCSI_STATE_LOGGED_IN)
                        return NULL;
 
+               if (data_size != 0) {
+                       iscsi_conn_printk(KERN_ERR, conn, "Can not send data buffer of len %u for op 0x%x\n", data_size, opcode);
+                       return NULL;
+               }
+
                BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
                BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
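
The two checks added above bound what a PDU may carry: login PDUs are limited to the receive segment length, everything else must carry no extra data buffer. A minimal standalone version of that validation (the constant and names are placeholders, not libiscsi's):

#include <stdbool.h>
#include <stdio.h>

#define MAX_RECV_SEG_LEN 8192

static bool pdu_len_ok(bool is_login, unsigned int data_size)
{
	if (is_login)
		return data_size <= MAX_RECV_SEG_LEN;	/* bounded login payload */
	return data_size == 0;				/* others carry no buffer */
}

int main(void)
{
	printf("%d %d %d\n",
	       pdu_len_ok(true, 512),		/* 1 */
	       pdu_len_ok(true, 100000),	/* 0 */
	       pdu_len_ok(false, 16));		/* 0 */
	return 0;
}
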
 
index 23d6072..113e6c9 100644 (file)
@@ -1,7 +1,7 @@
 config SCSI_QLA_FC
        tristate "QLogic QLA2XXX Fibre Channel Support"
        depends on PCI && SCSI
-       select SCSI_FC_ATTRS
+       depends on SCSI_FC_ATTRS
        select FW_LOADER
        ---help---
        This qla2xxx driver supports all QLogic Fibre Channel
@@ -31,7 +31,7 @@ config SCSI_QLA_FC
 config TCM_QLA2XXX
        tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs"
        depends on SCSI_QLA_FC && TARGET_CORE
-       select LIBFC
+       depends on LIBFC
        select BTREE
        default n
        ---help---
index d837dc1..aaea4b9 100644 (file)
@@ -733,12 +733,13 @@ static bool scsi_end_request(struct request *req, int error,
        } else {
                unsigned long flags;
 
+               if (bidi_bytes)
+                       scsi_release_bidi_buffers(cmd);
+
                spin_lock_irqsave(q->queue_lock, flags);
                blk_finish_request(req, error);
                spin_unlock_irqrestore(q->queue_lock, flags);
 
-               if (bidi_bytes)
-                       scsi_release_bidi_buffers(cmd);
                scsi_release_buffers(cmd);
                scsi_next_command(cmd);
        }
index 48f1d26..134fb6e 100644 (file)
@@ -397,24 +397,21 @@ static int davinci_spi_setup(struct spi_device *spi)
        struct spi_master *master = spi->master;
        struct device_node *np = spi->dev.of_node;
        bool internal_cs = true;
-       unsigned long flags = GPIOF_DIR_OUT;
 
        dspi = spi_master_get_devdata(spi->master);
        pdata = &dspi->pdata;
 
-       flags |= (spi->mode & SPI_CS_HIGH) ? GPIOF_INIT_LOW : GPIOF_INIT_HIGH;
-
        if (!(spi->mode & SPI_NO_CS)) {
                if (np && (master->cs_gpios != NULL) && (spi->cs_gpio >= 0)) {
-                       retval = gpio_request_one(spi->cs_gpio,
-                                                 flags, dev_name(&spi->dev));
+                       retval = gpio_direction_output(
+                                     spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
                        internal_cs = false;
                } else if (pdata->chip_sel &&
                           spi->chip_select < pdata->num_chipselect &&
                           pdata->chip_sel[spi->chip_select] != SPI_INTERN_CS) {
                        spi->cs_gpio = pdata->chip_sel[spi->chip_select];
-                       retval = gpio_request_one(spi->cs_gpio,
-                                                 flags, dev_name(&spi->dev));
+                       retval = gpio_direction_output(
+                                     spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
                        internal_cs = false;
                }
 
@@ -439,12 +436,6 @@ static int davinci_spi_setup(struct spi_device *spi)
        return retval;
 }
 
-static void davinci_spi_cleanup(struct spi_device *spi)
-{
-       if (spi->cs_gpio >= 0)
-               gpio_free(spi->cs_gpio);
-}
-
 static int davinci_spi_check_error(struct davinci_spi *dspi, int int_status)
 {
        struct device *sdev = dspi->bitbang.master->dev.parent;
@@ -956,7 +947,6 @@ static int davinci_spi_probe(struct platform_device *pdev)
        master->num_chipselect = pdata->num_chipselect;
        master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 16);
        master->setup = davinci_spi_setup;
-       master->cleanup = davinci_spi_cleanup;
 
        dspi->bitbang.chipselect = davinci_spi_chipselect;
        dspi->bitbang.setup_transfer = davinci_spi_setup_transfer;
@@ -967,6 +957,27 @@ static int davinci_spi_probe(struct platform_device *pdev)
        if (dspi->version == SPI_VERSION_2)
                dspi->bitbang.flags |= SPI_READY;
 
+       if (pdev->dev.of_node) {
+               int i;
+
+               for (i = 0; i < pdata->num_chipselect; i++) {
+                       int cs_gpio = of_get_named_gpio(pdev->dev.of_node,
+                                                       "cs-gpios", i);
+
+                       if (cs_gpio == -EPROBE_DEFER) {
+                               ret = cs_gpio;
+                               goto free_clk;
+                       }
+
+                       if (gpio_is_valid(cs_gpio)) {
+                               ret = devm_gpio_request(&pdev->dev, cs_gpio,
+                                                       dev_name(&pdev->dev));
+                               if (ret)
+                                       goto free_clk;
+                       }
+               }
+       }
+
        r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
        if (r)
                dma_rx_chan = r->start;
index 670f062..0dd0623 100644 (file)
@@ -547,8 +547,7 @@ static int dw_spi_setup(struct spi_device *spi)
        /* Only alloc on first setup */
        chip = spi_get_ctldata(spi);
        if (!chip) {
-               chip = devm_kzalloc(&spi->dev, sizeof(struct chip_data),
-                               GFP_KERNEL);
+               chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
                if (!chip)
                        return -ENOMEM;
                spi_set_ctldata(spi, chip);
@@ -606,6 +605,14 @@ static int dw_spi_setup(struct spi_device *spi)
        return 0;
 }
 
+static void dw_spi_cleanup(struct spi_device *spi)
+{
+       struct chip_data *chip = spi_get_ctldata(spi);
+
+       kfree(chip);
+       spi_set_ctldata(spi, NULL);
+}
+
 /* Restart the controller, disable all interrupts, clean rx fifo */
 static void spi_hw_init(struct dw_spi *dws)
 {
@@ -661,6 +668,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
        master->bus_num = dws->bus_num;
        master->num_chipselect = dws->num_cs;
        master->setup = dw_spi_setup;
+       master->cleanup = dw_spi_cleanup;
        master->transfer_one_message = dw_spi_transfer_one_message;
        master->max_speed_hz = dws->max_freq;
 
index 8ebd724..429e111 100644 (file)
@@ -452,16 +452,16 @@ static int fsl_espi_setup(struct spi_device *spi)
        int retval;
        u32 hw_mode;
        u32 loop_mode;
-       struct spi_mpc8xxx_cs *cs = spi->controller_state;
+       struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
 
        if (!spi->max_speed_hz)
                return -EINVAL;
 
        if (!cs) {
-               cs = devm_kzalloc(&spi->dev, sizeof(*cs), GFP_KERNEL);
+               cs = kzalloc(sizeof(*cs), GFP_KERNEL);
                if (!cs)
                        return -ENOMEM;
-               spi->controller_state = cs;
+               spi_set_ctldata(spi, cs);
        }
 
        mpc8xxx_spi = spi_master_get_devdata(spi->master);
@@ -496,6 +496,14 @@ static int fsl_espi_setup(struct spi_device *spi)
        return 0;
 }
 
+static void fsl_espi_cleanup(struct spi_device *spi)
+{
+       struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
+
+       kfree(cs);
+       spi_set_ctldata(spi, NULL);
+}
+
 void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
 {
        struct fsl_espi_reg *reg_base = mspi->reg_base;
@@ -605,6 +613,7 @@ static struct spi_master * fsl_espi_probe(struct device *dev,
 
        master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
        master->setup = fsl_espi_setup;
+       master->cleanup = fsl_espi_cleanup;
 
        mpc8xxx_spi = spi_master_get_devdata(master);
        mpc8xxx_spi->spi_do_one_msg = fsl_espi_do_one_msg;
index 9452f67..590f31b 100644 (file)
@@ -425,16 +425,16 @@ static int fsl_spi_setup(struct spi_device *spi)
        struct fsl_spi_reg *reg_base;
        int retval;
        u32 hw_mode;
-       struct spi_mpc8xxx_cs   *cs = spi->controller_state;
+       struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
 
        if (!spi->max_speed_hz)
                return -EINVAL;
 
        if (!cs) {
-               cs = devm_kzalloc(&spi->dev, sizeof(*cs), GFP_KERNEL);
+               cs = kzalloc(sizeof(*cs), GFP_KERNEL);
                if (!cs)
                        return -ENOMEM;
-               spi->controller_state = cs;
+               spi_set_ctldata(spi, cs);
        }
        mpc8xxx_spi = spi_master_get_devdata(spi->master);
 
@@ -496,9 +496,13 @@ static int fsl_spi_setup(struct spi_device *spi)
 static void fsl_spi_cleanup(struct spi_device *spi)
 {
        struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
+       struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
 
        if (mpc8xxx_spi->type == TYPE_GRLIB && gpio_is_valid(spi->cs_gpio))
                gpio_free(spi->cs_gpio);
+
+       kfree(cs);
+       spi_set_ctldata(spi, NULL);
 }
 
 static void fsl_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
index 1189cfd..f1f0a58 100644 (file)
@@ -2136,7 +2136,7 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
                                                cs_gpio);
                                else if (gpio_direction_output(cs_gpio, 1))
                                        dev_err(&adev->dev,
-                                               "could set gpio %d as output\n",
+                                               "could not set gpio %d as output\n",
                                                cs_gpio);
                        }
                }
index cd0e08b..3afc266 100644 (file)
@@ -220,7 +220,7 @@ static inline void wait_for_idle(struct rockchip_spi *rs)
        do {
                if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY))
                        return;
-       } while (time_before(jiffies, timeout));
+       } while (!time_after(jiffies, timeout));
 
        dev_warn(rs->dev, "spi controller is in busy state!\n");
 }
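
The loop-condition change relies on the jiffies helpers, which compare by signed subtraction so the test stays correct across counter wraparound. A standalone illustration with a simplified 32-bit version of those macros (the kernel's add type checking on top of this):

#include <stdint.h>
#include <stdio.h>

#define time_after(a, b)	((int32_t)((b) - (a)) < 0)
#define time_before(a, b)	time_after(b, a)

int main(void)
{
	uint32_t now = 0xfffffff0u;		/* just before wraparound */
	uint32_t timeout = now + 0x20;		/* wraps to 0x00000010 */

	printf("%d\n", time_before(now, timeout));	/* 1: not yet expired */
	printf("%d\n", time_after(now, timeout));	/* 0 */
	return 0;
}
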
@@ -529,7 +529,8 @@ static int rockchip_spi_transfer_one(
        int ret = 0;
        struct rockchip_spi *rs = spi_master_get_devdata(master);
 
-       WARN_ON((readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY));
+       WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
+               (readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY));
 
        if (!xfer->tx_buf && !xfer->rx_buf) {
                dev_err(rs->dev, "No buffer for transfer\n");
index 95ac276..6f0602f 100644 (file)
@@ -312,6 +312,8 @@ static int spi_sirfsoc_cmd_transfer(struct spi_device *spi,
        u32 cmd;
 
        sspi = spi_master_get_devdata(spi->master);
+       writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
+       writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
        memcpy(&cmd, sspi->tx, t->len);
        if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST))
                cmd = cpu_to_be32(cmd) >>
@@ -438,7 +440,8 @@ static void spi_sirfsoc_pio_transfer(struct spi_device *spi,
                        sspi->tx_word(sspi);
                writel(SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN |
                        SIRFSOC_SPI_TX_UFLOW_INT_EN |
-                       SIRFSOC_SPI_RX_OFLOW_INT_EN,
+                       SIRFSOC_SPI_RX_OFLOW_INT_EN |
+                       SIRFSOC_SPI_RX_IO_DMA_INT_EN,
                        sspi->base + SIRFSOC_SPI_INT_EN);
                writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
                        sspi->base + SIRFSOC_SPI_TX_RX_EN);
index e7b2e02..69139ce 100644 (file)
@@ -199,7 +199,6 @@ struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
        fence->num_fences = 1;
        atomic_set(&fence->status, 1);
 
-       fence_get(&pt->base);
        fence->cbs[0].sync_pt = &pt->base;
        fence->cbs[0].fence = fence;
        if (fence_add_callback(&pt->base, &fence->cbs[0].cb,
index ea01b8f..6f45ce0 100644 (file)
@@ -85,7 +85,7 @@ int ade7758_probe_trigger(struct iio_dev *indio_dev)
        ret = iio_trigger_register(st->trig);
 
        /* select default trigger */
-       indio_dev->trig = st->trig;
+       indio_dev->trig = iio_trigger_get(st->trig);
        if (ret)
                goto error_free_irq;
 
index 7e3f019..4662e00 100644 (file)
@@ -574,6 +574,9 @@ static void imx_ldb_unbind(struct device *dev, struct device *master,
        for (i = 0; i < 2; i++) {
                struct imx_ldb_channel *channel = &imx_ldb->channel[i];
 
+               if (!channel->connector.funcs)
+                       continue;
+
                channel->connector.funcs->destroy(&channel->connector);
                channel->encoder.funcs->destroy(&channel->encoder);
        }
index 6f393a1..50de10a 100644 (file)
@@ -281,7 +281,8 @@ static void ipu_plane_dpms(struct ipu_plane *ipu_plane, int mode)
 
                ipu_idmac_put(ipu_plane->ipu_ch);
                ipu_dmfc_put(ipu_plane->dmfc);
-               ipu_dp_put(ipu_plane->dp);
+               if (ipu_plane->dp)
+                       ipu_dp_put(ipu_plane->dp);
        }
 }
 
index 0367f5a..0c59e26 100644 (file)
@@ -568,7 +568,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
        if (sb->s_root == NULL) {
                CERROR("%s: can't make root dentry\n",
                        ll_get_fsname(sb, NULL, 0));
-               GOTO(out_root, err = -ENOMEM);
+               GOTO(out_lock_cn_cb, err = -ENOMEM);
        }
 
        sbi->ll_sdev_orig = sb->s_dev;
index f105c2a..164136b 100644 (file)
@@ -350,6 +350,9 @@ static int hostap_set_generic_element(PSDevice pDevice,
 {
        PSMgmtObject    pMgmt = pDevice->pMgmt;
 
+       if (param->u.generic_elem.len > sizeof(pMgmt->abyWPAIE))
+               return -EINVAL;
+
        memcpy(pMgmt->abyWPAIE,
               param->u.generic_elem.data,
               param->u.generic_elem.len
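(The hunk above rejects a caller-supplied element length before it is memcpy'd into the fixed-size abyWPAIE buffer. A minimal user-space sketch of that bounds check follows; the names store_ie, wpa_ie and MAX_WPA_IE_LEN are invented for illustration and are not the driver's.)

/* Reject a caller-controlled length before copying into a fixed buffer. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_WPA_IE_LEN 64

static uint8_t wpa_ie[MAX_WPA_IE_LEN];
static size_t wpa_ie_len;

static int store_ie(const uint8_t *data, size_t len)
{
        if (len > sizeof(wpa_ie))       /* the check the fix adds */
                return -EINVAL;
        memcpy(wpa_ie, data, len);
        wpa_ie_len = len;
        return 0;
}

int main(void)
{
        uint8_t bogus[200] = { 0 };

        printf("%d\n", store_ie(bogus, sizeof(bogus))); /* -EINVAL */
        printf("%d\n", store_ie(bogus, 32));            /* 0 */
        return 0;
}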
index 1f4c794..260c3e1 100644 (file)
@@ -4540,6 +4540,7 @@ static void iscsit_logout_post_handler_diffcid(
 {
        struct iscsi_conn *l_conn;
        struct iscsi_session *sess = conn->sess;
+       bool conn_found = false;
 
        if (!sess)
                return;
@@ -4548,12 +4549,13 @@ static void iscsit_logout_post_handler_diffcid(
        list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) {
                if (l_conn->cid == cid) {
                        iscsit_inc_conn_usage_count(l_conn);
+                       conn_found = true;
                        break;
                }
        }
        spin_unlock_bh(&sess->conn_lock);
 
-       if (!l_conn)
+       if (!conn_found)
                return;
 
        if (l_conn->sock)
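(The conn_found flag above exists because list_for_each_entry() never leaves its cursor NULL: when nothing matches, the cursor ends up pointing at memory derived from the list head, so "if (!l_conn)" can never fire. A rough standalone sketch of that pitfall, using a plain circular list as a stand-in for the kernel macro; all names are made up.)

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node { int cid; struct node *next; };

/* iterate a circular list whose head is a sentinel, kernel-style */
#define for_each_node(pos, head) \
        for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

static struct node *lookup(struct node *head, int cid)
{
        struct node *pos;
        bool found = false;

        for_each_node(pos, head) {
                if (pos->cid == cid) {
                        found = true;
                        break;
                }
        }
        /* after a full walk pos == head, which is not NULL, so testing
         * the cursor alone cannot detect a miss; use the flag instead */
        return found ? pos : NULL;
}

int main(void)
{
        struct node head, a = { .cid = 3 }, b = { .cid = 5 };

        head.next = &a; a.next = &b; b.next = &head;    /* circular */
        printf("%d\n", lookup(&head, 5) != NULL);       /* 1 */
        printf("%d\n", lookup(&head, 9) != NULL);       /* 0 */
        return 0;
}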
index 02f9de2..18c2926 100644 (file)
@@ -601,7 +601,7 @@ int iscsi_copy_param_list(
        param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
        if (!param_list) {
                pr_err("Unable to allocate memory for struct iscsi_param_list.\n");
-               goto err_out;
+               return -1;
        }
        INIT_LIST_HEAD(&param_list->param_list);
        INIT_LIST_HEAD(&param_list->extra_response_list);
index fd90b28..73355f4 100644 (file)
@@ -400,6 +400,8 @@ struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
 
        spin_lock_bh(&conn->cmd_lock);
        list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
+               if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT)
+                       continue;
                if (cmd->init_task_tag == init_task_tag) {
                        spin_unlock_bh(&conn->cmd_lock);
                        return cmd;
index bf55c5a..756def3 100644 (file)
@@ -2363,7 +2363,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_support_##_name(\
                pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \
                return -EINVAL;                                         \
        }                                                               \
-       if (!tmp)                                                       \
+       if (tmp)                                                        \
                t->_var |= _bit;                                        \
        else                                                            \
                t->_var &= ~_bit;                                       \
index 6cd7222..bc286a6 100644 (file)
@@ -664,7 +664,7 @@ spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf)
        buf[0] = dev->transport->get_device_type(dev);
        buf[3] = 0x0c;
        put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
-       put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[12]);
+       put_unaligned_be32(dev->t10_alua.lba_map_segment_multiplier, &buf[12]);
 
        return 0;
 }
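(The fix above replaces a copy/paste where byte offset 12 of the VPD page repeated the segment size instead of carrying the multiplier. A small standalone big-endian packer showing the two distinct fields; offsets mirror the hunk, values and the put_be32 helper are purely illustrative.)

#include <stdint.h>
#include <stdio.h>

static void put_be32(uint32_t val, uint8_t *p)
{
        p[0] = val >> 24;
        p[1] = val >> 16;
        p[2] = val >> 8;
        p[3] = val;
}

int main(void)
{
        uint8_t buf[16] = { 0 };
        uint32_t segment_size = 0x00010000;     /* illustrative value */
        uint32_t segment_mult = 4;              /* illustrative value */

        put_be32(segment_size, &buf[8]);        /* segment size at byte 8 */
        put_be32(segment_mult, &buf[12]);       /* multiplier at byte 12,
                                                   not segment_size again */
        printf("%02x %02x\n", buf[9], buf[15]); /* 01 04 */
        return 0;
}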
index 4db7987..57d9df8 100644 (file)
@@ -540,6 +540,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = {
        { "INT3434", 0 },
        { "INT3435", 0 },
        { "80860F0A", 0 },
+       { "8086228A", 0 },
        { },
 };
 MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
index 7b63677..d7d4584 100644 (file)
@@ -526,6 +526,45 @@ static void atmel_enable_ms(struct uart_port *port)
        UART_PUT_IER(port, ier);
 }
 
+/*
+ * Disable modem status interrupts
+ */
+static void atmel_disable_ms(struct uart_port *port)
+{
+       struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+       uint32_t idr = 0;
+
+       /*
+        * Interrupt should not be disabled twice
+        */
+       if (!atmel_port->ms_irq_enabled)
+               return;
+
+       atmel_port->ms_irq_enabled = false;
+
+       if (atmel_port->gpio_irq[UART_GPIO_CTS] >= 0)
+               disable_irq(atmel_port->gpio_irq[UART_GPIO_CTS]);
+       else
+               idr |= ATMEL_US_CTSIC;
+
+       if (atmel_port->gpio_irq[UART_GPIO_DSR] >= 0)
+               disable_irq(atmel_port->gpio_irq[UART_GPIO_DSR]);
+       else
+               idr |= ATMEL_US_DSRIC;
+
+       if (atmel_port->gpio_irq[UART_GPIO_RI] >= 0)
+               disable_irq(atmel_port->gpio_irq[UART_GPIO_RI]);
+       else
+               idr |= ATMEL_US_RIIC;
+
+       if (atmel_port->gpio_irq[UART_GPIO_DCD] >= 0)
+               disable_irq(atmel_port->gpio_irq[UART_GPIO_DCD]);
+       else
+               idr |= ATMEL_US_DCDIC;
+
+       UART_PUT_IDR(port, idr);
+}
+
 /*
  * Control the transmission of a break signal
  */
@@ -1993,7 +2032,9 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
 
        /* CTS flow-control and modem-status interrupts */
        if (UART_ENABLE_MS(port, termios->c_cflag))
-               port->ops->enable_ms(port);
+               atmel_enable_ms(port);
+       else
+               atmel_disable_ms(port);
 
        spin_unlock_irqrestore(&port->lock, flags);
 }
index 01951d2..806e4bc 100644 (file)
@@ -581,7 +581,7 @@ static unsigned int cdns_uart_tx_empty(struct uart_port *port)
 {
        unsigned int status;
 
-       status = cdns_uart_readl(CDNS_UART_ISR_OFFSET) & CDNS_UART_IXR_TXEMPTY;
+       status = cdns_uart_readl(CDNS_UART_SR_OFFSET) & CDNS_UART_SR_TXEMPTY;
        return status ? TIOCSER_TEMT : 0;
 }
 
index d72b9d2..4935ac3 100644 (file)
 static void ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
 {
        struct device *dev = ci->gadget.dev.parent;
-       int val;
 
        switch (event) {
        case CI_HDRC_CONTROLLER_RESET_EVENT:
                dev_dbg(dev, "CI_HDRC_CONTROLLER_RESET_EVENT received\n");
                writel(0, USB_AHBBURST);
                writel(0, USB_AHBMODE);
+               usb_phy_init(ci->transceiver);
                break;
        case CI_HDRC_CONTROLLER_STOPPED_EVENT:
                dev_dbg(dev, "CI_HDRC_CONTROLLER_STOPPED_EVENT received\n");
@@ -34,10 +34,7 @@ static void ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
                 * Put the transceiver in non-driving mode. Otherwise host
                 * may not detect soft-disconnection.
                 */
-               val = usb_phy_io_read(ci->transceiver, ULPI_FUNC_CTRL);
-               val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
-               val |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
-               usb_phy_io_write(ci->transceiver, val, ULPI_FUNC_CTRL);
+               usb_phy_notify_disconnect(ci->transceiver, USB_SPEED_UNKNOWN);
                break;
        default:
                dev_dbg(dev, "unknown ci_hdrc event\n");
index 46f5161..d481c99 100644 (file)
@@ -5024,9 +5024,10 @@ static void hub_events(void)
 
                hub = list_entry(tmp, struct usb_hub, event_list);
                kref_get(&hub->kref);
+               hdev = hub->hdev;
+               usb_get_dev(hdev);
                spin_unlock_irq(&hub_event_lock);
 
-               hdev = hub->hdev;
                hub_dev = hub->intfdev;
                intf = to_usb_interface(hub_dev);
                dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x\n",
@@ -5139,6 +5140,7 @@ static void hub_events(void)
                usb_autopm_put_interface(intf);
  loop_disconnected:
                usb_unlock_device(hdev);
+               usb_put_dev(hdev);
                kref_put(&hub->kref, hub_release);
 
        } /* end while (1) */
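(The hunk above pins the hub's usb_device with usb_get_dev() while hub_event_lock is still held and drops the reference only after the event has been serviced, so the device cannot vanish mid-processing. A minimal sketch of that ordering follows; the lock granularity and all names (dev_get, dev_put, list_lock) are invented for illustration, not the USB core's API.)

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct dev {
        int refs;       /* protected by list_lock in this sketch */
        int id;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void dev_get(struct dev *d) { d->refs++; }

static void dev_put(struct dev *d)
{
        if (--d->refs == 0)
                free(d);
}

static void handle_event(struct dev *d)
{
        pthread_mutex_lock(&list_lock);
        dev_get(d);                     /* pin it before dropping the lock */
        pthread_mutex_unlock(&list_lock);

        /* long-running work: d cannot be freed under us */
        printf("servicing dev %d\n", d->id);

        pthread_mutex_lock(&list_lock);
        dev_put(d);                     /* may free it now */
        pthread_mutex_unlock(&list_lock);
}

int main(void)
{
        struct dev *d = calloc(1, sizeof(*d));

        d->id = 42;
        d->refs = 1;
        handle_event(d);
        pthread_mutex_lock(&list_lock);
        dev_put(d);                     /* drop the initial reference */
        pthread_mutex_unlock(&list_lock);
        return 0;
}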
index 7c9618e..ce6071d 100644 (file)
@@ -1649,6 +1649,7 @@ static void s3c_hsotg_txfifo_flush(struct s3c_hsotg *hsotg, unsigned int idx)
                        dev_err(hsotg->dev,
                                "%s: timeout flushing fifo (GRSTCTL=%08x)\n",
                                __func__, val);
+                       break;
                }
 
                udelay(1);
@@ -2747,13 +2748,14 @@ static void s3c_hsotg_phy_enable(struct s3c_hsotg *hsotg)
 
        dev_dbg(hsotg->dev, "pdev 0x%p\n", pdev);
 
-       if (hsotg->phy) {
-               phy_init(hsotg->phy);
-               phy_power_on(hsotg->phy);
-       } else if (hsotg->uphy)
+       if (hsotg->uphy)
                usb_phy_init(hsotg->uphy);
-       else if (hsotg->plat->phy_init)
+       else if (hsotg->plat && hsotg->plat->phy_init)
                hsotg->plat->phy_init(pdev, hsotg->plat->phy_type);
+       else {
+               phy_init(hsotg->phy);
+               phy_power_on(hsotg->phy);
+       }
 }
 
 /**
@@ -2767,13 +2769,14 @@ static void s3c_hsotg_phy_disable(struct s3c_hsotg *hsotg)
 {
        struct platform_device *pdev = to_platform_device(hsotg->dev);
 
-       if (hsotg->phy) {
-               phy_power_off(hsotg->phy);
-               phy_exit(hsotg->phy);
-       } else if (hsotg->uphy)
+       if (hsotg->uphy)
                usb_phy_shutdown(hsotg->uphy);
-       else if (hsotg->plat->phy_exit)
+       else if (hsotg->plat && hsotg->plat->phy_exit)
                hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type);
+       else {
+               phy_power_off(hsotg->phy);
+               phy_exit(hsotg->phy);
+       }
 }
 
 /**
@@ -2892,13 +2895,11 @@ static int s3c_hsotg_udc_stop(struct usb_gadget *gadget,
                return -ENODEV;
 
        /* all endpoints should be shutdown */
-       for (ep = 0; ep < hsotg->num_of_eps; ep++)
+       for (ep = 1; ep < hsotg->num_of_eps; ep++)
                s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);
 
        spin_lock_irqsave(&hsotg->lock, flags);
 
-       s3c_hsotg_phy_disable(hsotg);
-
        if (!driver)
                hsotg->driver = NULL;
 
@@ -2941,7 +2942,6 @@ static int s3c_hsotg_pullup(struct usb_gadget *gadget, int is_on)
                s3c_hsotg_phy_enable(hsotg);
                s3c_hsotg_core_init(hsotg);
        } else {
-               s3c_hsotg_disconnect(hsotg);
                s3c_hsotg_phy_disable(hsotg);
        }
 
@@ -3441,13 +3441,6 @@ static int s3c_hsotg_probe(struct platform_device *pdev)
 
        hsotg->irq = ret;
 
-       ret = devm_request_irq(&pdev->dev, hsotg->irq, s3c_hsotg_irq, 0,
-                               dev_name(dev), hsotg);
-       if (ret < 0) {
-               dev_err(dev, "cannot claim IRQ\n");
-               goto err_clk;
-       }
-
        dev_info(dev, "regs %p, irq %d\n", hsotg->regs, hsotg->irq);
 
        hsotg->gadget.max_speed = USB_SPEED_HIGH;
@@ -3488,9 +3481,6 @@ static int s3c_hsotg_probe(struct platform_device *pdev)
        if (hsotg->phy && (phy_get_bus_width(phy) == 8))
                hsotg->phyif = GUSBCFG_PHYIF8;
 
-       if (hsotg->phy)
-               phy_init(hsotg->phy);
-
        /* usb phy enable */
        s3c_hsotg_phy_enable(hsotg);
 
@@ -3498,6 +3488,17 @@ static int s3c_hsotg_probe(struct platform_device *pdev)
        s3c_hsotg_init(hsotg);
        s3c_hsotg_hw_cfg(hsotg);
 
+       ret = devm_request_irq(&pdev->dev, hsotg->irq, s3c_hsotg_irq, 0,
+                               dev_name(dev), hsotg);
+       if (ret < 0) {
+               s3c_hsotg_phy_disable(hsotg);
+               clk_disable_unprepare(hsotg->clk);
+               regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
+                                      hsotg->supplies);
+               dev_err(dev, "cannot claim IRQ\n");
+               goto err_clk;
+       }
+
        /* hsotg->num_of_eps holds number of EPs other than ep0 */
 
        if (hsotg->num_of_eps == 0) {
@@ -3582,9 +3583,6 @@ static int s3c_hsotg_remove(struct platform_device *pdev)
                usb_gadget_unregister_driver(hsotg->driver);
        }
 
-       s3c_hsotg_phy_disable(hsotg);
-       if (hsotg->phy)
-               phy_exit(hsotg->phy);
        clk_disable_unprepare(hsotg->clk);
 
        return 0;
index b769c1f..9069984 100644 (file)
@@ -799,20 +799,21 @@ static int dwc3_remove(struct platform_device *pdev)
 {
        struct dwc3     *dwc = platform_get_drvdata(pdev);
 
+       dwc3_debugfs_exit(dwc);
+       dwc3_core_exit_mode(dwc);
+       dwc3_event_buffers_cleanup(dwc);
+       dwc3_free_event_buffers(dwc);
+
        usb_phy_set_suspend(dwc->usb2_phy, 1);
        usb_phy_set_suspend(dwc->usb3_phy, 1);
        phy_power_off(dwc->usb2_generic_phy);
        phy_power_off(dwc->usb3_generic_phy);
 
+       dwc3_core_exit(dwc);
+
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
-       dwc3_debugfs_exit(dwc);
-       dwc3_core_exit_mode(dwc);
-       dwc3_event_buffers_cleanup(dwc);
-       dwc3_free_event_buffers(dwc);
-       dwc3_core_exit(dwc);
-
        return 0;
 }
 
index 9dcfbe7..fc0de37 100644 (file)
@@ -576,9 +576,9 @@ static int dwc3_omap_remove(struct platform_device *pdev)
        if (omap->extcon_id_dev.edev)
                extcon_unregister_interest(&omap->extcon_id_dev);
        dwc3_omap_disable_irqs(omap);
+       device_for_each_child(&pdev->dev, NULL, dwc3_omap_remove_core);
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
-       device_for_each_child(&pdev->dev, NULL, dwc3_omap_remove_core);
 
        return 0;
 }
index 349cacc..490a6ca 100644 (file)
@@ -527,7 +527,7 @@ static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
                dep->stream_capable = true;
        }
 
-       if (usb_endpoint_xfer_isoc(desc))
+       if (!usb_endpoint_xfer_control(desc))
                params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
 
        /*
@@ -1225,16 +1225,17 @@ static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
 
        int                             ret;
 
+       spin_lock_irqsave(&dwc->lock, flags);
        if (!dep->endpoint.desc) {
                dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
                                request, ep->name);
+               spin_unlock_irqrestore(&dwc->lock, flags);
                return -ESHUTDOWN;
        }
 
        dev_vdbg(dwc->dev, "queing request %p to %s length %d\n",
                        request, ep->name, request->length);
 
-       spin_lock_irqsave(&dwc->lock, flags);
        ret = __dwc3_gadget_ep_queue(dep, req);
        spin_unlock_irqrestore(&dwc->lock, flags);
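(The hunk above moves the "is this endpoint still configured?" test under dwc->lock so a concurrent disable cannot slip in between the check and the enqueue. A standalone sketch of the check-then-act-under-one-lock pattern; the ep structure and function names are invented, not dwc3's.)

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct ep {
        pthread_mutex_t lock;
        bool enabled;
        int queued;
};

static int ep_queue(struct ep *ep)
{
        pthread_mutex_lock(&ep->lock);          /* take the lock first ... */
        if (!ep->enabled) {                     /* ... then check ... */
                pthread_mutex_unlock(&ep->lock);
                return -ESHUTDOWN;
        }
        ep->queued++;                           /* ... then act, still locked */
        pthread_mutex_unlock(&ep->lock);
        return 0;
}

static void ep_disable(struct ep *ep)
{
        pthread_mutex_lock(&ep->lock);
        ep->enabled = false;
        pthread_mutex_unlock(&ep->lock);
}

int main(void)
{
        struct ep ep = { .lock = PTHREAD_MUTEX_INITIALIZER, .enabled = true };

        printf("%d\n", ep_queue(&ep));  /* 0 */
        ep_disable(&ep);
        printf("%d\n", ep_queue(&ep));  /* -ESHUTDOWN */
        return 0;
}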
 
@@ -2041,12 +2042,6 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
                dwc3_endpoint_transfer_complete(dwc, dep, event);
                break;
        case DWC3_DEPEVT_XFERINPROGRESS:
-               if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
-                       dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
-                                       dep->name);
-                       return;
-               }
-
                dwc3_endpoint_transfer_complete(dwc, dep, event);
                break;
        case DWC3_DEPEVT_XFERNOTREADY:
index dc30adf..0dc3552 100644 (file)
@@ -155,6 +155,12 @@ struct ffs_io_data {
        struct usb_request *req;
 };
 
+struct ffs_desc_helper {
+       struct ffs_data *ffs;
+       unsigned interfaces_count;
+       unsigned eps_count;
+};
+
 static int  __must_check ffs_epfiles_create(struct ffs_data *ffs);
 static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
 
@@ -1830,7 +1836,8 @@ static int __ffs_data_do_entity(enum ffs_entity_type type,
                                u8 *valuep, struct usb_descriptor_header *desc,
                                void *priv)
 {
-       struct ffs_data *ffs = priv;
+       struct ffs_desc_helper *helper = priv;
+       struct usb_endpoint_descriptor *d;
 
        ENTER();
 
@@ -1844,8 +1851,8 @@ static int __ffs_data_do_entity(enum ffs_entity_type type,
                 * encountered interface "n" then there are at least
                 * "n+1" interfaces.
                 */
-               if (*valuep >= ffs->interfaces_count)
-                       ffs->interfaces_count = *valuep + 1;
+               if (*valuep >= helper->interfaces_count)
+                       helper->interfaces_count = *valuep + 1;
                break;
 
        case FFS_STRING:
@@ -1853,14 +1860,22 @@ static int __ffs_data_do_entity(enum ffs_entity_type type,
                 * Strings are indexed from 1 (0 is magic ;) reserved
                 * for languages list or some such)
                 */
-               if (*valuep > ffs->strings_count)
-                       ffs->strings_count = *valuep;
+               if (*valuep > helper->ffs->strings_count)
+                       helper->ffs->strings_count = *valuep;
                break;
 
        case FFS_ENDPOINT:
-               /* Endpoints are indexed from 1 as well. */
-               if ((*valuep & USB_ENDPOINT_NUMBER_MASK) > ffs->eps_count)
-                       ffs->eps_count = (*valuep & USB_ENDPOINT_NUMBER_MASK);
+               d = (void *)desc;
+               helper->eps_count++;
+               if (helper->eps_count >= 15)
+                       return -EINVAL;
+               /* Check if descriptors for any speed were already parsed */
+               if (!helper->ffs->eps_count && !helper->ffs->interfaces_count)
+                       helper->ffs->eps_addrmap[helper->eps_count] =
+                               d->bEndpointAddress;
+               else if (helper->ffs->eps_addrmap[helper->eps_count] !=
+                               d->bEndpointAddress)
+                       return -EINVAL;
                break;
        }
 
@@ -2053,6 +2068,7 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
        char *data = _data, *raw_descs;
        unsigned os_descs_count = 0, counts[3], flags;
        int ret = -EINVAL, i;
+       struct ffs_desc_helper helper;
 
        ENTER();
 
@@ -2101,13 +2117,29 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
 
        /* Read descriptors */
        raw_descs = data;
+       helper.ffs = ffs;
        for (i = 0; i < 3; ++i) {
                if (!counts[i])
                        continue;
+               helper.interfaces_count = 0;
+               helper.eps_count = 0;
                ret = ffs_do_descs(counts[i], data, len,
-                                  __ffs_data_do_entity, ffs);
+                                  __ffs_data_do_entity, &helper);
                if (ret < 0)
                        goto error;
+               if (!ffs->eps_count && !ffs->interfaces_count) {
+                       ffs->eps_count = helper.eps_count;
+                       ffs->interfaces_count = helper.interfaces_count;
+               } else {
+                       if (ffs->eps_count != helper.eps_count) {
+                               ret = -EINVAL;
+                               goto error;
+                       }
+                       if (ffs->interfaces_count != helper.interfaces_count) {
+                               ret = -EINVAL;
+                               goto error;
+                       }
+               }
                data += ret;
                len  -= ret;
        }
@@ -2342,9 +2374,18 @@ static void ffs_event_add(struct ffs_data *ffs,
        spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
 }
 
-
 /* Bind/unbind USB function hooks *******************************************/
 
+static int ffs_ep_addr2idx(struct ffs_data *ffs, u8 endpoint_address)
+{
+       int i;
+
+       for (i = 1; i < ARRAY_SIZE(ffs->eps_addrmap); ++i)
+               if (ffs->eps_addrmap[i] == endpoint_address)
+                       return i;
+       return -ENOENT;
+}
+
 static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
                                    struct usb_descriptor_header *desc,
                                    void *priv)
@@ -2378,7 +2419,10 @@ static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
        if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
                return 0;
 
-       idx = (ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) - 1;
+       idx = ffs_ep_addr2idx(func->ffs, ds->bEndpointAddress) - 1;
+       if (idx < 0)
+               return idx;
+
        ffs_ep = func->eps + idx;
 
        if (unlikely(ffs_ep->descs[ep_desc_id])) {
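(The f_fs hunks above build an endpoint address map: the first descriptor set records each endpoint's bEndpointAddress, later speed sets must match it, and bind-time code translates an address back to its index. A standalone sketch of that scheme; sizes loosely mirror the hunk, but check_ep and ep_addr2idx are invented helpers, not the f_fs API.)

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_EPS 15

static uint8_t eps_addrmap[MAX_EPS];    /* slot 0 unused, like the driver */

/* record addresses from the first set, verify later sets against it */
static int check_ep(unsigned idx, uint8_t addr, int first_set)
{
        if (idx >= MAX_EPS)
                return -EINVAL;
        if (first_set)
                eps_addrmap[idx] = addr;
        else if (eps_addrmap[idx] != addr)
                return -EINVAL;         /* speeds disagree on the address */
        return 0;
}

static int ep_addr2idx(uint8_t addr)
{
        unsigned i;

        for (i = 1; i < MAX_EPS; i++)
                if (eps_addrmap[i] == addr)
                        return i;
        return -ENOENT;
}

int main(void)
{
        /* full-speed set defines EP 0x81 (IN) and 0x02 (OUT) */
        check_ep(1, 0x81, 1);
        check_ep(2, 0x02, 1);

        /* high-speed set must reuse the same addresses */
        printf("%d\n", check_ep(1, 0x81, 0));   /* 0 */
        printf("%d\n", check_ep(2, 0x03, 0));   /* -EINVAL: mismatch */
        printf("%d\n", ep_addr2idx(0x02));      /* 2 */
        return 0;
}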
index 63d6e71..d48897e 100644 (file)
@@ -224,6 +224,8 @@ struct ffs_data {
        void                            *ms_os_descs_ext_prop_name_avail;
        void                            *ms_os_descs_ext_prop_data_avail;
 
+       u8                              eps_addrmap[15];
+
        unsigned short                  strings_count;
        unsigned short                  interfaces_count;
        unsigned short                  eps_count;
index ae811d8..ad39f89 100644 (file)
@@ -12,7 +12,7 @@
 
 
 #ifndef __FUSB300_UDC_H__
-#define __FUSB300_UDC_H_
+#define __FUSB300_UDC_H__
 
 #include <linux/kernel.h>
 
index f4eac11..2e95715 100644 (file)
@@ -3320,7 +3320,7 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
        if (stat & tmp) {
                writel(tmp, &dev->regs->irqstat1);
                if ((((stat & BIT(ROOT_PORT_RESET_INTERRUPT)) &&
-                               (readl(&dev->usb->usbstat) & mask)) ||
+                               ((readl(&dev->usb->usbstat) & mask) == 0)) ||
                                ((readl(&dev->usb->usbctl) &
                                        BIT(VBUS_PIN)) == 0)) &&
                                (dev->gadget.speed != USB_SPEED_UNKNOWN)) {
index 81cda09..488a308 100644 (file)
@@ -965,8 +965,6 @@ rescan:
        }
 
        qh->exception = 1;
-       if (ehci->rh_state < EHCI_RH_RUNNING)
-               qh->qh_state = QH_STATE_IDLE;
        switch (qh->qh_state) {
        case QH_STATE_LINKED:
                WARN_ON(!list_empty(&qh->qtd_list));
index aa79e87..69aece3 100644 (file)
@@ -468,7 +468,8 @@ static void xhci_hub_report_usb2_link_state(u32 *status, u32 status_reg)
 }
 
 /* Updates Link Status for super Speed port */
-static void xhci_hub_report_usb3_link_state(u32 *status, u32 status_reg)
+static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
+               u32 *status, u32 status_reg)
 {
        u32 pls = status_reg & PORT_PLS_MASK;
 
@@ -507,7 +508,8 @@ static void xhci_hub_report_usb3_link_state(u32 *status, u32 status_reg)
                 * in which sometimes the port enters compliance mode
                 * caused by a delay on the host-device negotiation.
                 */
-               if (pls == USB_SS_PORT_LS_COMP_MOD)
+               if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
+                               (pls == USB_SS_PORT_LS_COMP_MOD))
                        pls |= USB_PORT_STAT_CONNECTION;
        }
 
@@ -666,7 +668,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
        }
        /* Update Port Link State */
        if (hcd->speed == HCD_USB3) {
-               xhci_hub_report_usb3_link_state(&status, raw_port_status);
+               xhci_hub_report_usb3_link_state(xhci, &status, raw_port_status);
                /*
                 * Verify if all USB3 Ports Have entered U0 already.
                 * Delete Compliance Mode Timer if so.
index 8056d90..8936211 100644 (file)
@@ -1812,6 +1812,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 
        if (xhci->lpm_command)
                xhci_free_command(xhci, xhci->lpm_command);
+       xhci->lpm_command = NULL;
        if (xhci->cmd_ring)
                xhci_ring_free(xhci, xhci->cmd_ring);
        xhci->cmd_ring = NULL;
@@ -1819,7 +1820,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
        xhci_cleanup_command_queue(xhci);
 
        num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
-       for (i = 0; i < num_ports; i++) {
+       for (i = 0; i < num_ports && xhci->rh_bw; i++) {
                struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
                for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
                        struct list_head *ep = &bwt->interval_bw[j].endpoints;
index c020b09..c4a8fca 100644 (file)
@@ -3971,13 +3971,21 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
        int ret;
 
        spin_lock_irqsave(&xhci->lock, flags);
-       if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) {
+
+       virt_dev = xhci->devs[udev->slot_id];
+
+       /*
+        * virt_dev might not exists yet if xHC resumed from hibernate (S4) and
+        * xHC was re-initialized. Exit latency will be set later after
+        * hub_port_finish_reset() is done and xhci->devs[] are re-allocated
+        */
+
+       if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
                spin_unlock_irqrestore(&xhci->lock, flags);
                return 0;
        }
 
        /* Attempt to issue an Evaluate Context command to change the MEL. */
-       virt_dev = xhci->devs[udev->slot_id];
        command = xhci->lpm_command;
        ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
        if (!ctrl_ctx) {
index 47ae645..3ee133f 100644 (file)
@@ -39,6 +39,7 @@ struct cppi41_dma_channel {
        u32 transferred;
        u32 packet_sz;
        struct list_head tx_check;
+       int tx_zlp;
 };
 
 #define MUSB_DMA_NUM_CHANNELS 15
@@ -122,6 +123,8 @@ static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
 {
        struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
        struct musb *musb = hw_ep->musb;
+       void __iomem *epio = hw_ep->regs;
+       u16 csr;
 
        if (!cppi41_channel->prog_len ||
            (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {
@@ -131,15 +134,24 @@ static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
                        cppi41_channel->transferred;
                cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
                cppi41_channel->channel.rx_packet_done = true;
+
+               /*
+                * transmit ZLP using PIO mode for transfers which size is
+                * multiple of EP packet size.
+                */
+               if (cppi41_channel->tx_zlp && (cppi41_channel->transferred %
+                                       cppi41_channel->packet_sz) == 0) {
+                       musb_ep_select(musb->mregs, hw_ep->epnum);
+                       csr = MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY;
+                       musb_writew(epio, MUSB_TXCSR, csr);
+               }
                musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
        } else {
                /* next iteration, reload */
                struct dma_chan *dc = cppi41_channel->dc;
                struct dma_async_tx_descriptor *dma_desc;
                enum dma_transfer_direction direction;
-               u16 csr;
                u32 remain_bytes;
-               void __iomem *epio = cppi41_channel->hw_ep->regs;
 
                cppi41_channel->buf_addr += cppi41_channel->packet_sz;
 
@@ -363,6 +375,7 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
        cppi41_channel->total_len = len;
        cppi41_channel->transferred = 0;
        cppi41_channel->packet_sz = packet_sz;
+       cppi41_channel->tx_zlp = (cppi41_channel->is_tx && mode) ? 1 : 0;
 
        /*
         * Due to AM335x' Advisory 1.0.13 we are not allowed to transfer more
index c42bdf0..00972ec 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright 2012-2013 Freescale Semiconductor, Inc.
+ * Copyright 2012-2014 Freescale Semiconductor, Inc.
  * Copyright (C) 2012 Marek Vasut <marex@denx.de>
  * on behalf of DENX Software Engineering GmbH
  *
@@ -125,7 +125,13 @@ static const struct mxs_phy_data imx6sl_phy_data = {
                MXS_PHY_NEED_IP_FIX,
 };
 
+static const struct mxs_phy_data imx6sx_phy_data = {
+       .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS |
+               MXS_PHY_NEED_IP_FIX,
+};
+
 static const struct of_device_id mxs_phy_dt_ids[] = {
+       { .compatible = "fsl,imx6sx-usbphy", .data = &imx6sx_phy_data, },
        { .compatible = "fsl,imx6sl-usbphy", .data = &imx6sl_phy_data, },
        { .compatible = "fsl,imx6q-usbphy", .data = &imx6q_phy_data, },
        { .compatible = "fsl,imx23-usbphy", .data = &imx23_phy_data, },
index 13b4fa2..886f180 100644 (file)
@@ -878,8 +878,8 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
                return -ENOMEM;
        }
 
-       tegra_phy->config = devm_kzalloc(&pdev->dev,
-               sizeof(*tegra_phy->config), GFP_KERNEL);
+       tegra_phy->config = devm_kzalloc(&pdev->dev, sizeof(*config),
+                                        GFP_KERNEL);
        if (!tegra_phy->config) {
                dev_err(&pdev->dev,
                        "unable to allocate memory for USB UTMIP config\n");
index 4fd3653..b0c97a3 100644 (file)
@@ -108,19 +108,45 @@ static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
        return list_first_entry(&pipe->list, struct usbhs_pkt, node);
 }
 
+static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
+                             struct usbhs_fifo *fifo);
+static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
+                                struct usbhs_fifo *fifo);
+static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
+                                           struct usbhs_pkt *pkt);
+#define usbhsf_dma_map(p)      __usbhsf_dma_map_ctrl(p, 1)
+#define usbhsf_dma_unmap(p)    __usbhsf_dma_map_ctrl(p, 0)
+static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map);
 struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
 {
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+       struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
        unsigned long flags;
 
        /********************  spin lock ********************/
        usbhs_lock(priv, flags);
 
+       usbhs_pipe_disable(pipe);
+
        if (!pkt)
                pkt = __usbhsf_pkt_get(pipe);
 
-       if (pkt)
+       if (pkt) {
+               struct dma_chan *chan = NULL;
+
+               if (fifo)
+                       chan = usbhsf_dma_chan_get(fifo, pkt);
+               if (chan) {
+                       dmaengine_terminate_all(chan);
+                       usbhsf_fifo_clear(pipe, fifo);
+                       usbhsf_dma_unmap(pkt);
+               }
+
                __usbhsf_pkt_del(pkt);
+       }
+
+       if (fifo)
+               usbhsf_fifo_unselect(pipe, fifo);
 
        usbhs_unlock(priv, flags);
        /********************  spin unlock ******************/
@@ -544,6 +570,7 @@ static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
                usbhsf_send_terminator(pipe, fifo);
 
        usbhsf_tx_irq_ctrl(pipe, !*is_done);
+       usbhs_pipe_running(pipe, !*is_done);
        usbhs_pipe_enable(pipe);
 
        dev_dbg(dev, "  send %d (%d/ %d/ %d/ %d)\n",
@@ -570,12 +597,21 @@ usbhs_fifo_write_busy:
         * retry in interrupt
         */
        usbhsf_tx_irq_ctrl(pipe, 1);
+       usbhs_pipe_running(pipe, 1);
 
        return ret;
 }
 
+static int usbhsf_pio_prepare_push(struct usbhs_pkt *pkt, int *is_done)
+{
+       if (usbhs_pipe_is_running(pkt->pipe))
+               return 0;
+
+       return usbhsf_pio_try_push(pkt, is_done);
+}
+
 struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
-       .prepare = usbhsf_pio_try_push,
+       .prepare = usbhsf_pio_prepare_push,
        .try_run = usbhsf_pio_try_push,
 };
 
@@ -589,6 +625,9 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
        if (usbhs_pipe_is_busy(pipe))
                return 0;
 
+       if (usbhs_pipe_is_running(pipe))
+               return 0;
+
        /*
         * pipe enable to prepare packet receive
         */
@@ -597,6 +636,7 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
 
        usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
        usbhs_pipe_enable(pipe);
+       usbhs_pipe_running(pipe, 1);
        usbhsf_rx_irq_ctrl(pipe, 1);
 
        return 0;
@@ -642,6 +682,7 @@ static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
            (total_len < maxp)) {               /* short packet */
                *is_done = 1;
                usbhsf_rx_irq_ctrl(pipe, 0);
+               usbhs_pipe_running(pipe, 0);
                usbhs_pipe_disable(pipe);       /* disable pipe first */
        }
 
@@ -763,8 +804,6 @@ static void __usbhsf_dma_ctrl(struct usbhs_pipe *pipe,
        usbhs_bset(priv, fifo->sel, DREQE, dreqe);
 }
 
-#define usbhsf_dma_map(p)      __usbhsf_dma_map_ctrl(p, 1)
-#define usbhsf_dma_unmap(p)    __usbhsf_dma_map_ctrl(p, 0)
 static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
 {
        struct usbhs_pipe *pipe = pkt->pipe;
@@ -805,6 +844,7 @@ static void xfer_work(struct work_struct *work)
        dev_dbg(dev, "  %s %d (%d/ %d)\n",
                fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
 
+       usbhs_pipe_running(pipe, 1);
        usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
        usbhs_pipe_enable(pipe);
        usbhsf_dma_start(pipe, fifo);
@@ -836,6 +876,10 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
        if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
                goto usbhsf_pio_prepare_push;
 
+       /* return at this time if the pipe is running */
+       if (usbhs_pipe_is_running(pipe))
+               return 0;
+
        /* get enable DMA fifo */
        fifo = usbhsf_get_dma_fifo(priv, pkt);
        if (!fifo)
@@ -869,15 +913,29 @@ usbhsf_pio_prepare_push:
 static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done)
 {
        struct usbhs_pipe *pipe = pkt->pipe;
+       int is_short = pkt->trans % usbhs_pipe_get_maxpacket(pipe);
+
+       pkt->actual += pkt->trans;
 
-       pkt->actual = pkt->trans;
+       if (pkt->actual < pkt->length)
+               *is_done = 0;           /* there are remainder data */
+       else if (is_short)
+               *is_done = 1;           /* short packet */
+       else
+               *is_done = !pkt->zero;  /* send zero packet? */
 
-       *is_done = !pkt->zero;  /* send zero packet ? */
+       usbhs_pipe_running(pipe, !*is_done);
 
        usbhsf_dma_stop(pipe, pipe->fifo);
        usbhsf_dma_unmap(pkt);
        usbhsf_fifo_unselect(pipe, pipe->fifo);
 
+       if (!*is_done) {
+               /* change handler to PIO */
+               pkt->handler = &usbhs_fifo_pio_push_handler;
+               return pkt->handler->try_run(pkt, is_done);
+       }
+
        return 0;
 }
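(The reworked usbhsf_dma_push_done() above accumulates pkt->actual and then decides between "more data to send", "done because a short packet ended the transfer", and "a requested zero-length packet still has to go out via PIO". A compact stand-alone decision function capturing that logic; the enum and parameter names are invented for the sketch.)

#include <stdbool.h>
#include <stdio.h>

enum tx_state { TX_MORE_DATA, TX_DONE, TX_NEED_ZLP };

static enum tx_state push_done(unsigned actual, unsigned length,
                               unsigned last_chunk, unsigned maxpacket,
                               bool want_zero)
{
        bool is_short = (last_chunk % maxpacket) != 0;

        if (actual < length)
                return TX_MORE_DATA;    /* remainder still queued */
        if (is_short)
                return TX_DONE;         /* short packet ends the transfer */
        return want_zero ? TX_NEED_ZLP : TX_DONE;
}

int main(void)
{
        printf("%d\n", push_done(512, 1024, 512, 512, false));  /* 0: more */
        printf("%d\n", push_done(1000, 1000, 488, 512, true));  /* 1: done */
        printf("%d\n", push_done(1024, 1024, 512, 512, true));  /* 2: ZLP */
        return 0;
}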
 
@@ -972,8 +1030,10 @@ static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done)
        if ((pkt->actual == pkt->length) ||     /* receive all data */
            (pkt->trans < maxp)) {              /* short packet */
                *is_done = 1;
+               usbhs_pipe_running(pipe, 0);
        } else {
                /* re-enable */
+               usbhs_pipe_running(pipe, 0);
                usbhsf_prepare_pop(pkt, is_done);
        }
 
index 6a030b9..9a705b1 100644 (file)
@@ -213,7 +213,10 @@ static int usbhs_status_get_each_irq(struct usbhs_priv *priv,
 {
        struct usbhs_mod *mod = usbhs_mod_get_current(priv);
        u16 intenb0, intenb1;
+       unsigned long flags;
 
+       /********************  spin lock ********************/
+       usbhs_lock(priv, flags);
        state->intsts0 = usbhs_read(priv, INTSTS0);
        state->intsts1 = usbhs_read(priv, INTSTS1);
 
@@ -229,6 +232,8 @@ static int usbhs_status_get_each_irq(struct usbhs_priv *priv,
                state->bempsts &= mod->irq_bempsts;
                state->brdysts &= mod->irq_brdysts;
        }
+       usbhs_unlock(priv, flags);
+       /********************  spin unlock ******************/
 
        /*
         * Check whether the irq enable registers and the irq status are set
index 75fbcf6..040bcef 100644 (file)
@@ -578,6 +578,19 @@ int usbhs_pipe_is_dir_host(struct usbhs_pipe *pipe)
        return usbhsp_flags_has(pipe, IS_DIR_HOST);
 }
 
+int usbhs_pipe_is_running(struct usbhs_pipe *pipe)
+{
+       return usbhsp_flags_has(pipe, IS_RUNNING);
+}
+
+void usbhs_pipe_running(struct usbhs_pipe *pipe, int running)
+{
+       if (running)
+               usbhsp_flags_set(pipe, IS_RUNNING);
+       else
+               usbhsp_flags_clr(pipe, IS_RUNNING);
+}
+
 void usbhs_pipe_data_sequence(struct usbhs_pipe *pipe, int sequence)
 {
        u16 mask = (SQCLR | SQSET);
index 406f36d..d24a059 100644 (file)
@@ -36,6 +36,7 @@ struct usbhs_pipe {
 #define USBHS_PIPE_FLAGS_IS_USED               (1 << 0)
 #define USBHS_PIPE_FLAGS_IS_DIR_IN             (1 << 1)
 #define USBHS_PIPE_FLAGS_IS_DIR_HOST           (1 << 2)
+#define USBHS_PIPE_FLAGS_IS_RUNNING            (1 << 3)
 
        struct usbhs_pkt_handle *handler;
 
@@ -80,6 +81,9 @@ int usbhs_pipe_probe(struct usbhs_priv *priv);
 void usbhs_pipe_remove(struct usbhs_priv *priv);
 int usbhs_pipe_is_dir_in(struct usbhs_pipe *pipe);
 int usbhs_pipe_is_dir_host(struct usbhs_pipe *pipe);
+int usbhs_pipe_is_running(struct usbhs_pipe *pipe);
+void usbhs_pipe_running(struct usbhs_pipe *pipe, int running);
+
 void usbhs_pipe_init(struct usbhs_priv *priv,
                     int (*dma_map_ctrl)(struct usbhs_pkt *pkt, int map));
 int usbhs_pipe_get_maxpacket(struct usbhs_pipe *pipe);
index 824ea5e..dc72b92 100644 (file)
@@ -728,6 +728,7 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID),
                .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
        { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
+       { USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) },
        { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) },
        { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) },
        { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) },
@@ -939,6 +940,8 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
        /* Infineon Devices */
        { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
+       /* GE Healthcare devices */
+       { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
        { }                                     /* Terminating entry */
 };
 
index 70b0b1d..5937b2d 100644 (file)
 #define TELLDUS_VID                    0x1781  /* Vendor ID */
 #define TELLDUS_TELLSTICK_PID          0x0C30  /* RF control dongle 433 MHz using FT232RL */
 
+/*
+ * NOVITUS printers
+ */
+#define NOVITUS_VID                    0x1a28
+#define NOVITUS_BONO_E_PID             0x6010
+
 /*
  * RT Systems programming cables for various ham radios
  */
  * ekey biometric systems GmbH (http://ekey.net/)
  */
 #define FTDI_EKEY_CONV_USB_PID         0xCB08  /* Converter USB */
+
+/*
+ * GE Healthcare devices
+ */
+#define GE_HEALTHCARE_VID              0x1901
+#define GE_HEALTHCARE_NEMO_TRACKER_PID 0x0015
index 6f7f01e..46179a0 100644 (file)
@@ -282,14 +282,19 @@ static const struct usb_device_id id_table[] = {
        /* Sierra Wireless HSPA Non-Composite Device */
        { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)},
        { USB_DEVICE(0x1199, 0x6893) }, /* Sierra Wireless Device */
-       { USB_DEVICE(0x1199, 0x68A3),   /* Sierra Wireless Direct IP modems */
+       /* Sierra Wireless Direct IP modems */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68A3, 0xFF, 0xFF, 0xFF),
+         .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+       },
+       { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
        },
        /* AT&T Direct IP LTE modems */
        { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
        },
-       { USB_DEVICE(0x0f3d, 0x68A3),   /* Airprime/Sierra Wireless Direct IP modems */
+       /* Airprime/Sierra Wireless Direct IP modems */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68A3, 0xFF, 0xFF, 0xFF),
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
        },
 
index 1a132e9..c9bb107 100644 (file)
@@ -272,6 +272,14 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
 }
 
 static const struct usb_device_id id_table[] = {
+       { USB_DEVICE(0x19d2, 0xffec) },
+       { USB_DEVICE(0x19d2, 0xffee) },
+       { USB_DEVICE(0x19d2, 0xfff6) },
+       { USB_DEVICE(0x19d2, 0xfff7) },
+       { USB_DEVICE(0x19d2, 0xfff8) },
+       { USB_DEVICE(0x19d2, 0xfff9) },
+       { USB_DEVICE(0x19d2, 0xfffb) },
+       { USB_DEVICE(0x19d2, 0xfffc) },
        /* MG880 */
        { USB_DEVICE(0x19d2, 0xfffd) },
        { },
index 503ac5c..8a6f371 100644 (file)
@@ -59,10 +59,6 @@ static int uas_use_uas_driver(struct usb_interface *intf,
        unsigned long flags = id->driver_info;
        int r, alt;
 
-       usb_stor_adjust_quirks(udev, &flags);
-
-       if (flags & US_FL_IGNORE_UAS)
-               return 0;
 
        alt = uas_find_uas_alt_setting(intf);
        if (alt < 0)
@@ -72,6 +68,29 @@ static int uas_use_uas_driver(struct usb_interface *intf,
        if (r < 0)
                return 0;
 
+       /*
+        * ASM1051 and older ASM1053 devices have the same usb-id, and UAS is
+        * broken on the ASM1051, use the number of streams to differentiate.
+        * New ASM1053-s also support 32 streams, but have a different prod-id.
+        */
+       if (le16_to_cpu(udev->descriptor.idVendor) == 0x174c &&
+                       le16_to_cpu(udev->descriptor.idProduct) == 0x55aa) {
+               if (udev->speed < USB_SPEED_SUPER) {
+                       /* No streams info, assume ASM1051 */
+                       flags |= US_FL_IGNORE_UAS;
+               } else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) {
+                       flags |= US_FL_IGNORE_UAS;
+               }
+       }
+
+       usb_stor_adjust_quirks(udev, &flags);
+
+       if (flags & US_FL_IGNORE_UAS) {
+               dev_warn(&udev->dev,
+                       "UAS is blacklisted for this device, using usb-storage instead\n");
+               return 0;
+       }
+
        if (udev->bus->sg_tablesize == 0) {
                dev_warn(&udev->dev,
                        "The driver for the USB controller %s does not support scatter-gather which is\n",
index 7ef99b2..4a5c68a 100644 (file)
@@ -101,6 +101,12 @@ UNUSUAL_DEV(  0x03f0, 0x4002, 0x0001, 0x0001,
                "PhotoSmart R707",
                USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_FIX_CAPACITY),
 
+UNUSUAL_DEV(  0x03f3, 0x0001, 0x0000, 0x9999,
+               "Adaptec",
+               "USBConnect 2000",
+               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
+               US_FL_SCM_MULT_TARG ),
+
 /* Reported by Sebastian Kapfer <sebastian_kapfer@gmx.net>
  * and Olaf Hering <olh@suse.de> (different bcd's, same vendor/product)
  * for USB floppies that need the SINGLE_LUN enforcement.
@@ -741,6 +747,12 @@ UNUSUAL_DEV(  0x059b, 0x0001, 0x0100, 0x0100,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_SINGLE_LUN ),
 
+UNUSUAL_DEV(  0x059b, 0x0040, 0x0100, 0x0100,
+               "Iomega",
+               "Jaz USB Adapter",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_SINGLE_LUN ),
+
 /* Reported by <Hendryk.Pfeiffer@gmx.de> */
 UNUSUAL_DEV(  0x059f, 0x0643, 0x0000, 0x0000,
                "LaCie",
@@ -1119,6 +1131,18 @@ UNUSUAL_DEV(  0x0851, 0x1543, 0x0200, 0x0200,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NOT_LOCKABLE),
 
+UNUSUAL_DEV(  0x085a, 0x0026, 0x0100, 0x0133,
+               "Xircom",
+               "PortGear USB-SCSI (Mac USB Dock)",
+               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
+               US_FL_SCM_MULT_TARG ),
+
+UNUSUAL_DEV(  0x085a, 0x0028, 0x0100, 0x0133,
+               "Xircom",
+               "PortGear USB to SCSI Converter",
+               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
+               US_FL_SCM_MULT_TARG ),
+
 /* Submitted by Jan De Luyck <lkml@kcore.org> */
 UNUSUAL_DEV(  0x08bd, 0x1100, 0x0000, 0x0000,
                "CITIZEN",
@@ -1958,6 +1982,14 @@ UNUSUAL_DEV(  0x152d, 0x2329, 0x0100, 0x0100,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
 
+/* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI)
+ * and Mac USB Dock USB-SCSI */
+UNUSUAL_DEV(  0x1645, 0x0007, 0x0100, 0x0133,
+               "Entrega Technologies",
+               "USB to SCSI Converter",
+               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
+               US_FL_SCM_MULT_TARG ),
+
 /* Reported by Robert Schedel <r.schedel@yahoo.de>
  * Note: this is a 'super top' device like the above 14cd/6600 device */
 UNUSUAL_DEV(  0x1652, 0x6600, 0x0201, 0x0201,
@@ -1980,6 +2012,12 @@ UNUSUAL_DEV(  0x177f, 0x0400, 0x0000, 0x0000,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
 
+UNUSUAL_DEV(  0x1822, 0x0001, 0x0000, 0x9999,
+               "Ariston Technologies",
+               "iConnect USB to SCSI adapter",
+               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
+               US_FL_SCM_MULT_TARG ),
+
 /* Reported by Hans de Goede <hdegoede@redhat.com>
  * These Appotech controllers are found in Picture Frames, they provide a
  * (buggy) emulation of a cdrom drive which contains the windows software
index 80079b8..d0303f0 100644 (file)
@@ -431,16 +431,19 @@ void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce)
        uwb_dev->mac_addr = *bce->mac_addr;
        uwb_dev->dev_addr = bce->dev_addr;
        dev_set_name(&uwb_dev->dev, "%s", macbuf);
+
+       /* plug the beacon cache */
+       bce->uwb_dev = uwb_dev;
+       uwb_dev->bce = bce;
+       uwb_bce_get(bce);               /* released in uwb_dev_sys_release() */
+
        result = uwb_dev_add(uwb_dev, &rc->uwb_dev.dev, rc);
        if (result < 0) {
                dev_err(dev, "new device %s: cannot instantiate device\n",
                        macbuf);
                goto error_dev_add;
        }
-       /* plug the beacon cache */
-       bce->uwb_dev = uwb_dev;
-       uwb_dev->bce = bce;
-       uwb_bce_get(bce);               /* released in uwb_dev_sys_release() */
+
        dev_info(dev, "uwb device (mac %s dev %s) connected to %s %s\n",
                 macbuf, devbuf, rc->uwb_dev.dev.parent->bus->name,
                 dev_name(rc->uwb_dev.dev.parent));
@@ -448,6 +451,8 @@ void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce)
        return;
 
 error_dev_add:
+       bce->uwb_dev = NULL;
+       uwb_bce_put(bce);
        kfree(uwb_dev);
        return;
 }
index a7b6217..6ad23bd 100644 (file)
@@ -639,9 +639,7 @@ static int clcdfb_of_init_tft_panel(struct clcd_fb *fb, u32 r0, u32 g0, u32 b0)
                if (g0 != panels[i].g0)
                        continue;
                if (r0 == panels[i].r0 && b0 == panels[i].b0)
-                       fb->panel->caps = panels[i].caps & CLCD_CAP_RGB;
-               if (r0 == panels[i].b0 && b0 == panels[i].r0)
-                       fb->panel->caps = panels[i].caps & CLCD_CAP_BGR;
+                       fb->panel->caps = panels[i].caps;
        }
 
        return fb->panel->caps ? 0 : -EINVAL;
index 5c660c7..1e0a317 100644 (file)
@@ -230,8 +230,8 @@ static enum bp_state reserve_additional_memory(long credit)
        rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT);
 
        if (rc) {
-               pr_info("%s: add_memory() failed: %i\n", __func__, rc);
-               return BP_EAGAIN;
+               pr_warn("Cannot add additional memory (%i)\n", rc);
+               return BP_ECANCELED;
        }
 
        balloon_hotplug -= credit;
index 787d179..e53fe19 100644 (file)
@@ -124,7 +124,7 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
        int i, rc, readonly;
        LIST_HEAD(queue_gref);
        LIST_HEAD(queue_file);
-       struct gntalloc_gref *gref;
+       struct gntalloc_gref *gref, *next;
 
        readonly = !(op->flags & GNTALLOC_FLAG_WRITABLE);
        rc = -ENOMEM;
@@ -141,13 +141,11 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
                        goto undo;
 
                /* Grant foreign access to the page. */
-               gref->gref_id = gnttab_grant_foreign_access(op->domid,
+               rc = gnttab_grant_foreign_access(op->domid,
                        pfn_to_mfn(page_to_pfn(gref->page)), readonly);
-               if ((int)gref->gref_id < 0) {
-                       rc = gref->gref_id;
+               if (rc < 0)
                        goto undo;
-               }
-               gref_ids[i] = gref->gref_id;
+               gref_ids[i] = gref->gref_id = rc;
        }
 
        /* Add to gref lists. */
@@ -162,8 +160,8 @@ undo:
        mutex_lock(&gref_mutex);
        gref_size -= (op->count - i);
 
-       list_for_each_entry(gref, &queue_file, next_file) {
-               /* __del_gref does not remove from queue_file */
+       list_for_each_entry_safe(gref, next, &queue_file, next_file) {
+               list_del(&gref->next_file);
                __del_gref(gref);
        }
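(The undo path above switches to list_for_each_entry_safe() because entries are now unlinked and freed during the walk, so the iterator must cache the successor before each element disappears. A plain-C stand-in for that idiom; the gref structure and names here are made up.)

#include <stdio.h>
#include <stdlib.h>

struct gref {
        int id;
        struct gref *next;
};

static void free_all(struct gref *head)
{
        struct gref *g, *n;

        for (g = head; g; g = n) {
                n = g->next;    /* grab the successor before freeing */
                free(g);        /* touching g->next after this is use-after-free */
        }
}

int main(void)
{
        struct gref *head = NULL;
        int i;

        for (i = 0; i < 3; i++) {
                struct gref *g = malloc(sizeof(*g));

                g->id = i;
                g->next = head;
                head = g;
        }
        free_all(head);
        puts("ok");
        return 0;
}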
 
@@ -193,7 +191,7 @@ static void __del_gref(struct gntalloc_gref *gref)
 
        gref->notify.flags = 0;
 
-       if (gref->gref_id > 0) {
+       if (gref->gref_id) {
                if (gnttab_query_foreign_access(gref->gref_id))
                        return;
 
index 5f1e1f3..f8bb36f 100644 (file)
@@ -103,16 +103,11 @@ static void do_suspend(void)
 
        shutting_down = SHUTDOWN_SUSPEND;
 
-#ifdef CONFIG_PREEMPT
-       /* If the kernel is preemptible, we need to freeze all the processes
-          to prevent them from being in the middle of a pagetable update
-          during suspend. */
        err = freeze_processes();
        if (err) {
                pr_err("%s: freeze failed %d\n", __func__, err);
                goto out;
        }
-#endif
 
        err = dpm_suspend_start(PMSG_FREEZE);
        if (err) {
@@ -157,10 +152,8 @@ out_resume:
        dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
 
 out_thaw:
-#ifdef CONFIG_PREEMPT
        thaw_processes();
 out:
-#endif
        shutting_down = SHUTDOWN_INVALID;
 }
 #endif /* CONFIG_HIBERNATE_CALLBACKS */
index 43527fd..56b8522 100644 (file)
@@ -234,8 +234,17 @@ static inline int btrfs_inode_in_log(struct inode *inode, u64 generation)
            BTRFS_I(inode)->last_sub_trans <=
            BTRFS_I(inode)->last_log_commit &&
            BTRFS_I(inode)->last_sub_trans <=
-           BTRFS_I(inode)->root->last_log_commit)
-               return 1;
+           BTRFS_I(inode)->root->last_log_commit) {
+               /*
+                * After a ranged fsync we might have left some extent maps
+                * (that fall outside the fsync's range). So return false
+                * here if the list isn't empty, to make sure btrfs_log_inode()
+                * will be called and process those extent maps.
+                */
+               smp_mb();
+               if (list_empty(&BTRFS_I(inode)->extent_tree.modified_extents))
+                       return 1;
+       }
        return 0;
 }
 
index 36861b7..ff1cc03 100644 (file)
@@ -1966,7 +1966,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 
        btrfs_init_log_ctx(&ctx);
 
-       ret = btrfs_log_dentry_safe(trans, root, dentry, &ctx);
+       ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx);
        if (ret < 0) {
                /* Fallthrough and commit/free transaction. */
                ret = 1;
index 9c194bd..016c403 100644 (file)
@@ -778,8 +778,12 @@ retry:
                                                ins.offset,
                                                BTRFS_ORDERED_COMPRESSED,
                                                async_extent->compress_type);
-               if (ret)
+               if (ret) {
+                       btrfs_drop_extent_cache(inode, async_extent->start,
+                                               async_extent->start +
+                                               async_extent->ram_size - 1, 0);
                        goto out_free_reserve;
+               }
 
                /*
                 * clear dirty, set writeback and unlock the pages.
@@ -971,14 +975,14 @@ static noinline int cow_file_range(struct inode *inode,
                ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
                                               ram_size, cur_alloc_size, 0);
                if (ret)
-                       goto out_reserve;
+                       goto out_drop_extent_cache;
 
                if (root->root_key.objectid ==
                    BTRFS_DATA_RELOC_TREE_OBJECTID) {
                        ret = btrfs_reloc_clone_csums(inode, start,
                                                      cur_alloc_size);
                        if (ret)
-                               goto out_reserve;
+                               goto out_drop_extent_cache;
                }
 
                if (disk_num_bytes < cur_alloc_size)
@@ -1006,6 +1010,8 @@ static noinline int cow_file_range(struct inode *inode,
 out:
        return ret;
 
+out_drop_extent_cache:
+       btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0);
 out_reserve:
        btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
 out_unlock:
@@ -4242,7 +4248,8 @@ out:
                        btrfs_abort_transaction(trans, root, ret);
        }
 error:
-       if (last_size != (u64)-1)
+       if (last_size != (u64)-1 &&
+           root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
                btrfs_ordered_update_i_size(inode, last_size, NULL);
        btrfs_free_path(path);
        return err;
@@ -5627,6 +5634,17 @@ int btrfs_set_inode_index(struct inode *dir, u64 *index)
        return ret;
 }
 
+static int btrfs_insert_inode_locked(struct inode *inode)
+{
+       struct btrfs_iget_args args;
+       args.location = &BTRFS_I(inode)->location;
+       args.root = BTRFS_I(inode)->root;
+
+       return insert_inode_locked4(inode,
+                  btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
+                  btrfs_find_actor, &args);
+}
+
 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     struct inode *dir,
@@ -5719,10 +5737,19 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
                sizes[1] = name_len + sizeof(*ref);
        }
 
+       location = &BTRFS_I(inode)->location;
+       location->objectid = objectid;
+       location->offset = 0;
+       btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
+
+       ret = btrfs_insert_inode_locked(inode);
+       if (ret < 0)
+               goto fail;
+
        path->leave_spinning = 1;
        ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems);
        if (ret != 0)
-               goto fail;
+               goto fail_unlock;
 
        inode_init_owner(inode, dir, mode);
        inode_set_bytes(inode, 0);
@@ -5745,11 +5772,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
        btrfs_mark_buffer_dirty(path->nodes[0]);
        btrfs_free_path(path);
 
-       location = &BTRFS_I(inode)->location;
-       location->objectid = objectid;
-       location->offset = 0;
-       btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
-
        btrfs_inherit_iflags(inode, dir);
 
        if (S_ISREG(mode)) {
@@ -5760,7 +5782,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
                                BTRFS_INODE_NODATASUM;
        }
 
-       btrfs_insert_inode_hash(inode);
        inode_tree_add(inode);
 
        trace_btrfs_inode_new(inode);
@@ -5775,6 +5796,9 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
                          btrfs_ino(inode), root->root_key.objectid, ret);
 
        return inode;
+
+fail_unlock:
+       unlock_new_inode(inode);
 fail:
        if (dir && name)
                BTRFS_I(dir)->index_cnt--;
@@ -5909,28 +5933,28 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
                goto out_unlock;
        }
 
-       err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
-       if (err) {
-               drop_inode = 1;
-               goto out_unlock;
-       }
-
        /*
        * If the active LSM wants to access the inode during
        * d_instantiate it needs these. Smack checks to see
        * if the filesystem supports xattrs by looking at the
        * ops vector.
        */
-
        inode->i_op = &btrfs_special_inode_operations;
-       err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
+       init_special_inode(inode, inode->i_mode, rdev);
+
+       err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
        if (err)
-               drop_inode = 1;
-       else {
-               init_special_inode(inode, inode->i_mode, rdev);
+               goto out_unlock_inode;
+
+       err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
+       if (err) {
+               goto out_unlock_inode;
+       } else {
                btrfs_update_inode(trans, root, inode);
+               unlock_new_inode(inode);
                d_instantiate(dentry, inode);
        }
+
 out_unlock:
        btrfs_end_transaction(trans, root);
        btrfs_balance_delayed_items(root);
@@ -5940,6 +5964,12 @@ out_unlock:
                iput(inode);
        }
        return err;
+
+out_unlock_inode:
+       drop_inode = 1;
+       unlock_new_inode(inode);
+       goto out_unlock;
+
 }
 
 static int btrfs_create(struct inode *dir, struct dentry *dentry,
@@ -5974,15 +6004,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
                goto out_unlock;
        }
        drop_inode_on_err = 1;
-
-       err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
-       if (err)
-               goto out_unlock;
-
-       err = btrfs_update_inode(trans, root, inode);
-       if (err)
-               goto out_unlock;
-
        /*
        * If the active LSM wants to access the inode during
        * d_instantiate it needs these. Smack checks to see
@@ -5991,14 +6012,23 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
        */
        inode->i_fop = &btrfs_file_operations;
        inode->i_op = &btrfs_file_inode_operations;
+       inode->i_mapping->a_ops = &btrfs_aops;
+       inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
+
+       err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
+       if (err)
+               goto out_unlock_inode;
+
+       err = btrfs_update_inode(trans, root, inode);
+       if (err)
+               goto out_unlock_inode;
 
        err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
        if (err)
-               goto out_unlock;
+               goto out_unlock_inode;
 
-       inode->i_mapping->a_ops = &btrfs_aops;
-       inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
        BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
+       unlock_new_inode(inode);
        d_instantiate(dentry, inode);
 
 out_unlock:
@@ -6010,6 +6040,11 @@ out_unlock:
        btrfs_balance_delayed_items(root);
        btrfs_btree_balance_dirty(root);
        return err;
+
+out_unlock_inode:
+       unlock_new_inode(inode);
+       goto out_unlock;
+
 }
 
 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
@@ -6117,25 +6152,30 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        }
 
        drop_on_err = 1;
+       /* these must be set before we unlock the inode */
+       inode->i_op = &btrfs_dir_inode_operations;
+       inode->i_fop = &btrfs_dir_file_operations;
 
        err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
        if (err)
-               goto out_fail;
-
-       inode->i_op = &btrfs_dir_inode_operations;
-       inode->i_fop = &btrfs_dir_file_operations;
+               goto out_fail_inode;
 
        btrfs_i_size_write(inode, 0);
        err = btrfs_update_inode(trans, root, inode);
        if (err)
-               goto out_fail;
+               goto out_fail_inode;
 
        err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
                             dentry->d_name.len, 0, index);
        if (err)
-               goto out_fail;
+               goto out_fail_inode;
 
        d_instantiate(dentry, inode);
+       /*
+        * mkdir is special.  We're unlocking after we call d_instantiate
+        * to avoid a race with nfsd calling d_instantiate.
+        */
+       unlock_new_inode(inode);
        drop_on_err = 0;
 
 out_fail:
@@ -6145,6 +6185,10 @@ out_fail:
        btrfs_balance_delayed_items(root);
        btrfs_btree_balance_dirty(root);
        return err;
+
+out_fail_inode:
+       unlock_new_inode(inode);
+       goto out_fail;
 }
 
 /* helper for btrfs_get_extent.  Given an existing extent in the tree,
@@ -8100,6 +8144,7 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
 
        set_nlink(inode, 1);
        btrfs_i_size_write(inode, 0);
+       unlock_new_inode(inode);
 
        err = btrfs_subvol_inherit_props(trans, new_root, parent_root);
        if (err)
@@ -8760,12 +8805,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
                goto out_unlock;
        }
 
-       err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
-       if (err) {
-               drop_inode = 1;
-               goto out_unlock;
-       }
-
        /*
        * If the active LSM wants to access the inode during
        * d_instantiate it needs these. Smack checks to see
@@ -8774,23 +8813,22 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
        */
        inode->i_fop = &btrfs_file_operations;
        inode->i_op = &btrfs_file_inode_operations;
+       inode->i_mapping->a_ops = &btrfs_aops;
+       inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
+       BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
+
+       err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
+       if (err)
+               goto out_unlock_inode;
 
        err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
        if (err)
-               drop_inode = 1;
-       else {
-               inode->i_mapping->a_ops = &btrfs_aops;
-               inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
-               BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
-       }
-       if (drop_inode)
-               goto out_unlock;
+               goto out_unlock_inode;
 
        path = btrfs_alloc_path();
        if (!path) {
                err = -ENOMEM;
-               drop_inode = 1;
-               goto out_unlock;
+               goto out_unlock_inode;
        }
        key.objectid = btrfs_ino(inode);
        key.offset = 0;
@@ -8799,9 +8837,8 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
        err = btrfs_insert_empty_item(trans, root, path, &key,
                                      datasize);
        if (err) {
-               drop_inode = 1;
                btrfs_free_path(path);
-               goto out_unlock;
+               goto out_unlock_inode;
        }
        leaf = path->nodes[0];
        ei = btrfs_item_ptr(leaf, path->slots[0],
@@ -8825,12 +8862,15 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
        inode_set_bytes(inode, name_len);
        btrfs_i_size_write(inode, name_len);
        err = btrfs_update_inode(trans, root, inode);
-       if (err)
+       if (err) {
                drop_inode = 1;
+               goto out_unlock_inode;
+       }
+
+       unlock_new_inode(inode);
+       d_instantiate(dentry, inode);
 
 out_unlock:
-       if (!err)
-               d_instantiate(dentry, inode);
        btrfs_end_transaction(trans, root);
        if (drop_inode) {
                inode_dec_link_count(inode);
@@ -8838,6 +8878,11 @@ out_unlock:
        }
        btrfs_btree_balance_dirty(root);
        return err;
+
+out_unlock_inode:
+       drop_inode = 1;
+       unlock_new_inode(inode);
+       goto out_unlock;
 }
 
 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
@@ -9021,14 +9066,6 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
                goto out;
        }
 
-       ret = btrfs_init_inode_security(trans, inode, dir, NULL);
-       if (ret)
-               goto out;
-
-       ret = btrfs_update_inode(trans, root, inode);
-       if (ret)
-               goto out;
-
        inode->i_fop = &btrfs_file_operations;
        inode->i_op = &btrfs_file_inode_operations;
 
@@ -9036,9 +9073,16 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
        inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
        BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
 
+       ret = btrfs_init_inode_security(trans, inode, dir, NULL);
+       if (ret)
+               goto out_inode;
+
+       ret = btrfs_update_inode(trans, root, inode);
+       if (ret)
+               goto out_inode;
        ret = btrfs_orphan_add(trans, inode);
        if (ret)
-               goto out;
+               goto out_inode;
 
        /*
         * We set number of links to 0 in btrfs_new_inode(), and here we set
@@ -9048,6 +9092,7 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
         *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
         */
        set_nlink(inode, 1);
+       unlock_new_inode(inode);
        d_tmpfile(dentry, inode);
        mark_inode_dirty(inode);
 
@@ -9057,8 +9102,12 @@ out:
                iput(inode);
        btrfs_balance_delayed_items(root);
        btrfs_btree_balance_dirty(root);
-
        return ret;
+
+out_inode:
+       unlock_new_inode(inode);
+       goto out;
+
 }
 
 static const struct inode_operations btrfs_dir_inode_operations = {
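The inode.c hunks above all follow one pattern: btrfs_new_inode() now hashes the inode while it is still marked I_NEW (via the new btrfs_insert_inode_locked()), and every creation path must call unlock_new_inode() exactly once - after setup on success, or via the new *_inode error labels - before the final iput(). A minimal sketch of that generic VFS pattern, using the plain insert_inode_locked() helper instead of btrfs's hash/actor variant, and a made-up fill_inode() step standing in for the filesystem-specific setup:

static struct inode *example_new_inode(struct super_block *sb, u64 ino)
{
        struct inode *inode = new_inode(sb);

        if (!inode)
                return ERR_PTR(-ENOMEM);

        inode->i_ino = ino;
        /* Hash the inode while I_NEW is still set; concurrent lookups
         * wait on I_NEW instead of seeing a half-built inode. */
        if (insert_inode_locked(inode) < 0) {
                iput(inode);
                return ERR_PTR(-EEXIST);
        }

        if (fill_inode(inode) < 0) {            /* hypothetical setup step */
                unlock_new_inode(inode);        /* wake I_NEW waiters first */
                iput(inode);
                return ERR_PTR(-EIO);
        }

        unlock_new_inode(inode);                /* publish: clears I_NEW */
        return inode;
}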
index fce6fd0..8a8e298 100644 (file)
@@ -1019,8 +1019,10 @@ static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
                return false;
 
        next = defrag_lookup_extent(inode, em->start + em->len);
-       if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE ||
-           (em->block_start + em->block_len == next->block_start))
+       if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
+               ret = false;
+       else if ((em->block_start + em->block_len == next->block_start) &&
+                (em->block_len > 128 * 1024 && next->block_len > 128 * 1024))
                ret = false;
 
        free_extent_map(next);
@@ -1055,7 +1057,6 @@ static int should_defrag_range(struct inode *inode, u64 start, int thresh,
        }
 
        next_mergeable = defrag_check_next_extent(inode, em);
-
        /*
         * we hit a real extent, if it is big or the next extent is not a
         * real extent, don't bother defragging it
@@ -1702,7 +1703,7 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
            ~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY |
              BTRFS_SUBVOL_QGROUP_INHERIT)) {
                ret = -EOPNOTSUPP;
-               goto out;
+               goto free_args;
        }
 
        if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC)
@@ -1712,27 +1713,31 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
        if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
                if (vol_args->size > PAGE_CACHE_SIZE) {
                        ret = -EINVAL;
-                       goto out;
+                       goto free_args;
                }
                inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size);
                if (IS_ERR(inherit)) {
                        ret = PTR_ERR(inherit);
-                       goto out;
+                       goto free_args;
                }
        }
 
        ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
                                              vol_args->fd, subvol, ptr,
                                              readonly, inherit);
+       if (ret)
+               goto free_inherit;
 
-       if (ret == 0 && ptr &&
-           copy_to_user(arg +
-                        offsetof(struct btrfs_ioctl_vol_args_v2,
-                                 transid), ptr, sizeof(*ptr)))
+       if (ptr && copy_to_user(arg +
+                               offsetof(struct btrfs_ioctl_vol_args_v2,
+                                       transid),
+                               ptr, sizeof(*ptr)))
                ret = -EFAULT;
-out:
-       kfree(vol_args);
+
+free_inherit:
        kfree(inherit);
+free_args:
+       kfree(vol_args);
        return ret;
 }
 
@@ -2652,7 +2657,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
        vol_args = memdup_user(arg, sizeof(*vol_args));
        if (IS_ERR(vol_args)) {
                ret = PTR_ERR(vol_args);
-               goto out;
+               goto err_drop;
        }
 
        vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
@@ -2670,6 +2675,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
 
 out:
        kfree(vol_args);
+err_drop:
        mnt_drop_write_file(file);
        return ret;
 }
index 7e0e6e3..1d1ba08 100644 (file)
 #define LOG_WALK_REPLAY_ALL 3
 
 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
-                            struct btrfs_root *root, struct inode *inode,
-                            int inode_only);
+                          struct btrfs_root *root, struct inode *inode,
+                          int inode_only,
+                          const loff_t start,
+                          const loff_t end);
 static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             struct btrfs_path *path, u64 objectid);
@@ -3858,8 +3860,10 @@ process:
  * This handles both files and directories.
  */
 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
-                            struct btrfs_root *root, struct inode *inode,
-                            int inode_only)
+                          struct btrfs_root *root, struct inode *inode,
+                          int inode_only,
+                          const loff_t start,
+                          const loff_t end)
 {
        struct btrfs_path *path;
        struct btrfs_path *dst_path;
@@ -3876,6 +3880,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
        int ins_nr;
        bool fast_search = false;
        u64 ino = btrfs_ino(inode);
+       struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
 
        path = btrfs_alloc_path();
        if (!path)
@@ -4049,13 +4054,35 @@ log_extents:
                        goto out_unlock;
                }
        } else if (inode_only == LOG_INODE_ALL) {
-               struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
                struct extent_map *em, *n;
 
-               write_lock(&tree->lock);
-               list_for_each_entry_safe(em, n, &tree->modified_extents, list)
-                       list_del_init(&em->list);
-               write_unlock(&tree->lock);
+               write_lock(&em_tree->lock);
+               /*
+                * We can't just remove every em if we're called for a ranged
+                * fsync - that is, one that doesn't cover the whole possible
+                * file range (0 to LLONG_MAX). This is because we can have
+                * em's that fall outside the range we're logging and therefore
+                * their ordered operations haven't completed yet
+                * (btrfs_finish_ordered_io() not invoked yet). This means we
+                * didn't get their respective file extent item in the fs/subvol
+                * tree yet, and need to let the next fast fsync (one which
+                * consults the list of modified extent maps) find the em so
+                * that it logs a matching file extent item and waits for the
+                * respective ordered operation to complete (if it's still
+                * running).
+                *
+                * Removing every em outside the range we're logging would make
+                * the next fast fsync not log their matching file extent items,
+                * therefore making us lose data after a log replay.
+                */
+               list_for_each_entry_safe(em, n, &em_tree->modified_extents,
+                                        list) {
+                       const u64 mod_end = em->mod_start + em->mod_len - 1;
+
+                       if (em->mod_start >= start && mod_end <= end)
+                               list_del_init(&em->list);
+               }
+               write_unlock(&em_tree->lock);
        }
 
        if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
@@ -4065,6 +4092,7 @@ log_extents:
                        goto out_unlock;
                }
        }
+
        BTRFS_I(inode)->logged_trans = trans->transid;
        BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans;
 out_unlock:
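The comment above is the heart of the ranged-fsync change: only extent maps that lie entirely inside the [start, end] interval being logged are removed from the modified_extents list, everything else stays for a later fast fsync. A standalone sketch of that filtering rule (plain C with a toy array instead of the kernel list API; all names here are illustrative):

#include <stdint.h>
#include <stdio.h>

struct em {
        uint64_t mod_start, mod_len;
        int keep;                       /* stands in for staying on the list */
};

/* Mark for removal only entries fully contained in [start, end]; entries
 * that extend past the logged range are kept. */
static void drop_fully_logged(struct em *ems, int n, uint64_t start, uint64_t end)
{
        for (int i = 0; i < n; i++) {
                uint64_t mod_end = ems[i].mod_start + ems[i].mod_len - 1;

                ems[i].keep = !(ems[i].mod_start >= start && mod_end <= end);
        }
}

int main(void)
{
        struct em ems[] = { { 0, 4096, 1 }, { 4096, 8192, 1 }, { 1 << 20, 4096, 1 } };

        drop_fully_logged(ems, 3, 0, 8191);
        for (int i = 0; i < 3; i++)
                printf("em %d: %s\n", i, ems[i].keep ? "kept" : "dropped");
        return 0;
}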
@@ -4161,7 +4189,10 @@ out:
  */
 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root, struct inode *inode,
-                                 struct dentry *parent, int exists_only,
+                                 struct dentry *parent,
+                                 const loff_t start,
+                                 const loff_t end,
+                                 int exists_only,
                                  struct btrfs_log_ctx *ctx)
 {
        int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
@@ -4207,7 +4238,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
        if (ret)
                goto end_no_trans;
 
-       ret = btrfs_log_inode(trans, root, inode, inode_only);
+       ret = btrfs_log_inode(trans, root, inode, inode_only, start, end);
        if (ret)
                goto end_trans;
 
@@ -4235,7 +4266,8 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
 
                if (BTRFS_I(inode)->generation >
                    root->fs_info->last_trans_committed) {
-                       ret = btrfs_log_inode(trans, root, inode, inode_only);
+                       ret = btrfs_log_inode(trans, root, inode, inode_only,
+                                             0, LLONG_MAX);
                        if (ret)
                                goto end_trans;
                }
@@ -4269,13 +4301,15 @@ end_no_trans:
  */
 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, struct dentry *dentry,
+                         const loff_t start,
+                         const loff_t end,
                          struct btrfs_log_ctx *ctx)
 {
        struct dentry *parent = dget_parent(dentry);
        int ret;
 
        ret = btrfs_log_inode_parent(trans, root, dentry->d_inode, parent,
-                                    0, ctx);
+                                    start, end, 0, ctx);
        dput(parent);
 
        return ret;
@@ -4512,6 +4546,7 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans,
                    root->fs_info->last_trans_committed))
                return 0;
 
-       return btrfs_log_inode_parent(trans, root, inode, parent, 1, NULL);
+       return btrfs_log_inode_parent(trans, root, inode, parent, 0,
+                                     LLONG_MAX, 1, NULL);
 }
 
index 7f5b41b..e2e798a 100644 (file)
@@ -59,6 +59,8 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
 int btrfs_recover_log_trees(struct btrfs_root *tree_root);
 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, struct dentry *dentry,
+                         const loff_t start,
+                         const loff_t end,
                          struct btrfs_log_ctx *ctx);
 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
index 340a92d..2c2d6d1 100644 (file)
@@ -529,12 +529,12 @@ static noinline int device_list_add(const char *path,
                 */
 
                /*
-                * As of now don't allow update to btrfs_fs_device through
-                * the btrfs dev scan cli, after FS has been mounted.
+                * For now, we do allow update to btrfs_fs_device through the
+                * btrfs dev scan cli after FS has been mounted.  We're still
+                * tracking a problem where systems fail mount by subvolume id
+                * when we reject replacement on a mounted FS.
                 */
-               if (fs_devices->opened) {
-                       return -EBUSY;
-               } else {
+               if (!fs_devices->opened && found_transid < device->generation) {
                        /*
                         * That is if the FS is _not_ mounted and if you
                         * are here, that means there is more than one
@@ -542,8 +542,7 @@ static noinline int device_list_add(const char *path,
                         * with larger generation number or the last-in if
                         * generation are equal.
                         */
-                       if (found_transid < device->generation)
-                               return -EEXIST;
+                       return -EEXIST;
                }
 
                name = rcu_string_strdup(path, GFP_NOFS);
index 8f05111..3588a80 100644 (file)
@@ -1022,7 +1022,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
                bh = page_buffers(page);
                if (bh->b_size == size) {
                        end_block = init_page_buffers(page, bdev,
-                                               index << sizebits, size);
+                                               (sector_t)index << sizebits,
+                                               size);
                        goto done;
                }
                if (!try_to_free_buffers(page))
@@ -1043,7 +1044,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
         */
        spin_lock(&inode->i_mapping->private_lock);
        link_dev_buffers(page, bh);
-       end_block = init_page_buffers(page, bdev, index << sizebits, size);
+       end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
+                       size);
        spin_unlock(&inode->i_mapping->private_lock);
 done:
        ret = (block < end_block) ? 1 : -ENXIO;
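The two grow_dev_page() hunks above cast the page index to sector_t before shifting, because on 32-bit kernels pgoff_t is only 32 bits wide and "index << sizebits" can silently wrap for large block devices. A tiny user-space illustration of the wrap (the specific index and sizebits values are only an example):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t index = 0x40000000;    /* page index ~4 TiB into a device (4 KiB pages) */
        unsigned sizebits = 3;          /* log2(PAGE_SIZE / blocksize): 4 KiB pages, 512 B blocks */

        uint32_t wrapped = index << sizebits;             /* old computation: wraps to 0 */
        uint64_t block   = (uint64_t)index << sizebits;   /* fixed computation */

        printf("wrapped=%u correct=%llu\n", wrapped, (unsigned long long)block);
        return 0;
}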
index 5bf2b41..83e9c94 100644 (file)
@@ -779,7 +779,8 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
            !subdir->d_inode->i_op->lookup ||
            !subdir->d_inode->i_op->mkdir ||
            !subdir->d_inode->i_op->create ||
-           !subdir->d_inode->i_op->rename ||
+           (!subdir->d_inode->i_op->rename &&
+            !subdir->d_inode->i_op->rename2) ||
            !subdir->d_inode->i_op->rmdir ||
            !subdir->d_inode->i_op->unlink)
                goto check_error;
index 4b1fb5c..25e745b 100644 (file)
@@ -151,7 +151,6 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
        struct cachefiles_one_read *monitor;
        struct cachefiles_object *object;
        struct fscache_retrieval *op;
-       struct pagevec pagevec;
        int error, max;
 
        op = container_of(_op, struct fscache_retrieval, op);
@@ -160,8 +159,6 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
 
        _enter("{ino=%lu}", object->backer->d_inode->i_ino);
 
-       pagevec_init(&pagevec, 0);
-
        max = 8;
        spin_lock_irq(&object->work_lock);
 
@@ -396,7 +393,6 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
 {
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
-       struct pagevec pagevec;
        struct inode *inode;
        sector_t block0, block;
        unsigned shift;
@@ -427,8 +423,6 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
        op->op.flags |= FSCACHE_OP_ASYNC;
        op->op.processor = cachefiles_read_copier;
 
-       pagevec_init(&pagevec, 0);
-
        /* we assume the absence or presence of the first block is a good
         * enough indication for the page as a whole
         * - TODO: don't use bmap() for this as it is _not_ actually good
index 603f18a..a2172f3 100644 (file)
@@ -22,6 +22,11 @@ config CIFS
          support for OS/2 and Windows ME and similar servers is provided as
          well.
 
+         The module also provides optional support for the follow-on
+         protocols to CIFS, including SMB3, which enable
+         useful performance and security features (see the description
+         of CONFIG_CIFS_SMB2).
+
          The cifs module provides an advanced network file system
          client for mounting to CIFS compliant servers.  It includes
          support for DFS (hierarchical name space), secure per-user
@@ -121,7 +126,8 @@ config CIFS_ACL
          depends on CIFS_XATTR && KEYS
          help
            Allows fetching CIFS/NTFS ACL from the server.  The DACL blob
-           is handed over to the application/caller.
+           is handed over to the application/caller.  See the man
+           page for getcifsacl for more information.
 
 config CIFS_DEBUG
        bool "Enable CIFS debugging routines"
@@ -162,7 +168,7 @@ config CIFS_NFSD_EXPORT
           Allows NFS server to export a CIFS mounted share (nfsd over cifs)
 
 config CIFS_SMB2
-       bool "SMB2 network file system support"
+       bool "SMB2 and SMB3 network file system support"
        depends on CIFS && INET
        select NLS
        select KEYS
@@ -170,16 +176,21 @@ config CIFS_SMB2
        select DNS_RESOLVER
 
        help
-         This enables experimental support for the SMB2 (Server Message Block
-         version 2) protocol. The SMB2 protocol is the successor to the
-         popular CIFS and SMB network file sharing protocols. SMB2 is the
-         native file sharing mechanism for recent versions of Windows
-         operating systems (since Vista).  SMB2 enablement will eventually
-         allow users better performance, security and features, than would be
-         possible with cifs. Note that smb2 mount options also are simpler
-         (compared to cifs) due to protocol improvements.
-
-         Unless you are a developer or tester, say N.
+         This enables support for the Server Message Block version 2
+         family of protocols, including SMB3.  SMB3 support is
+         enabled on mount by specifying "vers=3.0" in the mount
+         options. These protocols are the successors to the popular
+         CIFS and SMB network file sharing protocols. SMB3 is the
+         native file sharing mechanism for the more recent
+         versions of Windows (Windows 8 and Windows 2012 and
+         later); Samba and many other servers also support SMB3 well.
+         In general SMB3 enables better performance, security and
+         features than would be possible with CIFS.  (Note that when
+         mounting to Samba, CIFS mounts can provide slightly better
+         POSIX compatibility than SMB3 mounts, thanks to the CIFS
+         POSIX extensions.)  SMB2/SMB3 mount options are also
+         slightly simpler (compared to CIFS) due
+         to protocol improvements.
 
 config CIFS_FSCACHE
          bool "Provide CIFS client caching support"
index b0fafa4..002e0c1 100644 (file)
@@ -136,5 +136,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "2.04"
+#define CIFS_VERSION   "2.05"
 #endif                         /* _CIFSFS_H */
index dfc731b..25b8392 100644 (file)
 #define SERVER_NAME_LENGTH 40
 #define SERVER_NAME_LEN_WITH_NULL     (SERVER_NAME_LENGTH + 1)
 
-/* used to define string lengths for reversing unicode strings */
-/*         (256+1)*2 = 514                                     */
-/*           (max path length + 1 for null) * 2 for unicode    */
-#define MAX_NAME 514
-
 /* SMB echo "timeout" -- FIXME: tunable? */
 #define SMB_ECHO_INTERVAL (60 * HZ)
 
index 03ed8a0..36ca204 100644 (file)
@@ -1600,6 +1600,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                        tmp_end++;
                        if (!(tmp_end < end && tmp_end[1] == delim)) {
                                /* No it is not. Set the password to NULL */
+                               kfree(vol->password);
                                vol->password = NULL;
                                break;
                        }
@@ -1637,6 +1638,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                                        options = end;
                        }
 
+                       kfree(vol->password);
                        /* Now build new password string */
                        temp_len = strlen(value);
                        vol->password = kzalloc(temp_len+1, GFP_KERNEL);
index 3db0c5f..6cbd9c6 100644 (file)
@@ -497,6 +497,14 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
                goto out;
        }
 
+       if (file->f_flags & O_DIRECT &&
+           CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
+               if (CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+                       file->f_op = &cifs_file_direct_nobrl_ops;
+               else
+                       file->f_op = &cifs_file_direct_ops;
+               }
+
        file_info = cifs_new_fileinfo(&fid, file, tlink, oplock);
        if (file_info == NULL) {
                if (server->ops->close)
index d5fec92..7c018a1 100644 (file)
@@ -467,6 +467,14 @@ int cifs_open(struct inode *inode, struct file *file)
        cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
                 inode, file->f_flags, full_path);
 
+       if (file->f_flags & O_DIRECT &&
+           cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
+               if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+                       file->f_op = &cifs_file_direct_nobrl_ops;
+               else
+                       file->f_op = &cifs_file_direct_ops;
+       }
+
        if (server->oplocks)
                oplock = REQ_OPLOCK;
        else
index 949ec90..7899a40 100644 (file)
@@ -1720,7 +1720,10 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry,
 unlink_target:
        /* Try unlinking the target dentry if it's not negative */
        if (target_dentry->d_inode && (rc == -EACCES || rc == -EEXIST)) {
-               tmprc = cifs_unlink(target_dir, target_dentry);
+               if (d_is_dir(target_dentry))
+                       tmprc = cifs_rmdir(target_dir, target_dentry);
+               else
+                       tmprc = cifs_unlink(target_dir, target_dentry);
                if (tmprc)
                        goto cifs_rename_exit;
                rc = cifs_do_rename(xid, source_dentry, from_name,
index 68559fd..5657416 100644 (file)
@@ -213,8 +213,12 @@ create_mf_symlink(const unsigned int xid, struct cifs_tcon *tcon,
        if (rc)
                goto out;
 
-       rc = tcon->ses->server->ops->create_mf_symlink(xid, tcon, cifs_sb,
-                                       fromName, buf, &bytes_written);
+       if (tcon->ses->server->ops->create_mf_symlink)
+               rc = tcon->ses->server->ops->create_mf_symlink(xid, tcon,
+                                       cifs_sb, fromName, buf, &bytes_written);
+       else
+               rc = -EOPNOTSUPP;
+
        if (rc)
                goto out;
 
@@ -339,9 +343,11 @@ cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
        if (rc)
                return rc;
 
-       if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE))
+       if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
+               rc = -ENOENT;
                /* it's not a symlink */
                goto out;
+       }
 
        io_parms.netfid = fid.netfid;
        io_parms.pid = current->tgid;
index 6834b9c..b333ff6 100644 (file)
@@ -925,11 +925,23 @@ cifs_NTtimeToUnix(__le64 ntutc)
        /* BB what about the timezone? BB */
 
        /* Subtract the NTFS time offset, then convert to 1s intervals. */
-       u64 t;
+       s64 t = le64_to_cpu(ntutc) - NTFS_TIME_OFFSET;
+
+       /*
+        * Unfortunately we cannot use normal 64-bit division on a 32-bit arch,
+        * and the alternative, do_div(), does not work with negative numbers,
+        * so we have to special-case them.
+        */
+       if (t < 0) {
+               t = -t;
+               ts.tv_nsec = (long)(do_div(t, 10000000) * 100);
+               ts.tv_nsec = -ts.tv_nsec;
+               ts.tv_sec = -t;
+       } else {
+               ts.tv_nsec = (long)do_div(t, 10000000) * 100;
+               ts.tv_sec = t;
+       }
 
-       t = le64_to_cpu(ntutc) - NTFS_TIME_OFFSET;
-       ts.tv_nsec = do_div(t, 10000000) * 100;
-       ts.tv_sec = t;
        return ts;
 }
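For context on the cifs_NTtimeToUnix() change: NT timestamps count 100 ns intervals since 1601-01-01, so after subtracting the 1601-to-1970 offset the value can be negative for pre-epoch times, and do_div() only accepts an unsigned dividend, hence the sign special-case. A user-space sketch of the same conversion (the offset constant is the standard 116444736000000000; plain 64-bit division is used here instead of do_div()):

#include <stdint.h>
#include <stdio.h>

#define NTFS_TIME_OFFSET 116444736000000000LL   /* 100 ns units between 1601 and 1970 */

int main(void)
{
        int64_t ntutc = 0;                       /* an NT timestamp before the Unix epoch */
        int64_t t = ntutc - NTFS_TIME_OFFSET;    /* may be negative */
        long long tv_sec;
        long tv_nsec;

        if (t < 0) {
                t = -t;
                tv_sec  = -(t / 10000000);
                tv_nsec = -((t % 10000000) * 100);
        } else {
                tv_sec  = t / 10000000;
                tv_nsec = (t % 10000000) * 100;
        }
        printf("sec=%lld nsec=%ld\n", tv_sec, tv_nsec);
        return 0;
}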
 
index 798c80a..b334a89 100644 (file)
@@ -596,8 +596,8 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
                if (server->ops->dir_needs_close(cfile)) {
                        cfile->invalidHandle = true;
                        spin_unlock(&cifs_file_list_lock);
-                       if (server->ops->close)
-                               server->ops->close(xid, tcon, &cfile->fid);
+                       if (server->ops->close_dir)
+                               server->ops->close_dir(xid, tcon, &cfile->fid);
                } else
                        spin_unlock(&cifs_file_list_lock);
                if (cfile->srch_inf.ntwrk_buf_start) {
index 39ee326..57db63f 100644 (file)
@@ -243,10 +243,11 @@ static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
        kfree(ses->serverOS);
 
        ses->serverOS = kzalloc(len + 1, GFP_KERNEL);
-       if (ses->serverOS)
+       if (ses->serverOS) {
                strncpy(ses->serverOS, bcc_ptr, len);
-       if (strncmp(ses->serverOS, "OS/2", 4) == 0)
-               cifs_dbg(FYI, "OS/2 server\n");
+               if (strncmp(ses->serverOS, "OS/2", 4) == 0)
+                       cifs_dbg(FYI, "OS/2 server\n");
+       }
 
        bcc_ptr += len + 1;
        bleft -= len + 1;
@@ -744,14 +745,6 @@ out:
        sess_free_buffer(sess_data);
 }
 
-#else
-
-static void
-sess_auth_lanman(struct sess_data *sess_data)
-{
-       sess_data->result = -EOPNOTSUPP;
-       sess_data->func = NULL;
-}
 #endif
 
 static void
@@ -1102,15 +1095,6 @@ out:
        ses->auth_key.response = NULL;
 }
 
-#else
-
-static void
-sess_auth_kerberos(struct sess_data *sess_data)
-{
-       cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n");
-       sess_data->result = -ENOSYS;
-       sess_data->func = NULL;
-}
 #endif /* ! CONFIG_CIFS_UPCALL */
 
 /*
index 3f17b45..4599294 100644 (file)
@@ -50,7 +50,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
                goto out;
        }
 
-       smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
+       smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
                            GFP_KERNEL);
        if (smb2_data == NULL) {
                rc = -ENOMEM;
index 0150182..899bbc8 100644 (file)
@@ -131,7 +131,7 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
        *adjust_tz = false;
        *symlink = false;
 
-       smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
+       smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
                            GFP_KERNEL);
        if (smb2_data == NULL)
                return -ENOMEM;
index 5a48aa2..f522193 100644 (file)
@@ -389,7 +389,7 @@ smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
        int rc;
        struct smb2_file_all_info *smb2_data;
 
-       smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
+       smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
                            GFP_KERNEL);
        if (smb2_data == NULL)
                return -ENOMEM;
@@ -1035,7 +1035,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
                if (keep_size == false)
                        return -EOPNOTSUPP;
 
-       /* 
+       /*
         * Must check if file sparse since fallocate -z (zero range) assumes
         * non-sparse allocation
         */
index fa0dd04..74b3a66 100644 (file)
@@ -530,7 +530,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
        struct smb2_sess_setup_rsp *rsp = NULL;
        struct kvec iov[2];
        int rc = 0;
-       int resp_buftype;
+       int resp_buftype = CIFS_NO_BUFFER;
        __le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
        struct TCP_Server_Info *server = ses->server;
        u16 blob_length = 0;
@@ -1403,8 +1403,7 @@ SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
        rsp = (struct smb2_close_rsp *)iov[0].iov_base;
 
        if (rc != 0) {
-               if (tcon)
-                       cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
+               cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
                goto close_exit;
        }
 
@@ -1533,7 +1532,7 @@ SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
 {
        return query_info(xid, tcon, persistent_fid, volatile_fid,
                          FILE_ALL_INFORMATION,
-                         sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
+                         sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
                          sizeof(struct smb2_file_all_info), data);
 }
 
index d30ce69..7a5b514 100644 (file)
@@ -106,8 +106,7 @@ static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
                                        unsigned int hash)
 {
        hash += (unsigned long) parent / L1_CACHE_BYTES;
-       hash = hash + (hash >> d_hash_shift);
-       return dentry_hashtable + (hash & d_hash_mask);
+       return dentry_hashtable + hash_32(hash, d_hash_shift);
 }
 
 /* Statistics gathering. */
@@ -2656,6 +2655,12 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
        dentry->d_parent = dentry;
        list_del_init(&dentry->d_u.d_child);
        anon->d_parent = dparent;
+       if (likely(!d_unhashed(anon))) {
+               hlist_bl_lock(&anon->d_sb->s_anon);
+               __hlist_bl_del(&anon->d_hash);
+               anon->d_hash.pprev = NULL;
+               hlist_bl_unlock(&anon->d_sb->s_anon);
+       }
        list_move(&anon->d_u.d_child, &dparent->d_subdirs);
 
        write_seqcount_end(&dentry->d_seq);
@@ -2714,7 +2719,6 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
                        write_seqlock(&rename_lock);
                        __d_materialise_dentry(dentry, new);
                        write_sequnlock(&rename_lock);
-                       __d_drop(new);
                        _d_rehash(new);
                        spin_unlock(&new->d_lock);
                        spin_unlock(&inode->i_lock);
@@ -2778,7 +2782,6 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
                                 * could splice into our tree? */
                                __d_materialise_dentry(dentry, alias);
                                write_sequnlock(&rename_lock);
-                               __d_drop(alias);
                                goto found;
                        } else {
                                /* Nope, but we must(!) avoid directory
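The d_hash() hunk above replaces the ad-hoc "hash + (hash >> d_hash_shift)" fold with hash_32(), which multiplies by a golden-ratio-derived constant and keeps the top d_hash_shift bits, giving a much better spread across buckets. A small sketch of that folding (the multiplier below is illustrative only; the kernel's exact constant has varied across versions):

#include <stdint.h>
#include <stdio.h>

/* Multiplicative fold: multiply by a golden-ratio-derived constant and keep
 * the top 'bits' bits.  0x61C88647 is used here purely for illustration. */
static inline uint32_t hash_32_sketch(uint32_t val, unsigned bits)
{
        return (uint32_t)(val * 0x61C88647u) >> (32 - bits);
}

int main(void)
{
        unsigned d_hash_shift = 12;          /* e.g. a 4096-bucket hash table */
        uint32_t hash = 0xdeadbeef;          /* combined name/parent hash, as in d_hash() */

        printf("bucket = %u\n", hash_32_sketch(hash, d_hash_shift));
        return 0;
}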
index b10b48c..7bcfff9 100644 (file)
@@ -1852,7 +1852,8 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
                goto error_tgt_fput;
 
        /* Check if EPOLLWAKEUP is allowed */
-       ep_take_care_of_epollwakeup(&epds);
+       if (ep_op_has_event(op))
+               ep_take_care_of_epollwakeup(&epds);
 
        /*
         * We have to check that the file structure underneath the file descriptor
index 90a3cdc..603e4eb 100644 (file)
@@ -3240,6 +3240,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
                                 &new.de, &new.inlined);
        if (IS_ERR(new.bh)) {
                retval = PTR_ERR(new.bh);
+               new.bh = NULL;
                goto end_rename;
        }
        if (new.bh) {
@@ -3386,6 +3387,7 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
                                 &new.de, &new.inlined);
        if (IS_ERR(new.bh)) {
                retval = PTR_ERR(new.bh);
+               new.bh = NULL;
                goto end_rename;
        }
 
index bb0e80f..1e43b90 100644 (file)
@@ -575,6 +575,7 @@ handle_bb:
                bh = bclean(handle, sb, block);
                if (IS_ERR(bh)) {
                        err = PTR_ERR(bh);
+                       bh = NULL;
                        goto out;
                }
                overhead = ext4_group_overhead_blocks(sb, group);
@@ -603,6 +604,7 @@ handle_ib:
                bh = bclean(handle, sb, block);
                if (IS_ERR(bh)) {
                        err = PTR_ERR(bh);
+                       bh = NULL;
                        goto out;
                }
 
index d3b4539..da032da 100644 (file)
@@ -982,6 +982,7 @@ nomem:
 submit_op_failed:
        clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
        spin_unlock(&cookie->lock);
+       fscache_unuse_cookie(object);
        kfree(op);
        _leave(" [EIO]");
        return transit_to(KILL_OBJECT);
index 85332b9..de33b3f 100644 (file)
@@ -43,6 +43,19 @@ void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *pa
 }
 EXPORT_SYMBOL(__fscache_wait_on_page_write);
 
+/*
+ * wait for a page to finish being written to the cache. Put a timeout here
+ * since we might be called recursively via parent fs.
+ */
+static
+bool release_page_wait_timeout(struct fscache_cookie *cookie, struct page *page)
+{
+       wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);
+
+       return wait_event_timeout(*wq, !__fscache_check_page_write(cookie, page),
+                                 HZ);
+}
+
 /*
  * decide whether a page can be released, possibly by cancelling a store to it
  * - we're allowed to sleep if __GFP_WAIT is flagged
@@ -115,7 +128,10 @@ page_busy:
        }
 
        fscache_stat(&fscache_n_store_vmscan_wait);
-       __fscache_wait_on_page_write(cookie, page);
+       if (!release_page_wait_timeout(cookie, page))
+               _debug("fscache writeout timeout page: %p{%lx}",
+                       page, page->index);
+
        gfp &= ~__GFP_WAIT;
        goto try_again;
 }
@@ -182,7 +198,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
 {
        struct fscache_operation *op;
        struct fscache_object *object;
-       bool wake_cookie;
+       bool wake_cookie = false;
 
        _enter("%p", cookie);
 
@@ -212,15 +228,16 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
 
        __fscache_use_cookie(cookie);
        if (fscache_submit_exclusive_op(object, op) < 0)
-               goto nobufs;
+               goto nobufs_dec;
        spin_unlock(&cookie->lock);
        fscache_stat(&fscache_n_attr_changed_ok);
        fscache_put_operation(op);
        _leave(" = 0");
        return 0;
 
-nobufs:
+nobufs_dec:
        wake_cookie = __fscache_unuse_cookie(cookie);
+nobufs:
        spin_unlock(&cookie->lock);
        kfree(op);
        if (wake_cookie)
index e6ee5b6..f0b945a 100644 (file)
@@ -359,7 +359,7 @@ static inline void release_metapath(struct metapath *mp)
  * Returns: The length of the extent (minimum of one block)
  */
 
-static inline unsigned int gfs2_extent_length(void *start, unsigned int len, __be64 *ptr, unsigned limit, int *eob)
+static inline unsigned int gfs2_extent_length(void *start, unsigned int len, __be64 *ptr, size_t limit, int *eob)
 {
        const __be64 *end = (start + len);
        const __be64 *first = ptr;
@@ -449,7 +449,7 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
                           struct buffer_head *bh_map, struct metapath *mp,
                           const unsigned int sheight,
                           const unsigned int height,
-                          const unsigned int maxlen)
+                          const size_t maxlen)
 {
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -483,7 +483,8 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
        } else {
                /* Need to allocate indirect blocks */
                ptrs_per_blk = height > 1 ? sdp->sd_inptrs : sdp->sd_diptrs;
-               dblks = min(maxlen, ptrs_per_blk - mp->mp_list[end_of_metadata]);
+               dblks = min(maxlen, (size_t)(ptrs_per_blk -
+                                            mp->mp_list[end_of_metadata]));
                if (height == ip->i_height) {
                        /* Writing into existing tree, extend tree down */
                        iblks = height - sheight;
@@ -605,7 +606,7 @@ int gfs2_block_map(struct inode *inode, sector_t lblock,
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        unsigned int bsize = sdp->sd_sb.sb_bsize;
-       const unsigned int maxlen = bh_map->b_size >> inode->i_blkbits;
+       const size_t maxlen = bh_map->b_size >> inode->i_blkbits;
        const u64 *arr = sdp->sd_heightsize;
        __be64 *ptr;
        u64 size;
index 26b3f95..7f4ed3d 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/dlm.h>
 #include <linux/dlm_plock.h>
 #include <linux/aio.h>
+#include <linux/delay.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -979,9 +980,10 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
        unsigned int state;
        int flags;
        int error = 0;
+       int sleeptime;
 
        state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
-       flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT;
+       flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY_1CB) | GL_EXACT;
 
        mutex_lock(&fp->f_fl_mutex);
 
@@ -1001,7 +1003,14 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
                gfs2_holder_init(gl, state, flags, fl_gh);
                gfs2_glock_put(gl);
        }
-       error = gfs2_glock_nq(fl_gh);
+       for (sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
+               error = gfs2_glock_nq(fl_gh);
+               if (error != GLR_TRYFAILED)
+                       break;
+               fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
+               fl_gh->gh_error = 0;
+               msleep(sleeptime);
+       }
        if (error) {
                gfs2_holder_uninit(fl_gh);
                if (error == GLR_TRYFAILED)
@@ -1024,7 +1033,7 @@ static void do_unflock(struct file *file, struct file_lock *fl)
        mutex_lock(&fp->f_fl_mutex);
        flock_lock_file_wait(file, fl);
        if (fl_gh->gh_gl) {
-               gfs2_glock_dq_wait(fl_gh);
+               gfs2_glock_dq(fl_gh);
                gfs2_holder_uninit(fl_gh);
        }
        mutex_unlock(&fp->f_fl_mutex);
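The do_flock() change above turns a single try-lock request into a short retry loop: up to three attempts, sleeping 1, 2 and then 4 ms after each failure before giving up. A user-space sketch of that loop shape (try_lock() is a hypothetical stand-in for gfs2_glock_nq(), and usleep() plays the role of msleep()):

#include <stdbool.h>
#include <unistd.h>

static int acquire_with_backoff(bool (*try_lock)(void))
{
        /* Mirrors the loop above: attempt, and on failure back off for
         * 1, 2, then 4 ms before the next attempt or the final give-up. */
        for (int sleep_ms = 1; sleep_ms <= 4; sleep_ms <<= 1) {
                if (try_lock())
                        return 0;
                usleep(sleep_ms * 1000);
        }
        return -1;      /* caller maps this to "try failed" */
}

static bool always_busy(void)   /* demo lock that never succeeds */
{
        return false;
}

int main(void)
{
        return acquire_with_backoff(always_busy) ? 1 : 0;
}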
index 67d310c..39e7e99 100644 (file)
@@ -262,6 +262,9 @@ struct gfs2_holder {
        unsigned long gh_ip;
 };
 
+/* Number of quota types we support */
+#define GFS2_MAXQUOTAS 2
+
 /* Resource group multi-block reservation, in order of appearance:
 
    Step 1. Function prepares to write, allocates a mb, sets the size hint.
@@ -282,8 +285,8 @@ struct gfs2_blkreserv {
        u64 rs_inum;                  /* Inode number for reservation */
 
        /* ancillary quota stuff */
-       struct gfs2_quota_data *rs_qa_qd[2 * MAXQUOTAS];
-       struct gfs2_holder rs_qa_qd_ghs[2 * MAXQUOTAS];
+       struct gfs2_quota_data *rs_qa_qd[2 * GFS2_MAXQUOTAS];
+       struct gfs2_holder rs_qa_qd_ghs[2 * GFS2_MAXQUOTAS];
        unsigned int rs_qa_qd_num;
 };
 
index e62e594..fc8ac2e 100644 (file)
@@ -626,8 +626,10 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        if (!IS_ERR(inode)) {
                d = d_splice_alias(inode, dentry);
                error = PTR_ERR(d);
-               if (IS_ERR(d))
+               if (IS_ERR(d)) {
+                       inode = ERR_CAST(d);
                        goto fail_gunlock;
+               }
                error = 0;
                if (file) {
                        if (S_ISREG(inode->i_mode)) {
@@ -840,8 +842,10 @@ static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
        int error;
 
        inode = gfs2_lookupi(dir, &dentry->d_name, 0);
-       if (!inode)
+       if (inode == NULL) {
+               d_add(dentry, NULL);
                return NULL;
+       }
        if (IS_ERR(inode))
                return ERR_CAST(inode);
 
@@ -854,7 +858,6 @@ static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
 
        d = d_splice_alias(inode, dentry);
        if (IS_ERR(d)) {
-               iput(inode);
                gfs2_glock_dq_uninit(&gh);
                return d;
        }
index 2607ff1..a346f56 100644 (file)
@@ -1294,7 +1294,7 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
        int val;
 
        if (is_ancestor(root, sdp->sd_master_dir))
-               seq_printf(s, ",meta");
+               seq_puts(s, ",meta");
        if (args->ar_lockproto[0])
                seq_printf(s, ",lockproto=%s", args->ar_lockproto);
        if (args->ar_locktable[0])
@@ -1302,13 +1302,13 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
        if (args->ar_hostdata[0])
                seq_printf(s, ",hostdata=%s", args->ar_hostdata);
        if (args->ar_spectator)
-               seq_printf(s, ",spectator");
+               seq_puts(s, ",spectator");
        if (args->ar_localflocks)
-               seq_printf(s, ",localflocks");
+               seq_puts(s, ",localflocks");
        if (args->ar_debug)
-               seq_printf(s, ",debug");
+               seq_puts(s, ",debug");
        if (args->ar_posix_acl)
-               seq_printf(s, ",acl");
+               seq_puts(s, ",acl");
        if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
                char *state;
                switch (args->ar_quota) {
@@ -1328,7 +1328,7 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
                seq_printf(s, ",quota=%s", state);
        }
        if (args->ar_suiddir)
-               seq_printf(s, ",suiddir");
+               seq_puts(s, ",suiddir");
        if (args->ar_data != GFS2_DATA_DEFAULT) {
                char *state;
                switch (args->ar_data) {
@@ -1345,7 +1345,7 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
                seq_printf(s, ",data=%s", state);
        }
        if (args->ar_discard)
-               seq_printf(s, ",discard");
+               seq_puts(s, ",discard");
        val = sdp->sd_tune.gt_logd_secs;
        if (val != 30)
                seq_printf(s, ",commit=%d", val);
@@ -1376,11 +1376,11 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
                seq_printf(s, ",errors=%s", state);
        }
        if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
-               seq_printf(s, ",nobarrier");
+               seq_puts(s, ",nobarrier");
        if (test_bit(SDF_DEMOTE, &sdp->sd_flags))
-               seq_printf(s, ",demote_interface_used");
+               seq_puts(s, ",demote_interface_used");
        if (args->ar_rgrplvb)
-               seq_printf(s, ",rgrplvb");
+               seq_puts(s, ",rgrplvb");
        return 0;
 }
 
index 8f27c93..ec9e082 100644 (file)
@@ -253,13 +253,11 @@ static int lockd_up_net(struct svc_serv *serv, struct net *net)
 
        error = make_socks(serv, net);
        if (error < 0)
-               goto err_socks;
+               goto err_bind;
        set_grace_period(net);
        dprintk("lockd_up_net: per-net data created; net=%p\n", net);
        return 0;
 
-err_socks:
-       svc_rpcb_cleanup(serv, net);
 err_bind:
        ln->nlmsvc_users--;
        return error;
index a996bb4..a7b05bf 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/device_cgroup.h>
 #include <linux/fs_struct.h>
 #include <linux/posix_acl.h>
+#include <linux/hash.h>
 #include <asm/uaccess.h>
 
 #include "internal.h"
@@ -643,24 +644,22 @@ static int complete_walk(struct nameidata *nd)
 
 static __always_inline void set_root(struct nameidata *nd)
 {
-       if (!nd->root.mnt)
-               get_fs_root(current->fs, &nd->root);
+       get_fs_root(current->fs, &nd->root);
 }
 
 static int link_path_walk(const char *, struct nameidata *);
 
-static __always_inline void set_root_rcu(struct nameidata *nd)
+static __always_inline unsigned set_root_rcu(struct nameidata *nd)
 {
-       if (!nd->root.mnt) {
-               struct fs_struct *fs = current->fs;
-               unsigned seq;
+       struct fs_struct *fs = current->fs;
+       unsigned seq, res;
 
-               do {
-                       seq = read_seqcount_begin(&fs->seq);
-                       nd->root = fs->root;
-                       nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
-               } while (read_seqcount_retry(&fs->seq, seq));
-       }
+       do {
+               seq = read_seqcount_begin(&fs->seq);
+               nd->root = fs->root;
+               res = __read_seqcount_begin(&nd->root.dentry->d_seq);
+       } while (read_seqcount_retry(&fs->seq, seq));
+       return res;
 }
 
 static void path_put_conditional(struct path *path, struct nameidata *nd)
@@ -860,7 +859,8 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
                        return PTR_ERR(s);
                }
                if (*s == '/') {
-                       set_root(nd);
+                       if (!nd->root.mnt)
+                               set_root(nd);
                        path_put(&nd->path);
                        nd->path = nd->root;
                        path_get(&nd->root);
@@ -1137,13 +1137,15 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
                 */
                *inode = path->dentry->d_inode;
        }
-       return read_seqretry(&mount_lock, nd->m_seq) &&
+       return !read_seqretry(&mount_lock, nd->m_seq) &&
                !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT);
 }
 
 static int follow_dotdot_rcu(struct nameidata *nd)
 {
-       set_root_rcu(nd);
+       struct inode *inode = nd->inode;
+       if (!nd->root.mnt)
+               set_root_rcu(nd);
 
        while (1) {
                if (nd->path.dentry == nd->root.dentry &&
@@ -1155,6 +1157,7 @@ static int follow_dotdot_rcu(struct nameidata *nd)
                        struct dentry *parent = old->d_parent;
                        unsigned seq;
 
+                       inode = parent->d_inode;
                        seq = read_seqcount_begin(&parent->d_seq);
                        if (read_seqcount_retry(&old->d_seq, nd->seq))
                                goto failed;
@@ -1164,6 +1167,7 @@ static int follow_dotdot_rcu(struct nameidata *nd)
                }
                if (!follow_up_rcu(&nd->path))
                        break;
+               inode = nd->path.dentry->d_inode;
                nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
        }
        while (d_mountpoint(nd->path.dentry)) {
@@ -1173,11 +1177,12 @@ static int follow_dotdot_rcu(struct nameidata *nd)
                        break;
                nd->path.mnt = &mounted->mnt;
                nd->path.dentry = mounted->mnt.mnt_root;
+               inode = nd->path.dentry->d_inode;
                nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
-               if (!read_seqretry(&mount_lock, nd->m_seq))
+               if (read_seqretry(&mount_lock, nd->m_seq))
                        goto failed;
        }
-       nd->inode = nd->path.dentry->d_inode;
+       nd->inode = inode;
        return 0;
 
 failed:
@@ -1256,7 +1261,8 @@ static void follow_mount(struct path *path)
 
 static void follow_dotdot(struct nameidata *nd)
 {
-       set_root(nd);
+       if (!nd->root.mnt)
+               set_root(nd);
 
        while(1) {
                struct dentry *old = nd->path.dentry;
@@ -1634,8 +1640,7 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
 
 static inline unsigned int fold_hash(unsigned long hash)
 {
-       hash += hash >> (8*sizeof(int));
-       return hash;
+       return hash_64(hash, 32);
 }
 
 #else  /* 32-bit case */
@@ -1669,9 +1674,9 @@ EXPORT_SYMBOL(full_name_hash);
 
 /*
  * Calculate the length and hash of the path component, and
- * return the length of the component;
+ * return the "hash_len" as the result.
  */
-static inline unsigned long hash_name(const char *name, unsigned int *hashp)
+static inline u64 hash_name(const char *name)
 {
        unsigned long a, b, adata, bdata, mask, hash, len;
        const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
@@ -1691,9 +1696,8 @@ static inline unsigned long hash_name(const char *name, unsigned int *hashp)
        mask = create_zero_mask(adata | bdata);
 
        hash += a & zero_bytemask(mask);
-       *hashp = fold_hash(hash);
-
-       return len + find_zero(mask);
+       len += find_zero(mask);
+       return hashlen_create(fold_hash(hash), len);
 }
 
 #else
@@ -1711,7 +1715,7 @@ EXPORT_SYMBOL(full_name_hash);
  * We know there's a real path component here of at least
  * one character.
  */
-static inline unsigned long hash_name(const char *name, unsigned int *hashp)
+static inline u64 hash_name(const char *name)
 {
        unsigned long hash = init_name_hash();
        unsigned long len = 0, c;
@@ -1722,8 +1726,7 @@ static inline unsigned long hash_name(const char *name, unsigned int *hashp)
                hash = partial_name_hash(c, hash);
                c = (unsigned char)name[len];
        } while (c && c != '/');
-       *hashp = end_name_hash(hash);
-       return len;
+       return hashlen_create(end_name_hash(hash), len);
 }
 
 #endif
@@ -1748,20 +1751,17 @@ static int link_path_walk(const char *name, struct nameidata *nd)
 
        /* At this point we know we have a real path component. */
        for(;;) {
-               struct qstr this;
-               long len;
+               u64 hash_len;
                int type;
 
                err = may_lookup(nd);
                if (err)
                        break;
 
-               len = hash_name(name, &this.hash);
-               this.name = name;
-               this.len = len;
+               hash_len = hash_name(name);
 
                type = LAST_NORM;
-               if (name[0] == '.') switch (len) {
+               if (name[0] == '.') switch (hashlen_len(hash_len)) {
                        case 2:
                                if (name[1] == '.') {
                                        type = LAST_DOTDOT;
@@ -1775,29 +1775,32 @@ static int link_path_walk(const char *name, struct nameidata *nd)
                        struct dentry *parent = nd->path.dentry;
                        nd->flags &= ~LOOKUP_JUMPED;
                        if (unlikely(parent->d_flags & DCACHE_OP_HASH)) {
+                               struct qstr this = { { .hash_len = hash_len }, .name = name };
                                err = parent->d_op->d_hash(parent, &this);
                                if (err < 0)
                                        break;
+                               hash_len = this.hash_len;
+                               name = this.name;
                        }
                }
 
-               nd->last = this;
+               nd->last.hash_len = hash_len;
+               nd->last.name = name;
                nd->last_type = type;
 
-               if (!name[len])
+               name += hashlen_len(hash_len);
+               if (!*name)
                        return 0;
                /*
                 * If it wasn't NUL, we know it was '/'. Skip that
                 * slash, and continue until no more slashes.
                 */
                do {
-                       len++;
-               } while (unlikely(name[len] == '/'));
-               if (!name[len])
+                       name++;
+               } while (unlikely(*name == '/'));
+               if (!*name)
                        return 0;
 
-               name += len;
-
                err = walk_component(nd, &next, LOOKUP_FOLLOW);
                if (err < 0)
                        return err;
@@ -1852,7 +1855,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
        if (*name=='/') {
                if (flags & LOOKUP_RCU) {
                        rcu_read_lock();
-                       set_root_rcu(nd);
+                       nd->seq = set_root_rcu(nd);
                } else {
                        set_root(nd);
                        path_get(&nd->root);
@@ -1903,7 +1906,14 @@ static int path_init(int dfd, const char *name, unsigned int flags,
        }
 
        nd->inode = nd->path.dentry->d_inode;
-       return 0;
+       if (!(flags & LOOKUP_RCU))
+               return 0;
+       if (likely(!read_seqcount_retry(&nd->path.dentry->d_seq, nd->seq)))
+               return 0;
+       if (!(nd->flags & LOOKUP_ROOT))
+               nd->root.mnt = NULL;
+       rcu_read_unlock();
+       return -ECHILD;
 }
 
 static inline int lookup_last(struct nameidata *nd, struct path *path)
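
(Illustration, not part of the patch.) The link_path_walk()/hash_name() changes above replace the separate length and hash outputs with a single u64 "hash_len": the component length lives in the upper 32 bits and the 32-bit hash in the lower ones, matching the hashlen_hash()/hashlen_len()/hashlen_create() macros added to dcache.h further down. A small standalone sketch of that packing; the hash value is an arbitrary placeholder.

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

#define hashlen_hash(hashlen)     ((uint32_t)(hashlen))
#define hashlen_len(hashlen)      ((uint32_t)((hashlen) >> 32))
#define hashlen_create(hash, len) (((uint64_t)(len) << 32) | (uint32_t)(hash))

int main(void)
{
        /* e.g. the component "hello": length 5, hash value made up here */
        uint64_t hl = hashlen_create(0xdeadbeefu, 5);

        assert(hashlen_hash(hl) == 0xdeadbeefu);
        assert(hashlen_len(hl) == 5);
        printf("len=%u hash=%#x\n", (unsigned)hashlen_len(hl),
               (unsigned)hashlen_hash(hl));
        return 0;
}
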
index 1c5ff6d..6a4f366 100644 (file)
@@ -1412,24 +1412,18 @@ int nfs_fs_proc_net_init(struct net *net)
        p = proc_create("volumes", S_IFREG|S_IRUGO,
                        nn->proc_nfsfs, &nfs_volume_list_fops);
        if (!p)
-               goto error_2;
+               goto error_1;
        return 0;
 
-error_2:
-       remove_proc_entry("servers", nn->proc_nfsfs);
 error_1:
-       remove_proc_entry("fs/nfsfs", NULL);
+       remove_proc_subtree("nfsfs", net->proc_net);
 error_0:
        return -ENOMEM;
 }
 
 void nfs_fs_proc_net_exit(struct net *net)
 {
-       struct nfs_net *nn = net_generic(net, nfs_net_id);
-
-       remove_proc_entry("volumes", nn->proc_nfsfs);
-       remove_proc_entry("servers", nn->proc_nfsfs);
-       remove_proc_entry("fs/nfsfs", NULL);
+       remove_proc_subtree("nfsfs", net->proc_net);
 }
 
 /*
index 1359c4a..9097807 100644 (file)
@@ -1269,11 +1269,12 @@ filelayout_search_commit_reqs(struct nfs_commit_info *cinfo, struct page *page)
 static void filelayout_retry_commit(struct nfs_commit_info *cinfo, int idx)
 {
        struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
-       struct pnfs_commit_bucket *bucket = fl_cinfo->buckets;
+       struct pnfs_commit_bucket *bucket;
        struct pnfs_layout_segment *freeme;
        int i;
 
-       for (i = idx; i < fl_cinfo->nbuckets; i++, bucket++) {
+       for (i = idx; i < fl_cinfo->nbuckets; i++) {
+               bucket = &fl_cinfo->buckets[i];
                if (list_empty(&bucket->committing))
                        continue;
                nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo);
index 92193ed..a8b855a 100644 (file)
@@ -130,16 +130,15 @@ enum {
  */
 
 struct nfs4_lock_state {
-       struct list_head                ls_locks;   /* Other lock stateids */
-       struct nfs4_state *             ls_state;   /* Pointer to open state */
+       struct list_head        ls_locks;       /* Other lock stateids */
+       struct nfs4_state *     ls_state;       /* Pointer to open state */
 #define NFS_LOCK_INITIALIZED 0
 #define NFS_LOCK_LOST        1
-       unsigned long                   ls_flags;
+       unsigned long           ls_flags;
        struct nfs_seqid_counter        ls_seqid;
-       nfs4_stateid                    ls_stateid;
-       atomic_t                        ls_count;
-       fl_owner_t                      ls_owner;
-       struct work_struct              ls_release;
+       nfs4_stateid            ls_stateid;
+       atomic_t                ls_count;
+       fl_owner_t              ls_owner;
 };
 
 /* bits for nfs4_state->flags */
index 53e435a..ffdb28d 100644 (file)
@@ -482,6 +482,16 @@ int nfs40_walk_client_list(struct nfs_client *new,
 
        spin_lock(&nn->nfs_client_lock);
        list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
+
+               if (pos->rpc_ops != new->rpc_ops)
+                       continue;
+
+               if (pos->cl_proto != new->cl_proto)
+                       continue;
+
+               if (pos->cl_minorversion != new->cl_minorversion)
+                       continue;
+
                /* If "pos" isn't marked ready, we can't trust the
                 * remaining fields in "pos" */
                if (pos->cl_cons_state > NFS_CS_READY) {
@@ -501,15 +511,6 @@ int nfs40_walk_client_list(struct nfs_client *new,
                if (pos->cl_cons_state != NFS_CS_READY)
                        continue;
 
-               if (pos->rpc_ops != new->rpc_ops)
-                       continue;
-
-               if (pos->cl_proto != new->cl_proto)
-                       continue;
-
-               if (pos->cl_minorversion != new->cl_minorversion)
-                       continue;
-
                if (pos->cl_clientid != new->cl_clientid)
                        continue;
 
@@ -622,6 +623,16 @@ int nfs41_walk_client_list(struct nfs_client *new,
 
        spin_lock(&nn->nfs_client_lock);
        list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
+
+               if (pos->rpc_ops != new->rpc_ops)
+                       continue;
+
+               if (pos->cl_proto != new->cl_proto)
+                       continue;
+
+               if (pos->cl_minorversion != new->cl_minorversion)
+                       continue;
+
                /* If "pos" isn't marked ready, we can't trust the
                 * remaining fields in "pos", especially the client
                 * ID and serverowner fields.  Wait for CREATE_SESSION
@@ -647,15 +658,6 @@ int nfs41_walk_client_list(struct nfs_client *new,
                if (pos->cl_cons_state != NFS_CS_READY)
                        continue;
 
-               if (pos->rpc_ops != new->rpc_ops)
-                       continue;
-
-               if (pos->cl_proto != new->cl_proto)
-                       continue;
-
-               if (pos->cl_minorversion != new->cl_minorversion)
-                       continue;
-
                if (!nfs4_match_clientids(pos, new))
                        continue;
 
index 7dd8aca..6ca0c8e 100644 (file)
@@ -2226,9 +2226,13 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
        ret = _nfs4_proc_open(opendata);
        if (ret != 0) {
                if (ret == -ENOENT) {
-                       d_drop(opendata->dentry);
-                       d_add(opendata->dentry, NULL);
-                       nfs_set_verifier(opendata->dentry,
+                       dentry = opendata->dentry;
+                       if (dentry->d_inode)
+                               d_delete(dentry);
+                       else if (d_unhashed(dentry))
+                               d_add(dentry, NULL);
+
+                       nfs_set_verifier(dentry,
                                         nfs_save_change_attribute(opendata->dir->d_inode));
                }
                goto out;
@@ -2614,23 +2618,23 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
        is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
        is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
        is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
-       /* Calculate the current open share mode */
-       calldata->arg.fmode = 0;
-       if (is_rdonly || is_rdwr)
-               calldata->arg.fmode |= FMODE_READ;
-       if (is_wronly || is_rdwr)
-               calldata->arg.fmode |= FMODE_WRITE;
        /* Calculate the change in open mode */
+       calldata->arg.fmode = 0;
        if (state->n_rdwr == 0) {
-               if (state->n_rdonly == 0) {
-                       call_close |= is_rdonly || is_rdwr;
-                       calldata->arg.fmode &= ~FMODE_READ;
-               }
-               if (state->n_wronly == 0) {
-                       call_close |= is_wronly || is_rdwr;
-                       calldata->arg.fmode &= ~FMODE_WRITE;
-               }
-       }
+               if (state->n_rdonly == 0)
+                       call_close |= is_rdonly;
+               else if (is_rdonly)
+                       calldata->arg.fmode |= FMODE_READ;
+               if (state->n_wronly == 0)
+                       call_close |= is_wronly;
+               else if (is_wronly)
+                       calldata->arg.fmode |= FMODE_WRITE;
+       } else if (is_rdwr)
+               calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
+
+       if (calldata->arg.fmode == 0)
+               call_close |= is_rdwr;
+
        if (!nfs4_valid_open_stateid(state))
                call_close = 0;
        spin_unlock(&state->owner->so_lock);
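
(Illustration, not part of the patch.) The nfs4_close_prepare() hunk above derives the share mode to keep from the opens that remain (n_rdonly/n_wronly/n_rdwr) instead of clearing bits from the current mode, and only asks for a full close when nothing is left. A hedged userspace rendering of that calculation; the constants and helper name are illustrative only.

#include <stdio.h>
#include <stdbool.h>

#define FMODE_READ  0x1
#define FMODE_WRITE 0x2

static void downgrade(int n_rdonly, int n_wronly, int n_rdwr,
                      bool is_rdonly, bool is_wronly, bool is_rdwr)
{
        unsigned int fmode = 0;
        bool call_close = false;

        if (n_rdwr == 0) {
                if (n_rdonly == 0)
                        call_close |= is_rdonly;
                else if (is_rdonly)
                        fmode |= FMODE_READ;
                if (n_wronly == 0)
                        call_close |= is_wronly;
                else if (is_wronly)
                        fmode |= FMODE_WRITE;
        } else if (is_rdwr)
                fmode |= FMODE_READ | FMODE_WRITE;

        if (fmode == 0)
                call_close |= is_rdwr;

        printf("keep fmode=%#x, call_close=%d\n", fmode, call_close);
}

int main(void)
{
        /* Last read/write opener went away, one reader remains: keep READ. */
        downgrade(1, 0, 0, true, false, true);
        return 0;
}
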
index a043f61..22fe351 100644 (file)
@@ -799,18 +799,6 @@ __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
        return NULL;
 }
 
-static void
-free_lock_state_work(struct work_struct *work)
-{
-       struct nfs4_lock_state *lsp = container_of(work,
-                                       struct nfs4_lock_state, ls_release);
-       struct nfs4_state *state = lsp->ls_state;
-       struct nfs_server *server = state->owner->so_server;
-       struct nfs_client *clp = server->nfs_client;
-
-       clp->cl_mvops->free_lock_state(server, lsp);
-}
-
 /*
  * Return a compatible lock_state. If no initialized lock_state structure
  * exists, return an uninitialized one.
@@ -832,7 +820,6 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
        if (lsp->ls_seqid.owner_id < 0)
                goto out_free;
        INIT_LIST_HEAD(&lsp->ls_locks);
-       INIT_WORK(&lsp->ls_release, free_lock_state_work);
        return lsp;
 out_free:
        kfree(lsp);
@@ -896,12 +883,13 @@ void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
        if (list_empty(&state->lock_states))
                clear_bit(LK_STATE_IN_USE, &state->flags);
        spin_unlock(&state->state_lock);
-       if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags))
-               queue_work(nfsiod_workqueue, &lsp->ls_release);
-       else {
-               server = state->owner->so_server;
+       server = state->owner->so_server;
+       if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
+               struct nfs_client *clp = server->nfs_client;
+
+               clp->cl_mvops->free_lock_state(server, lsp);
+       } else
                nfs4_free_lock_state(server, lsp);
-       }
 }
 
 static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
index f9821ce..e94457c 100644 (file)
@@ -2657,6 +2657,7 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
        struct xdr_stream *xdr = cd->xdr;
        int start_offset = xdr->buf->len;
        int cookie_offset;
+       u32 name_and_cookie;
        int entry_bytes;
        __be32 nfserr = nfserr_toosmall;
        __be64 wire_offset;
@@ -2718,7 +2719,14 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
        cd->rd_maxcount -= entry_bytes;
        if (!cd->rd_dircount)
                goto fail;
-       cd->rd_dircount--;
+       /*
+        * RFC 3530 14.2.24 describes rd_dircount as only a "hint", so
+        * let's always let through the first entry, at least:
+        */
+       name_and_cookie = 4 * XDR_QUADLEN(namlen) + 8;
+       if (name_and_cookie > cd->rd_dircount && cd->cookie_offset)
+               goto fail;
+       cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie);
        cd->cookie_offset = cookie_offset;
 skip_entry:
        cd->common.err = nfs_ok;
@@ -3321,6 +3329,10 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
        }
        maxcount = min_t(int, maxcount-16, bytes_left);
 
+       /* RFC 3530 14.2.24 allows us to ignore dircount when it's 0: */
+       if (!readdir->rd_dircount)
+               readdir->rd_dircount = INT_MAX;
+
        readdir->xdr = xdr;
        readdir->rd_maxcount = maxcount;
        readdir->common.err = 0;
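
(Illustration, not part of the patch.) The nfsd4_encode_dirent() hunk above charges each entry against the client's rd_dircount hint as the XDR size of its cookie plus the padded name, while always letting the first entry through. A small sketch of that arithmetic; XDR_QUADLEN() is assumed here to round bytes up to 4-byte XDR words, as in include/linux/sunrpc/xdr.h, and the numbers are only an example.

#include <stdio.h>

/* Assumed definition: byte length rounded up to 4-byte XDR words. */
#define XDR_QUADLEN(l)  (((l) + 3) >> 2)

int main(void)
{
        unsigned int namlen = 5;                        /* entry name "hello" */
        unsigned int name_and_cookie = 4 * XDR_QUADLEN(namlen) + 8;
        unsigned int rd_dircount = 10;                  /* client's dircount hint */

        /* First entry is always emitted; afterwards encoding stops once the
         * hint is used up, and the counter never wraps below zero. */
        rd_dircount -= (rd_dircount < name_and_cookie) ? rd_dircount : name_and_cookie;
        printf("charged %u bytes, %u left of the hint\n",
               name_and_cookie, rd_dircount);
        return 0;
}
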
index 238a593..9d7e2b9 100644 (file)
@@ -42,7 +42,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode)
 {
        struct {
                struct file_handle handle;
-               u8 pad[64];
+               u8 pad[MAX_HANDLE_SZ];
        } f;
        int size, ret, i;
 
@@ -50,7 +50,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode)
        size = f.handle.handle_bytes >> 2;
 
        ret = exportfs_encode_inode_fh(inode, (struct fid *)f.handle.f_handle, &size, 0);
-       if ((ret == 255) || (ret == -ENOSPC)) {
+       if ((ret == FILEID_INVALID) || (ret < 0)) {
                WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret);
                return 0;
        }
index 6eaf5ed..e77db62 100644 (file)
@@ -45,7 +45,7 @@ void udf_free_inode(struct inode *inode)
        udf_free_blocks(sb, NULL, &UDF_I(inode)->i_location, 0, 1);
 }
 
-struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
+struct inode *udf_new_inode(struct inode *dir, umode_t mode)
 {
        struct super_block *sb = dir->i_sb;
        struct udf_sb_info *sbi = UDF_SB(sb);
@@ -55,14 +55,12 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
        struct udf_inode_info *iinfo;
        struct udf_inode_info *dinfo = UDF_I(dir);
        struct logicalVolIntegrityDescImpUse *lvidiu;
+       int err;
 
        inode = new_inode(sb);
 
-       if (!inode) {
-               *err = -ENOMEM;
-               return NULL;
-       }
-       *err = -ENOSPC;
+       if (!inode)
+               return ERR_PTR(-ENOMEM);
 
        iinfo = UDF_I(inode);
        if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE)) {
@@ -80,21 +78,22 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
        }
        if (!iinfo->i_ext.i_data) {
                iput(inode);
-               *err = -ENOMEM;
-               return NULL;
+               return ERR_PTR(-ENOMEM);
        }
 
+       err = -ENOSPC;
        block = udf_new_block(dir->i_sb, NULL,
                              dinfo->i_location.partitionReferenceNum,
-                             start, err);
-       if (*err) {
+                             start, &err);
+       if (err) {
                iput(inode);
-               return NULL;
+               return ERR_PTR(err);
        }
 
        lvidiu = udf_sb_lvidiu(sb);
        if (lvidiu) {
                iinfo->i_unique = lvid_get_unique_id(sb);
+               inode->i_generation = iinfo->i_unique;
                mutex_lock(&sbi->s_alloc_mutex);
                if (S_ISDIR(mode))
                        le32_add_cpu(&lvidiu->numDirs, 1);
@@ -123,9 +122,12 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
                iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
        inode->i_mtime = inode->i_atime = inode->i_ctime =
                iinfo->i_crtime = current_fs_time(inode->i_sb);
-       insert_inode_hash(inode);
+       if (unlikely(insert_inode_locked(inode) < 0)) {
+               make_bad_inode(inode);
+               iput(inode);
+               return ERR_PTR(-EIO);
+       }
        mark_inode_dirty(inode);
 
-       *err = 0;
        return inode;
 }
index 236cd48..0859884 100644 (file)
@@ -51,7 +51,6 @@ MODULE_LICENSE("GPL");
 
 static umode_t udf_convert_permissions(struct fileEntry *);
 static int udf_update_inode(struct inode *, int);
-static void udf_fill_inode(struct inode *, struct buffer_head *);
 static int udf_sync_inode(struct inode *inode);
 static int udf_alloc_i_data(struct inode *inode, size_t size);
 static sector_t inode_getblk(struct inode *, sector_t, int *, int *);
@@ -1271,12 +1270,33 @@ update_time:
        return 0;
 }
 
-static void __udf_read_inode(struct inode *inode)
+/*
+ * Maximum length of linked list formed by ICB hierarchy. The chosen number is
+ * arbitrary - just that we hopefully don't limit any real use of rewritten
+ * inode on write-once media but avoid looping for too long on corrupted media.
+ */
+#define UDF_MAX_ICB_NESTING 1024
+
+static int udf_read_inode(struct inode *inode)
 {
        struct buffer_head *bh = NULL;
        struct fileEntry *fe;
+       struct extendedFileEntry *efe;
        uint16_t ident;
        struct udf_inode_info *iinfo = UDF_I(inode);
+       struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
+       struct kernel_lb_addr *iloc = &iinfo->i_location;
+       unsigned int link_count;
+       unsigned int indirections = 0;
+       int ret = -EIO;
+
+reread:
+       if (iloc->logicalBlockNum >=
+           sbi->s_partmaps[iloc->partitionReferenceNum].s_partition_len) {
+               udf_debug("block=%d, partition=%d out of range\n",
+                         iloc->logicalBlockNum, iloc->partitionReferenceNum);
+               return -EIO;
+       }
 
        /*
         * Set defaults, but the inode is still incomplete!
@@ -1290,78 +1310,54 @@ static void __udf_read_inode(struct inode *inode)
         *      i_nlink = 1
         *      i_op = NULL;
         */
-       bh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 0, &ident);
+       bh = udf_read_ptagged(inode->i_sb, iloc, 0, &ident);
        if (!bh) {
                udf_err(inode->i_sb, "(ino %ld) failed !bh\n", inode->i_ino);
-               make_bad_inode(inode);
-               return;
+               return -EIO;
        }
 
        if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
            ident != TAG_IDENT_USE) {
                udf_err(inode->i_sb, "(ino %ld) failed ident=%d\n",
                        inode->i_ino, ident);
-               brelse(bh);
-               make_bad_inode(inode);
-               return;
+               goto out;
        }
 
        fe = (struct fileEntry *)bh->b_data;
+       efe = (struct extendedFileEntry *)bh->b_data;
 
        if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
                struct buffer_head *ibh;
 
-               ibh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 1,
-                                       &ident);
+               ibh = udf_read_ptagged(inode->i_sb, iloc, 1, &ident);
                if (ident == TAG_IDENT_IE && ibh) {
-                       struct buffer_head *nbh = NULL;
                        struct kernel_lb_addr loc;
                        struct indirectEntry *ie;
 
                        ie = (struct indirectEntry *)ibh->b_data;
                        loc = lelb_to_cpu(ie->indirectICB.extLocation);
 
-                       if (ie->indirectICB.extLength &&
-                               (nbh = udf_read_ptagged(inode->i_sb, &loc, 0,
-                                                       &ident))) {
-                               if (ident == TAG_IDENT_FE ||
-                                       ident == TAG_IDENT_EFE) {
-                                       memcpy(&iinfo->i_location,
-                                               &loc,
-                                               sizeof(struct kernel_lb_addr));
-                                       brelse(bh);
-                                       brelse(ibh);
-                                       brelse(nbh);
-                                       __udf_read_inode(inode);
-                                       return;
+                       if (ie->indirectICB.extLength) {
+                               brelse(ibh);
+                               memcpy(&iinfo->i_location, &loc,
+                                      sizeof(struct kernel_lb_addr));
+                               if (++indirections > UDF_MAX_ICB_NESTING) {
+                                       udf_err(inode->i_sb,
+                                               "too many ICBs in ICB hierarchy"
+                                               " (max %d supported)\n",
+                                               UDF_MAX_ICB_NESTING);
+                                       goto out;
                                }
-                               brelse(nbh);
+                               brelse(bh);
+                               goto reread;
                        }
                }
                brelse(ibh);
        } else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
                udf_err(inode->i_sb, "unsupported strategy type: %d\n",
                        le16_to_cpu(fe->icbTag.strategyType));
-               brelse(bh);
-               make_bad_inode(inode);
-               return;
+               goto out;
        }
-       udf_fill_inode(inode, bh);
-
-       brelse(bh);
-}
-
-static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
-{
-       struct fileEntry *fe;
-       struct extendedFileEntry *efe;
-       struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
-       struct udf_inode_info *iinfo = UDF_I(inode);
-       unsigned int link_count;
-
-       fe = (struct fileEntry *)bh->b_data;
-       efe = (struct extendedFileEntry *)bh->b_data;
-
        if (fe->icbTag.strategyType == cpu_to_le16(4))
                iinfo->i_strat4096 = 0;
        else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
@@ -1378,11 +1374,10 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
        if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
                iinfo->i_efe = 1;
                iinfo->i_use = 0;
-               if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
-                                       sizeof(struct extendedFileEntry))) {
-                       make_bad_inode(inode);
-                       return;
-               }
+               ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
+                                       sizeof(struct extendedFileEntry));
+               if (ret)
+                       goto out;
                memcpy(iinfo->i_ext.i_data,
                       bh->b_data + sizeof(struct extendedFileEntry),
                       inode->i_sb->s_blocksize -
@@ -1390,11 +1385,10 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
        } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
                iinfo->i_efe = 0;
                iinfo->i_use = 0;
-               if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
-                                               sizeof(struct fileEntry))) {
-                       make_bad_inode(inode);
-                       return;
-               }
+               ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
+                                               sizeof(struct fileEntry));
+               if (ret)
+                       goto out;
                memcpy(iinfo->i_ext.i_data,
                       bh->b_data + sizeof(struct fileEntry),
                       inode->i_sb->s_blocksize - sizeof(struct fileEntry));
@@ -1404,18 +1398,18 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
                iinfo->i_lenAlloc = le32_to_cpu(
                                ((struct unallocSpaceEntry *)bh->b_data)->
                                 lengthAllocDescs);
-               if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
-                                       sizeof(struct unallocSpaceEntry))) {
-                       make_bad_inode(inode);
-                       return;
-               }
+               ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
+                                       sizeof(struct unallocSpaceEntry));
+               if (ret)
+                       goto out;
                memcpy(iinfo->i_ext.i_data,
                       bh->b_data + sizeof(struct unallocSpaceEntry),
                       inode->i_sb->s_blocksize -
                                        sizeof(struct unallocSpaceEntry));
-               return;
+               return 0;
        }
 
+       ret = -EIO;
        read_lock(&sbi->s_cred_lock);
        i_uid_write(inode, le32_to_cpu(fe->uid));
        if (!uid_valid(inode->i_uid) ||
@@ -1441,8 +1435,10 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
        read_unlock(&sbi->s_cred_lock);
 
        link_count = le16_to_cpu(fe->fileLinkCount);
-       if (!link_count)
-               link_count = 1;
+       if (!link_count) {
+               ret = -ESTALE;
+               goto out;
+       }
        set_nlink(inode, link_count);
 
        inode->i_size = le64_to_cpu(fe->informationLength);
@@ -1488,6 +1484,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
                iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
                iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);
        }
+       inode->i_generation = iinfo->i_unique;
 
        switch (fe->icbTag.fileType) {
        case ICBTAG_FILE_TYPE_DIRECTORY:
@@ -1537,8 +1534,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
        default:
                udf_err(inode->i_sb, "(ino %ld) failed unknown file type=%d\n",
                        inode->i_ino, fe->icbTag.fileType);
-               make_bad_inode(inode);
-               return;
+               goto out;
        }
        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                struct deviceSpec *dsea =
@@ -1549,8 +1545,12 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
                                      le32_to_cpu(dsea->minorDeviceIdent)));
                        /* Developer ID ??? */
                } else
-                       make_bad_inode(inode);
+                       goto out;
        }
+       ret = 0;
+out:
+       brelse(bh);
+       return ret;
 }
 
 static int udf_alloc_i_data(struct inode *inode, size_t size)
@@ -1664,7 +1664,7 @@ static int udf_update_inode(struct inode *inode, int do_sync)
                     FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
        fe->permissions = cpu_to_le32(udfperms);
 
-       if (S_ISDIR(inode->i_mode))
+       if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0)
                fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
        else
                fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
@@ -1830,32 +1830,23 @@ struct inode *udf_iget(struct super_block *sb, struct kernel_lb_addr *ino)
 {
        unsigned long block = udf_get_lb_pblock(sb, ino, 0);
        struct inode *inode = iget_locked(sb, block);
+       int err;
 
        if (!inode)
-               return NULL;
-
-       if (inode->i_state & I_NEW) {
-               memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
-               __udf_read_inode(inode);
-               unlock_new_inode(inode);
-       }
+               return ERR_PTR(-ENOMEM);
 
-       if (is_bad_inode(inode))
-               goto out_iput;
+       if (!(inode->i_state & I_NEW))
+               return inode;
 
-       if (ino->logicalBlockNum >= UDF_SB(sb)->
-                       s_partmaps[ino->partitionReferenceNum].s_partition_len) {
-               udf_debug("block=%d, partition=%d out of range\n",
-                         ino->logicalBlockNum, ino->partitionReferenceNum);
-               make_bad_inode(inode);
-               goto out_iput;
+       memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
+       err = udf_read_inode(inode);
+       if (err < 0) {
+               iget_failed(inode);
+               return ERR_PTR(err);
        }
+       unlock_new_inode(inode);
 
        return inode;
-
- out_iput:
-       iput(inode);
-       return NULL;
 }
 
 int udf_add_aext(struct inode *inode, struct extent_position *epos,
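
(Illustration, not part of the patch.) The UDF hunks above convert udf_iget() and udf_new_inode() from returning NULL on failure to the ERR_PTR()/IS_ERR()/PTR_ERR() convention, so callers can propagate the real errno instead of guessing one. A minimal userspace mimic of that <linux/err.h> idiom.

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO       4095

static void *ERR_PTR(long error)      { return (void *)error; }
static long PTR_ERR(const void *ptr)  { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *lookup_thing(int fail)
{
        static int thing = 42;

        if (fail)
                return ERR_PTR(-ENOMEM);        /* errno travels in the pointer */
        return &thing;
}

int main(void)
{
        void *p = lookup_thing(1);

        if (IS_ERR(p))
                printf("lookup failed: %ld\n", PTR_ERR(p));     /* -12 on Linux */
        return 0;
}
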
index 83a0600..c12e260 100644 (file)
@@ -270,9 +270,8 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
                                                NULL, 0),
                };
                inode = udf_iget(dir->i_sb, lb);
-               if (!inode) {
-                       return ERR_PTR(-EACCES);
-               }
+               if (IS_ERR(inode))
+                       return inode;
        } else
 #endif /* UDF_RECOVERY */
 
@@ -285,9 +284,8 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
 
                loc = lelb_to_cpu(cfi.icb.extLocation);
                inode = udf_iget(dir->i_sb, &loc);
-               if (!inode) {
-                       return ERR_PTR(-EACCES);
-               }
+               if (IS_ERR(inode))
+                       return ERR_CAST(inode);
        }
 
        return d_splice_alias(inode, dentry);
@@ -550,32 +548,18 @@ static int udf_delete_entry(struct inode *inode, struct fileIdentDesc *fi,
        return udf_write_fi(inode, cfi, fi, fibh, NULL, NULL);
 }
 
-static int udf_create(struct inode *dir, struct dentry *dentry, umode_t mode,
-                     bool excl)
+static int udf_add_nondir(struct dentry *dentry, struct inode *inode)
 {
+       struct udf_inode_info *iinfo = UDF_I(inode);
+       struct inode *dir = dentry->d_parent->d_inode;
        struct udf_fileident_bh fibh;
-       struct inode *inode;
        struct fileIdentDesc cfi, *fi;
        int err;
-       struct udf_inode_info *iinfo;
-
-       inode = udf_new_inode(dir, mode, &err);
-       if (!inode) {
-               return err;
-       }
-
-       iinfo = UDF_I(inode);
-       if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
-               inode->i_data.a_ops = &udf_adinicb_aops;
-       else
-               inode->i_data.a_ops = &udf_aops;
-       inode->i_op = &udf_file_inode_operations;
-       inode->i_fop = &udf_file_operations;
-       mark_inode_dirty(inode);
 
        fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
-       if (!fi) {
+       if (unlikely(!fi)) {
                inode_dec_link_count(inode);
+               unlock_new_inode(inode);
                iput(inode);
                return err;
        }
@@ -589,23 +573,21 @@ static int udf_create(struct inode *dir, struct dentry *dentry, umode_t mode,
        if (fibh.sbh != fibh.ebh)
                brelse(fibh.ebh);
        brelse(fibh.sbh);
+       unlock_new_inode(inode);
        d_instantiate(dentry, inode);
 
        return 0;
 }
 
-static int udf_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int udf_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+                     bool excl)
 {
-       struct inode *inode;
-       struct udf_inode_info *iinfo;
-       int err;
+       struct inode *inode = udf_new_inode(dir, mode);
 
-       inode = udf_new_inode(dir, mode, &err);
-       if (!inode)
-               return err;
+       if (IS_ERR(inode))
+               return PTR_ERR(inode);
 
-       iinfo = UDF_I(inode);
-       if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
+       if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
                inode->i_data.a_ops = &udf_adinicb_aops;
        else
                inode->i_data.a_ops = &udf_aops;
@@ -613,7 +595,25 @@ static int udf_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
        inode->i_fop = &udf_file_operations;
        mark_inode_dirty(inode);
 
+       return udf_add_nondir(dentry, inode);
+}
+
+static int udf_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+       struct inode *inode = udf_new_inode(dir, mode);
+
+       if (IS_ERR(inode))
+               return PTR_ERR(inode);
+
+       if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
+               inode->i_data.a_ops = &udf_adinicb_aops;
+       else
+               inode->i_data.a_ops = &udf_aops;
+       inode->i_op = &udf_file_inode_operations;
+       inode->i_fop = &udf_file_operations;
+       mark_inode_dirty(inode);
        d_tmpfile(dentry, inode);
+       unlock_new_inode(inode);
        return 0;
 }
 
@@ -621,44 +621,16 @@ static int udf_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
                     dev_t rdev)
 {
        struct inode *inode;
-       struct udf_fileident_bh fibh;
-       struct fileIdentDesc cfi, *fi;
-       int err;
-       struct udf_inode_info *iinfo;
 
        if (!old_valid_dev(rdev))
                return -EINVAL;
 
-       err = -EIO;
-       inode = udf_new_inode(dir, mode, &err);
-       if (!inode)
-               goto out;
+       inode = udf_new_inode(dir, mode);
+       if (IS_ERR(inode))
+               return PTR_ERR(inode);
 
-       iinfo = UDF_I(inode);
        init_special_inode(inode, mode, rdev);
-       fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
-       if (!fi) {
-               inode_dec_link_count(inode);
-               iput(inode);
-               return err;
-       }
-       cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
-       cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location);
-       *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
-               cpu_to_le32(iinfo->i_unique & 0x00000000FFFFFFFFUL);
-       udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
-       if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
-               mark_inode_dirty(dir);
-       mark_inode_dirty(inode);
-
-       if (fibh.sbh != fibh.ebh)
-               brelse(fibh.ebh);
-       brelse(fibh.sbh);
-       d_instantiate(dentry, inode);
-       err = 0;
-
-out:
-       return err;
+       return udf_add_nondir(dentry, inode);
 }
 
 static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
@@ -670,10 +642,9 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        struct udf_inode_info *dinfo = UDF_I(dir);
        struct udf_inode_info *iinfo;
 
-       err = -EIO;
-       inode = udf_new_inode(dir, S_IFDIR | mode, &err);
-       if (!inode)
-               goto out;
+       inode = udf_new_inode(dir, S_IFDIR | mode);
+       if (IS_ERR(inode))
+               return PTR_ERR(inode);
 
        iinfo = UDF_I(inode);
        inode->i_op = &udf_dir_inode_operations;
@@ -681,6 +652,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        fi = udf_add_entry(inode, NULL, &fibh, &cfi, &err);
        if (!fi) {
                inode_dec_link_count(inode);
+               unlock_new_inode(inode);
                iput(inode);
                goto out;
        }
@@ -699,6 +671,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        if (!fi) {
                clear_nlink(inode);
                mark_inode_dirty(inode);
+               unlock_new_inode(inode);
                iput(inode);
                goto out;
        }
@@ -710,6 +683,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
        inc_nlink(dir);
        mark_inode_dirty(dir);
+       unlock_new_inode(inode);
        d_instantiate(dentry, inode);
        if (fibh.sbh != fibh.ebh)
                brelse(fibh.ebh);
@@ -876,14 +850,11 @@ out:
 static int udf_symlink(struct inode *dir, struct dentry *dentry,
                       const char *symname)
 {
-       struct inode *inode;
+       struct inode *inode = udf_new_inode(dir, S_IFLNK | S_IRWXUGO);
        struct pathComponent *pc;
        const char *compstart;
-       struct udf_fileident_bh fibh;
        struct extent_position epos = {};
        int eoffset, elen = 0;
-       struct fileIdentDesc *fi;
-       struct fileIdentDesc cfi;
        uint8_t *ea;
        int err;
        int block;
@@ -892,9 +863,8 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
        struct udf_inode_info *iinfo;
        struct super_block *sb = dir->i_sb;
 
-       inode = udf_new_inode(dir, S_IFLNK | S_IRWXUGO, &err);
-       if (!inode)
-               goto out;
+       if (IS_ERR(inode))
+               return PTR_ERR(inode);
 
        iinfo = UDF_I(inode);
        down_write(&iinfo->i_data_sem);
@@ -1012,32 +982,15 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
        mark_inode_dirty(inode);
        up_write(&iinfo->i_data_sem);
 
-       fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
-       if (!fi)
-               goto out_fail;
-       cfi.icb.extLength = cpu_to_le32(sb->s_blocksize);
-       cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location);
-       if (UDF_SB(inode->i_sb)->s_lvid_bh) {
-               *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
-                       cpu_to_le32(lvid_get_unique_id(sb));
-       }
-       udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
-       if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
-               mark_inode_dirty(dir);
-       if (fibh.sbh != fibh.ebh)
-               brelse(fibh.ebh);
-       brelse(fibh.sbh);
-       d_instantiate(dentry, inode);
-       err = 0;
-
+       err = udf_add_nondir(dentry, inode);
 out:
        kfree(name);
        return err;
 
 out_no_entry:
        up_write(&iinfo->i_data_sem);
-out_fail:
        inode_dec_link_count(inode);
+       unlock_new_inode(inode);
        iput(inode);
        goto out;
 }
@@ -1222,7 +1175,7 @@ static struct dentry *udf_get_parent(struct dentry *child)
        struct udf_fileident_bh fibh;
 
        if (!udf_find_entry(child->d_inode, &dotdot, &fibh, &cfi))
-               goto out_unlock;
+               return ERR_PTR(-EACCES);
 
        if (fibh.sbh != fibh.ebh)
                brelse(fibh.ebh);
@@ -1230,12 +1183,10 @@ static struct dentry *udf_get_parent(struct dentry *child)
 
        tloc = lelb_to_cpu(cfi.icb.extLocation);
        inode = udf_iget(child->d_inode->i_sb, &tloc);
-       if (!inode)
-               goto out_unlock;
+       if (IS_ERR(inode))
+               return ERR_CAST(inode);
 
        return d_obtain_alias(inode);
-out_unlock:
-       return ERR_PTR(-EACCES);
 }
 
 
@@ -1252,8 +1203,8 @@ static struct dentry *udf_nfs_get_inode(struct super_block *sb, u32 block,
        loc.partitionReferenceNum = partref;
        inode = udf_iget(sb, &loc);
 
-       if (inode == NULL)
-               return ERR_PTR(-ENOMEM);
+       if (IS_ERR(inode))
+               return ERR_CAST(inode);
 
        if (generation && inode->i_generation != generation) {
                iput(inode);
index 813da94..5401fc3 100644 (file)
@@ -961,12 +961,14 @@ struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
 
        metadata_fe = udf_iget(sb, &addr);
 
-       if (metadata_fe == NULL)
+       if (IS_ERR(metadata_fe)) {
                udf_warn(sb, "metadata inode efe not found\n");
-       else if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) {
+               return metadata_fe;
+       }
+       if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) {
                udf_warn(sb, "metadata inode efe does not have short allocation descriptors!\n");
                iput(metadata_fe);
-               metadata_fe = NULL;
+               return ERR_PTR(-EIO);
        }
 
        return metadata_fe;
@@ -978,6 +980,7 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
        struct udf_part_map *map;
        struct udf_meta_data *mdata;
        struct kernel_lb_addr addr;
+       struct inode *fe;
 
        map = &sbi->s_partmaps[partition];
        mdata = &map->s_type_specific.s_metadata;
@@ -986,22 +989,24 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
        udf_debug("Metadata file location: block = %d part = %d\n",
                  mdata->s_meta_file_loc, map->s_partition_num);
 
-       mdata->s_metadata_fe = udf_find_metadata_inode_efe(sb,
-               mdata->s_meta_file_loc, map->s_partition_num);
-
-       if (mdata->s_metadata_fe == NULL) {
+       fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc,
+                                        map->s_partition_num);
+       if (IS_ERR(fe)) {
                /* mirror file entry */
                udf_debug("Mirror metadata file location: block = %d part = %d\n",
                          mdata->s_mirror_file_loc, map->s_partition_num);
 
-               mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb,
-                       mdata->s_mirror_file_loc, map->s_partition_num);
+               fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc,
+                                                map->s_partition_num);
 
-               if (mdata->s_mirror_fe == NULL) {
+               if (IS_ERR(fe)) {
                        udf_err(sb, "Both metadata and mirror metadata inode efe can not found\n");
-                       return -EIO;
+                       return PTR_ERR(fe);
                }
-       }
+               mdata->s_mirror_fe = fe;
+       } else
+               mdata->s_metadata_fe = fe;
+
 
        /*
         * bitmap file entry
@@ -1015,15 +1020,16 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
                udf_debug("Bitmap file location: block = %d part = %d\n",
                          addr.logicalBlockNum, addr.partitionReferenceNum);
 
-               mdata->s_bitmap_fe = udf_iget(sb, &addr);
-               if (mdata->s_bitmap_fe == NULL) {
+               fe = udf_iget(sb, &addr);
+               if (IS_ERR(fe)) {
                        if (sb->s_flags & MS_RDONLY)
                                udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n");
                        else {
                                udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n");
-                               return -EIO;
+                               return PTR_ERR(fe);
                        }
-               }
+               } else
+                       mdata->s_bitmap_fe = fe;
        }
 
        udf_debug("udf_load_metadata_files Ok\n");
@@ -1111,13 +1117,15 @@ static int udf_fill_partdesc_info(struct super_block *sb,
                                phd->unallocSpaceTable.extPosition),
                        .partitionReferenceNum = p_index,
                };
+               struct inode *inode;
 
-               map->s_uspace.s_table = udf_iget(sb, &loc);
-               if (!map->s_uspace.s_table) {
+               inode = udf_iget(sb, &loc);
+               if (IS_ERR(inode)) {
                        udf_debug("cannot load unallocSpaceTable (part %d)\n",
                                  p_index);
-                       return -EIO;
+                       return PTR_ERR(inode);
                }
+               map->s_uspace.s_table = inode;
                map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
                udf_debug("unallocSpaceTable (part %d) @ %ld\n",
                          p_index, map->s_uspace.s_table->i_ino);
@@ -1144,14 +1152,15 @@ static int udf_fill_partdesc_info(struct super_block *sb,
                                phd->freedSpaceTable.extPosition),
                        .partitionReferenceNum = p_index,
                };
+               struct inode *inode;
 
-               map->s_fspace.s_table = udf_iget(sb, &loc);
-               if (!map->s_fspace.s_table) {
+               inode = udf_iget(sb, &loc);
+               if (IS_ERR(inode)) {
                        udf_debug("cannot load freedSpaceTable (part %d)\n",
                                  p_index);
-                       return -EIO;
+                       return PTR_ERR(inode);
                }
-
+               map->s_fspace.s_table = inode;
                map->s_partition_flags |= UDF_PART_FLAG_FREED_TABLE;
                udf_debug("freedSpaceTable (part %d) @ %ld\n",
                          p_index, map->s_fspace.s_table->i_ino);
@@ -1178,6 +1187,7 @@ static void udf_find_vat_block(struct super_block *sb, int p_index,
        struct udf_part_map *map = &sbi->s_partmaps[p_index];
        sector_t vat_block;
        struct kernel_lb_addr ino;
+       struct inode *inode;
 
        /*
         * VAT file entry is in the last recorded block. Some broken disks have
@@ -1186,10 +1196,13 @@ static void udf_find_vat_block(struct super_block *sb, int p_index,
        ino.partitionReferenceNum = type1_index;
        for (vat_block = start_block;
             vat_block >= map->s_partition_root &&
-            vat_block >= start_block - 3 &&
-            !sbi->s_vat_inode; vat_block--) {
+            vat_block >= start_block - 3; vat_block--) {
                ino.logicalBlockNum = vat_block - map->s_partition_root;
-               sbi->s_vat_inode = udf_iget(sb, &ino);
+               inode = udf_iget(sb, &ino);
+               if (!IS_ERR(inode)) {
+                       sbi->s_vat_inode = inode;
+                       break;
+               }
        }
 }
 
@@ -2205,10 +2218,10 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
        /* assign inodes by physical block number */
        /* perhaps it's not extensible enough, but for now ... */
        inode = udf_iget(sb, &rootdir);
-       if (!inode) {
+       if (IS_ERR(inode)) {
                udf_err(sb, "Error in udf_iget, block=%d, partition=%d\n",
                       rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
-               ret = -EIO;
+               ret = PTR_ERR(inode);
                goto error_out;
        }
 
index be7dabb..742557b 100644 (file)
@@ -143,7 +143,6 @@ extern int udf_expand_file_adinicb(struct inode *);
 extern struct buffer_head *udf_expand_dir_adinicb(struct inode *, int *, int *);
 extern struct buffer_head *udf_bread(struct inode *, int, int, int *);
 extern int udf_setsize(struct inode *, loff_t);
-extern void udf_read_inode(struct inode *);
 extern void udf_evict_inode(struct inode *);
 extern int udf_write_inode(struct inode *, struct writeback_control *wbc);
 extern long udf_block_map(struct inode *, sector_t);
@@ -209,7 +208,7 @@ extern int udf_CS0toUTF8(struct ustr *, const struct ustr *);
 
 /* ialloc.c */
 extern void udf_free_inode(struct inode *);
-extern struct inode *udf_new_inode(struct inode *, umode_t, int *);
+extern struct inode *udf_new_inode(struct inode *, umode_t);
 
 /* truncate.c */
 extern void udf_truncate_tail_extent(struct inode *);
index c1c9de1..d91e59b 100644 (file)
@@ -204,10 +204,9 @@ struct acpi_device_flags {
        u32 match_driver:1;
        u32 initialized:1;
        u32 visited:1;
-       u32 no_hotplug:1;
        u32 hotplug_notify:1;
        u32 is_dock_station:1;
-       u32 reserved:22;
+       u32 reserved:23;
 };
 
 /* File System */
@@ -411,7 +410,6 @@ void acpi_bus_private_data_handler(acpi_handle, void *);
 int acpi_bus_get_private_data(acpi_handle, void **);
 int acpi_bus_attach_private_data(acpi_handle, void *);
 void acpi_bus_detach_private_data(acpi_handle);
-void acpi_bus_no_hotplug(acpi_handle handle);
 extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32);
 extern int register_acpi_notifier(struct notifier_block *);
 extern int unregister_acpi_notifier(struct notifier_block *);
index 831d786..882675e 100644 (file)
@@ -162,12 +162,25 @@ static inline size_t drbg_max_request_bytes(struct drbg_state *drbg)
 
 static inline size_t drbg_max_addtl(struct drbg_state *drbg)
 {
+#if (__BITS_PER_LONG == 32)
+       /*
+        * SP800-90A allows smaller maximum numbers to be returned -- we
+        * return SIZE_MAX - 1 to allow the verification of the enforcement
+        * of this value in drbg_healthcheck_sanity.
+        */
+       return (SIZE_MAX - 1);
+#else
        return (1UL<<(drbg->core->max_addtllen));
+#endif
 }
 
 static inline size_t drbg_max_requests(struct drbg_state *drbg)
 {
+#if (__BITS_PER_LONG == 32)
+       return SIZE_MAX;
+#else
        return (1UL<<(drbg->core->max_req));
+#endif
 }
 
 /*
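
(Illustration, not part of the patch.) The drbg.h hunk above pins the request limits to SIZE_MAX on 32-bit builds because the SP800-90A maxima use exponents larger than 31, and 1UL << exponent neither fits nor is well defined when long is 32 bits wide. A hedged sketch of one portable way to clamp such a limit by doing the shift in 64 bits first; the exponent used is only illustrative.

#include <stdio.h>
#include <stdint.h>

/* Clamp 1 << shift to what a size_t can hold; the shift is done in 64 bits
 * so exponents above 31 stay well defined even on 32-bit builds. */
static size_t clamped_limit(unsigned int shift)
{
        uint64_t limit = (uint64_t)1 << shift;

        return (limit > (uint64_t)SIZE_MAX) ? SIZE_MAX : (size_t)limit;
}

int main(void)
{
        /* 35 is an assumed exponent of SP800-90A magnitude, for illustration. */
        printf("clamped limit: %zu\n", clamped_limit(35));
        return 0;
}
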
index e4ae2ad..75a227c 100644 (file)
@@ -55,6 +55,7 @@ struct qstr {
 #define QSTR_INIT(n,l) { { { .len = l } }, .name = n }
 #define hashlen_hash(hashlen) ((u32) (hashlen))
 #define hashlen_len(hashlen)  ((u32)((hashlen) >> 32))
+#define hashlen_create(hash,len) (((u64)(len)<<32)|(u32)(hash))
 
 struct dentry_stat_t {
        long nr_dentry;
index bd1754c..d0494c3 100644 (file)
@@ -37,6 +37,9 @@ static __always_inline u64 hash_64(u64 val, unsigned int bits)
 {
        u64 hash = val;
 
+#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
+       hash = hash * GOLDEN_RATIO_PRIME_64;
+#else
        /*  Sigh, gcc can't optimise this alone like it does for 32 bits. */
        u64 n = hash;
        n <<= 18;
@@ -51,6 +54,7 @@ static __always_inline u64 hash_64(u64 val, unsigned int bits)
        hash += n;
        n <<= 2;
        hash += n;
+#endif
 
        /* High bits are more random, so use them. */
        return hash >> (64 - bits);
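
(Illustration, not part of the patch.) With this hunk, fold_hash() in namei.c can rely on hash_64() doing a single multiply by GOLDEN_RATIO_PRIME_64 on architectures that select CONFIG_ARCH_HAS_FAST_MULTIPLIER; the existing shift-and-add sequence is that same multiplication spelled out for slow multipliers. A standalone sketch comparing the two; the constant and the full sequence are reproduced from include/linux/hash.h of this era purely for illustration.

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

/* Value as in include/linux/hash.h at the time, reproduced for illustration. */
#define GOLDEN_RATIO_PRIME_64   0x9e37fffffffc0001ULL

static uint64_t hash_64_mul(uint64_t val, unsigned int bits)
{
        return (val * GOLDEN_RATIO_PRIME_64) >> (64 - bits);
}

static uint64_t hash_64_shift_add(uint64_t val, unsigned int bits)
{
        uint64_t hash = val, n = val;

        n <<= 18; hash -= n;
        n <<= 33; hash -= n;
        n <<= 3;  hash += n;
        n <<= 3;  hash -= n;
        n <<= 4;  hash += n;
        n <<= 2;  hash += n;

        return hash >> (64 - bits);     /* keep the well-mixed high bits */
}

int main(void)
{
        uint64_t name_hash = 0x0123456789abcdefULL;    /* e.g. a full_name_hash() value */

        assert(hash_64_mul(name_hash, 32) == hash_64_shift_add(name_hash, 32));
        printf("folded to %#llx\n",
               (unsigned long long)hash_64_mul(name_hash, 32));
        return 0;
}
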
index 4b79ffe..fa76c79 100644 (file)
@@ -84,10 +84,12 @@ static inline void iio_trigger_put(struct iio_trigger *trig)
        put_device(&trig->dev);
 }
 
-static inline void iio_trigger_get(struct iio_trigger *trig)
+static inline struct iio_trigger *iio_trigger_get(struct iio_trigger *trig)
 {
        get_device(&trig->dev);
        __module_get(trig->ops->owner);
+
+       return trig;
 }
 
 /**
index 1f44466..c367cbd 100644 (file)
@@ -258,23 +258,11 @@ extern unsigned long preset_lpj;
 #define SEC_JIFFIE_SC (32 - SHIFT_HZ)
 #endif
 #define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29)
-#define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19)
 #define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\
                                 TICK_NSEC -1) / (u64)TICK_NSEC))
 
 #define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
                                         TICK_NSEC -1) / (u64)TICK_NSEC))
-#define USEC_CONVERSION  \
-                    ((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\
-                                        TICK_NSEC -1) / (u64)TICK_NSEC))
-/*
- * USEC_ROUND is used in the timeval to jiffie conversion.  See there
- * for more details.  It is the scaled resolution rounding value.  Note
- * that it is a 64-bit value.  Since, when it is applied, we are already
- * in jiffies (albit scaled), it is nothing but the bits we will shift
- * off.
- */
-#define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1)
 /*
  * The maximum jiffie value is (MAX_INT >> 1).  Here we translate that
  * into seconds.  The 64-bit case will overflow if we are not careful,
index 511c6e0..a5b7d7c 100644 (file)
@@ -209,6 +209,7 @@ enum {
        MLX4_BMME_FLAG_TYPE_2_WIN       = 1 <<  9,
        MLX4_BMME_FLAG_RESERVED_LKEY    = 1 << 10,
        MLX4_BMME_FLAG_FAST_REG_WR      = 1 << 11,
+       MLX4_BMME_FLAG_VSD_INIT2RTR     = 1 << 28,
 };
 
 enum mlx4_event {
index 7040dc9..5f4e36c 100644 (file)
@@ -56,7 +56,8 @@ enum mlx4_qp_optpar {
        MLX4_QP_OPTPAR_RNR_RETRY                = 1 << 13,
        MLX4_QP_OPTPAR_ACK_TIMEOUT              = 1 << 14,
        MLX4_QP_OPTPAR_SCHED_QUEUE              = 1 << 16,
-       MLX4_QP_OPTPAR_COUNTER_INDEX            = 1 << 20
+       MLX4_QP_OPTPAR_COUNTER_INDEX            = 1 << 20,
+       MLX4_QP_OPTPAR_VLAN_STRIPPING           = 1 << 21,
 };
 
 enum mlx4_qp_state {
@@ -423,13 +424,20 @@ struct mlx4_wqe_inline_seg {
 
 enum mlx4_update_qp_attr {
        MLX4_UPDATE_QP_SMAC             = 1 << 0,
+       MLX4_UPDATE_QP_VSD              = 1 << 2,
+       MLX4_UPDATE_QP_SUPPORTED_ATTRS  = (1 << 2) - 1
+};
+
+enum mlx4_update_qp_params_flags {
+       MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE          = 1 << 0,
 };
 
 struct mlx4_update_qp_params {
        u8      smac_index;
+       u32     flags;
 };
 
-int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
                   enum mlx4_update_qp_attr attr,
                   struct mlx4_update_qp_params *params);
 int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
index 61978a4..96453f9 100644 (file)
@@ -303,6 +303,7 @@ struct pci_dev {
                                                   D3cold, not set for devices
                                                   powered on/off by the
                                                   corresponding bridge */
+       unsigned int    ignore_hotplug:1;       /* Ignore hotplug events */
        unsigned int    d3_delay;       /* D3->D0 transition time in ms */
        unsigned int    d3cold_delay;   /* D3cold->D0 transition time in ms */
 
@@ -1021,6 +1022,11 @@ bool pci_dev_run_wake(struct pci_dev *dev);
 bool pci_check_pme_status(struct pci_dev *dev);
 void pci_pme_wakeup_bus(struct pci_bus *bus);
 
+static inline void pci_ignore_hotplug(struct pci_dev *dev)
+{
+       dev->ignore_hotplug = 1;
+}
+
 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
                                  bool enable)
 {
index 502073a..b483abd 100644 (file)
@@ -64,6 +64,7 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
 
 int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
+void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
 int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
 #else
 
@@ -82,6 +83,7 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
 
 static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
+static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
 static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
 
 #endif
index 2c02f3a..c37bd4d 100644 (file)
@@ -182,7 +182,6 @@ extern void vga_put(struct pci_dev *pdev, unsigned int rsrc);
  *     vga_get()...
  */
 
-#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
 #ifdef CONFIG_VGA_ARB
 extern struct pci_dev *vga_default_device(void);
 extern void vga_set_default_device(struct pci_dev *pdev);
@@ -190,7 +189,6 @@ extern void vga_set_default_device(struct pci_dev *pdev);
 static inline struct pci_dev *vga_default_device(void) { return NULL; };
 static inline void vga_set_default_device(struct pci_dev *pdev) { };
 #endif
-#endif
 
 /**
  *     vga_conflicts
index a0cc2e9..b996e6c 100644 (file)
@@ -419,7 +419,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
        alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \
                        1, (name))
 #define create_singlethread_workqueue(name)                            \
-       alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, (name))
+       alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
index f679877..ec51e67 100644 (file)
@@ -204,6 +204,7 @@ void ipv6_sock_ac_close(struct sock *sk);
 
 int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr);
 int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr);
+void ipv6_ac_destroy_dev(struct inet6_dev *idev);
 bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
                         const struct in6_addr *addr);
 bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev,
index 71c60f4..a8ae4e7 100644 (file)
@@ -480,6 +480,7 @@ void dst_init(void);
 /* Flags for xfrm_lookup flags argument. */
 enum {
        XFRM_LOOKUP_ICMP = 1 << 0,
+       XFRM_LOOKUP_QUEUE = 1 << 1,
 };
 
 struct flowi;
@@ -490,7 +491,16 @@ static inline struct dst_entry *xfrm_lookup(struct net *net,
                                            int flags)
 {
        return dst_orig;
-} 
+}
+
+static inline struct dst_entry *xfrm_lookup_route(struct net *net,
+                                                 struct dst_entry *dst_orig,
+                                                 const struct flowi *fl,
+                                                 struct sock *sk,
+                                                 int flags)
+{
+       return dst_orig;
+}
 
 static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
 {
@@ -502,6 +512,10 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
                              const struct flowi *fl, struct sock *sk,
                              int flags);
 
+struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
+                                   const struct flowi *fl, struct sock *sk,
+                                   int flags);
+
 /* skb attached with this dst needs transformation if dst->xfrm is valid */
 static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
 {
index 93695f0..af10c2c 100644 (file)
@@ -394,4 +394,12 @@ static inline int genl_set_err(struct genl_family *family, struct net *net,
        return netlink_set_err(net->genl_sock, portid, group, code);
 }
 
+static inline int genl_has_listeners(struct genl_family *family,
+                                    struct sock *sk, unsigned int group)
+{
+       if (WARN_ON_ONCE(group >= family->n_mcgrps))
+               return -EINVAL;
+       group = family->mcgrp_offset + group;
+       return netlink_has_listeners(sk, group);
+}
 #endif /* __NET_GENERIC_NETLINK_H */
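
The new genl_has_listeners() helper maps a family-relative multicast group
index onto the global group id before asking netlink whether anyone is
subscribed. A hedged sketch of the intended call pattern (kernel context,
not standalone-runnable; my_genl_family and need_notify() are illustrative
names -- the openvswitch hunk further down uses the helper the same way):

    /* Skip building a notification skb when nobody would receive it. */
    static bool need_notify(struct genl_info *info)
    {
            if (info->nlhdr->nlmsg_flags & NLM_F_ECHO)
                    return true;
            return genl_has_listeners(&my_genl_family,
                                      genl_info_net(info)->genl_sock, 0) > 0;
    }
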
index a3cfb8e..620e086 100644 (file)
@@ -231,7 +231,8 @@ struct qdisc_skb_cb {
        unsigned int            pkt_len;
        u16                     slave_dev_queue_mapping;
        u16                     _pad;
-       unsigned char           data[24];
+#define QDISC_CB_PRIV_LEN 20
+       unsigned char           data[QDISC_CB_PRIV_LEN];
 };
 
 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
index 1ea0b65..a2bf41e 100644 (file)
@@ -47,6 +47,7 @@ struct ib_umem {
        int                     writable;
        int                     hugetlb;
        struct work_struct      work;
+       struct pid             *pid;
        struct mm_struct       *mm;
        unsigned long           diff;
        struct sg_table sg_head;
index cdcc90b..e645835 100644 (file)
@@ -68,7 +68,7 @@ static inline void scsi_activate_tcq(struct scsi_device *sdev, int depth)
                return;
 
        if (!shost_use_blk_mq(sdev->host) &&
-           blk_queue_tagged(sdev->request_queue))
+           !blk_queue_tagged(sdev->request_queue))
                blk_queue_init_tags(sdev->request_queue, depth,
                                    sdev->host->bqt);
 
index 24e9033..be88166 100644 (file)
@@ -240,6 +240,7 @@ header-y += matroxfb.h
 header-y += mdio.h
 header-y += media.h
 header-y += mei.h
+header-y += memfd.h
 header-y += mempolicy.h
 header-y += meye.h
 header-y += mic_common.h
@@ -395,6 +396,7 @@ header-y += un.h
 header-y += unistd.h
 header-y += unix_diag.h
 header-y += usbdevice_fs.h
+header-y += usbip.h
 header-y += utime.h
 header-y += utsname.h
 header-y += uuid.h
index 19df18c..1874ebe 100644 (file)
@@ -165,6 +165,7 @@ struct input_keymap_entry {
 #define INPUT_PROP_BUTTONPAD           0x02    /* has button(s) under pad */
 #define INPUT_PROP_SEMI_MT             0x03    /* touch rectangle only */
 #define INPUT_PROP_TOPBUTTONPAD                0x04    /* softbuttons at top of pad */
+#define INPUT_PROP_POINTING_STICK      0x05    /* is a pointing stick */
 
 #define INPUT_PROP_MAX                 0x1f
 #define INPUT_PROP_CNT                 (INPUT_PROP_MAX + 1)
index 131a6cc..14334d0 100644 (file)
@@ -53,6 +53,9 @@
 /* operation as Dom0 is supported */
 #define XENFEAT_dom0                      11
 
+/* Xen also maps grant references at pfn = mfn */
+#define XENFEAT_grant_map_identity        12
+
 #define XENFEAT_NR_SUBMAPS 1
 
 #endif /* __XEN_PUBLIC_FEATURES_H__ */
index b6237c3..82f2288 100644 (file)
@@ -539,6 +539,12 @@ void __init prepare_namespace(void)
 {
        int is_floppy;
 
+       if (root_delay) {
+               printk(KERN_INFO "Waiting %d sec before mounting root device...\n",
+                      root_delay);
+               ssleep(root_delay);
+       }
+
        /*
         * wait for the known devices to complete their probing
         *
@@ -565,12 +571,6 @@ void __init prepare_namespace(void)
        if (initrd_load())
                goto out;
 
-       if (root_delay) {
-               pr_info("Waiting %d sec before mounting root device...\n",
-                       root_delay);
-               ssleep(root_delay);
-       }
-
        /* wait for any asynchronous scanning to complete */
        if ((ROOT_DEV == 0) && root_wait) {
                printk(KERN_INFO "Waiting for root device %s...\n",
index 940aced..3a73f99 100644 (file)
@@ -3985,7 +3985,6 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
 
        l = cgroup_pidlist_find_create(cgrp, type);
        if (!l) {
-               mutex_unlock(&cgrp->pidlist_mutex);
                pidlist_free(array);
                return -ENOMEM;
        }
index f9c1ed0..d640a8b 100644 (file)
@@ -1524,6 +1524,11 @@ retry:
         */
        if (ctx->is_active) {
                raw_spin_unlock_irq(&ctx->lock);
+               /*
+                * Reload the task pointer, it might have been changed by
+                * a concurrent perf_event_context_sched_out().
+                */
+               task = ctx->task;
                goto retry;
        }
 
@@ -1967,6 +1972,11 @@ retry:
         */
        if (ctx->is_active) {
                raw_spin_unlock_irq(&ctx->lock);
+               /*
+                * Reload the task pointer, it might have been changed by
+                * a concurrent perf_event_context_sched_out().
+                */
+               task = ctx->task;
                goto retry;
        }
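
Both hunks fix the same race: after dropping ctx->lock and jumping back to
retry, the code now re-reads ctx->task, because the context can be handed to
a different task while the lock is not held. A minimal user-space analogue
of this reload-before-retry pattern (pthread sketch with made-up names, not
the perf code itself):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
    static int ctx_task = 1;        /* stands in for ctx->task */
    static bool ctx_is_active;      /* may flip while the lock is dropped */

    static void install_event(void)
    {
            int task = ctx_task;    /* snapshot used while unlocked */

    retry:
            /* ... sleepable work that relies on 'task' goes here ... */
            pthread_mutex_lock(&ctx_lock);
            if (ctx_is_active) {
                    pthread_mutex_unlock(&ctx_lock);
                    task = ctx_task;  /* reload: may have changed meanwhile */
                    goto retry;
            }
            printf("installed for task %d\n", task);
            pthread_mutex_unlock(&ctx_lock);
    }

    int main(void)
    {
            install_event();
            return 0;
    }
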
 
index d3a9d94..815d7af 100644 (file)
@@ -2592,6 +2592,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
         * shared futexes. We need to compare the keys:
         */
        if (match_futex(&q.key, &key2)) {
+               queue_unlock(hb);
                ret = -EINVAL;
                goto out_put_keys;
        }
index e30ac0f..0aa69ea 100644 (file)
@@ -44,11 +44,12 @@ static long kptr_obfuscate(long v, int type)
  */
 static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
 {
-       long ret;
+       long t1, t2;
 
-       ret = kptr_obfuscate((long)v1, type) - kptr_obfuscate((long)v2, type);
+       t1 = kptr_obfuscate((long)v1, type);
+       t2 = kptr_obfuscate((long)v2, type);
 
-       return (ret < 0) | ((ret > 0) << 1);
+       return (t1 < t2) | ((t1 > t2) << 1);
 }
 
 /* The caller must have pinned the task */
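
The rewrite matters because the old t1 - t2 subtraction can overflow when
the two obfuscated pointer values have opposite signs, flipping the sign of
the result and therefore the reported ordering; comparing the values
directly cannot overflow. A small self-contained demonstration (user-space
C, illustrative values only):

    #include <limits.h>
    #include <stdio.h>

    /* Encode the ordering the way kcmp_ptr() does:
     * 1 => a < b, 2 => a > b, 0 => equal. */
    static int cmp_by_sub(long a, long b)
    {
            long ret = a - b;       /* may overflow */
            return (ret < 0) | ((ret > 0) << 1);
    }

    static int cmp_direct(long a, long b)
    {
            return (a < b) | ((a > b) << 1);
    }

    int main(void)
    {
            long a = LONG_MAX, b = -1;

            /* a > b, so both should report 2; the subtraction wraps
             * (LONG_MAX - (-1)) and typically reports 1 instead. */
            printf("subtract: %d  direct: %d\n",
                   cmp_by_sub(a, b), cmp_direct(a, b));
            return 0;
    }
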
index e04c455..1ce7706 100644 (file)
@@ -1665,15 +1665,15 @@ asmlinkage int vprintk_emit(int facility, int level,
        raw_spin_lock(&logbuf_lock);
        logbuf_cpu = this_cpu;
 
-       if (recursion_bug) {
+       if (unlikely(recursion_bug)) {
                static const char recursion_msg[] =
                        "BUG: recent printk recursion!";
 
                recursion_bug = 0;
-               text_len = strlen(recursion_msg);
                /* emit KERN_CRIT message */
                printed_len += log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0,
-                                        NULL, 0, recursion_msg, text_len);
+                                        NULL, 0, recursion_msg,
+                                        strlen(recursion_msg));
        }
 
        /*
index 4aec4a4..a7077d3 100644 (file)
@@ -464,18 +464,26 @@ static enum alarmtimer_type clock2alarm(clockid_t clockid)
 static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
                                                        ktime_t now)
 {
+       unsigned long flags;
        struct k_itimer *ptr = container_of(alarm, struct k_itimer,
                                                it.alarm.alarmtimer);
-       if (posix_timer_event(ptr, 0) != 0)
-               ptr->it_overrun++;
+       enum alarmtimer_restart result = ALARMTIMER_NORESTART;
+
+       spin_lock_irqsave(&ptr->it_lock, flags);
+       if ((ptr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) {
+               if (posix_timer_event(ptr, 0) != 0)
+                       ptr->it_overrun++;
+       }
 
        /* Re-add periodic timers */
        if (ptr->it.alarm.interval.tv64) {
                ptr->it_overrun += alarm_forward(alarm, now,
                                                ptr->it.alarm.interval);
-               return ALARMTIMER_RESTART;
+               result = ALARMTIMER_RESTART;
        }
-       return ALARMTIMER_NORESTART;
+       spin_unlock_irqrestore(&ptr->it_lock, flags);
+
+       return result;
 }
 
 /**
@@ -541,18 +549,22 @@ static int alarm_timer_create(struct k_itimer *new_timer)
  * @new_timer: k_itimer pointer
  * @cur_setting: itimerspec data to fill
  *
- * Copies the itimerspec data out from the k_itimer
+ * Copies out the current itimerspec data
  */
 static void alarm_timer_get(struct k_itimer *timr,
                                struct itimerspec *cur_setting)
 {
-       memset(cur_setting, 0, sizeof(struct itimerspec));
+       ktime_t relative_expiry_time =
+               alarm_expires_remaining(&(timr->it.alarm.alarmtimer));
+
+       if (ktime_to_ns(relative_expiry_time) > 0) {
+               cur_setting->it_value = ktime_to_timespec(relative_expiry_time);
+       } else {
+               cur_setting->it_value.tv_sec = 0;
+               cur_setting->it_value.tv_nsec = 0;
+       }
 
-       cur_setting->it_interval =
-                       ktime_to_timespec(timr->it.alarm.interval);
-       cur_setting->it_value =
-               ktime_to_timespec(timr->it.alarm.alarmtimer.node.expires);
-       return;
+       cur_setting->it_interval = ktime_to_timespec(timr->it.alarm.interval);
 }
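
alarm_timer_get() now reports it_value as the time remaining until expiry,
clamped to zero, instead of copying out the absolute expiry timestamp.
Worked example with illustrative numbers: an alarm armed to fire at t = 100 s
and read back at t = 97 s now yields it_value = 3 s, and one read back after
it has already fired yields 0, whereas the old code returned the raw 100 s
expiry in both cases.
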
 
 /**
index f0294ba..a9ae20f 100644 (file)
@@ -559,17 +559,20 @@ EXPORT_SYMBOL(usecs_to_jiffies);
  * that a remainder subtract here would not do the right thing as the
  * resolution values don't fall on second boundries.  I.e. the line:
  * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
+ * Note that due to the small error in the multiplier here, this
+ * rounding is incorrect for sufficiently large values of tv_nsec, but
+ * well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're
+ * OK.
  *
  * Rather, we just shift the bits off the right.
  *
  * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
  * value to a scaled second value.
  */
-unsigned long
-timespec_to_jiffies(const struct timespec *value)
+static unsigned long
+__timespec_to_jiffies(unsigned long sec, long nsec)
 {
-       unsigned long sec = value->tv_sec;
-       long nsec = value->tv_nsec + TICK_NSEC - 1;
+       nsec = nsec + TICK_NSEC - 1;
 
        if (sec >= MAX_SEC_IN_JIFFIES){
                sec = MAX_SEC_IN_JIFFIES;
@@ -580,6 +583,13 @@ timespec_to_jiffies(const struct timespec *value)
                 (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
 
 }
+
+unsigned long
+timespec_to_jiffies(const struct timespec *value)
+{
+       return __timespec_to_jiffies(value->tv_sec, value->tv_nsec);
+}
+
 EXPORT_SYMBOL(timespec_to_jiffies);
 
 void
@@ -596,31 +606,27 @@ jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
 }
 EXPORT_SYMBOL(jiffies_to_timespec);
 
-/* Same for "timeval"
- *
- * Well, almost.  The problem here is that the real system resolution is
- * in nanoseconds and the value being converted is in micro seconds.
- * Also for some machines (those that use HZ = 1024, in-particular),
- * there is a LARGE error in the tick size in microseconds.
-
- * The solution we use is to do the rounding AFTER we convert the
- * microsecond part.  Thus the USEC_ROUND, the bits to be shifted off.
- * Instruction wise, this should cost only an additional add with carry
- * instruction above the way it was done above.
+/*
+ * We could use a similar algorithm to timespec_to_jiffies (with a
+ * different multiplier for usec instead of nsec). But this has a
+ * problem with rounding: we can't exactly add TICK_NSEC - 1 to the
+ * usec value, since it's not necessarily integral.
+ *
+ * We could instead round in the intermediate scaled representation
+ * (i.e. in units of 1/2^(large scale) jiffies) but that's also
+ * perilous: the scaling introduces a small positive error, which
+ * combined with a division-rounding-upward (i.e. adding 2^(scale) - 1
+ * units to the intermediate before shifting) leads to accidental
+ * overflow and overestimates.
+ *
+ * At the cost of one additional multiplication by a constant, just
+ * use the timespec implementation.
  */
 unsigned long
 timeval_to_jiffies(const struct timeval *value)
 {
-       unsigned long sec = value->tv_sec;
-       long usec = value->tv_usec;
-
-       if (sec >= MAX_SEC_IN_JIFFIES){
-               sec = MAX_SEC_IN_JIFFIES;
-               usec = 0;
-       }
-       return (((u64)sec * SEC_CONVERSION) +
-               (((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
-                (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
+       return __timespec_to_jiffies(value->tv_sec,
+                                    value->tv_usec * NSEC_PER_USEC);
 }
 EXPORT_SYMBOL(timeval_to_jiffies);
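
With this change the timeval path simply scales microseconds to nanoseconds
and reuses the timespec conversion, so both round identically. A simplified
user-space illustration of the rounding that shared path provides (HZ = 1000
assumed for the example; the real __timespec_to_jiffies() uses the scaled
multiply shown above rather than a plain division):

    #include <stdio.h>

    #define HZ              1000UL
    #define NSEC_PER_SEC    1000000000UL
    #define NSEC_PER_USEC   1000UL
    #define TICK_NSEC       (NSEC_PER_SEC / HZ)   /* 1,000,000 ns per jiffy */

    /* Simplified stand-in for __timespec_to_jiffies(): round any partial
     * tick up, as the "+ TICK_NSEC - 1" in the patch does. */
    static unsigned long to_jiffies(unsigned long sec, unsigned long nsec)
    {
            return sec * HZ + (nsec + TICK_NSEC - 1) / TICK_NSEC;
    }

    int main(void)
    {
            /* 1 us is less than a tick but must still count as one jiffy. */
            printf("%lu\n", to_jiffies(0, 1 * NSEC_PER_USEC));     /* 1 */
            /* 2.5 ms rounds up to 3 jiffies at HZ=1000. */
            printf("%lu\n", to_jiffies(0, 2500 * NSEC_PER_USEC));  /* 3 */
            return 0;
    }
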
 
index a5ce0c7..54cf309 100644 (file)
@@ -51,6 +51,9 @@ config PERCPU_RWSEM
 config ARCH_USE_CMPXCHG_LOCKREF
        bool
 
+config ARCH_HAS_FAST_MULTIPLIER
+       bool
+
 config CRC_CCITT
        tristate "CRC-CCITT functions"
        help
index ae146f0..2404d03 100644 (file)
@@ -1723,11 +1723,13 @@ ascend_old_tree:
                shortcut = assoc_array_ptr_to_shortcut(ptr);
                slot = shortcut->parent_slot;
                cursor = shortcut->back_pointer;
+               if (!cursor)
+                       goto gc_complete;
        } else {
                slot = node->parent_slot;
                cursor = ptr;
        }
-       BUG_ON(!ptr);
+       BUG_ON(!cursor);
        node = assoc_array_ptr_to_node(cursor);
        slot++;
        goto continue_node;
index b7d81ba..9a5c1f2 100644 (file)
@@ -11,7 +11,7 @@
 
 unsigned int __sw_hweight32(unsigned int w)
 {
-#ifdef ARCH_HAS_FAST_MULTIPLIER
+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
        w -= (w >> 1) & 0x55555555;
        w =  (w & 0x33333333) + ((w >> 2) & 0x33333333);
        w =  (w + (w >> 4)) & 0x0f0f0f0f;
@@ -49,7 +49,7 @@ unsigned long __sw_hweight64(__u64 w)
        return __sw_hweight32((unsigned int)(w >> 32)) +
               __sw_hweight32((unsigned int)w);
 #elif BITS_PER_LONG == 64
-#ifdef ARCH_HAS_FAST_MULTIPLIER
+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
        w -= (w >> 1) & 0x5555555555555555ul;
        w =  (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul);
        w =  (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful;
index a2c7881..7b36e4d 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/hash.h>
 #include <linux/random.h>
 #include <linux/rhashtable.h>
-#include <linux/log2.h>
 
 #define HASH_DEFAULT_SIZE      64UL
 #define HASH_MIN_SIZE          4UL
index 992bf30..f3c6ff5 100644 (file)
@@ -807,9 +807,9 @@ void *memchr_inv(const void *start, int c, size_t bytes)
                return check_bytes8(start, value, bytes);
 
        value64 = value;
-#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
+#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
        value64 *= 0x0101010101010101;
-#elif defined(ARCH_HAS_FAST_MULTIPLIER)
+#elif defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER)
        value64 *= 0x01010101;
        value64 |= value64 << 32;
 #else
index 306baa5..ba8019b 100644 (file)
@@ -176,7 +176,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
        if (list_empty(&dev->dma_pools) &&
            device_create_file(dev, &dev_attr_pools)) {
                kfree(retval);
-               return NULL;
+               retval = NULL;
        } else
                list_add(&retval->pools, &dev->dma_pools);
        mutex_unlock(&pools_lock);
index 70fad0c..6ecb0d9 100644 (file)
@@ -816,6 +816,10 @@ void __init_memblock __next_mem_range(u64 *idx, int nid,
                if (nid != NUMA_NO_NODE && nid != m_nid)
                        continue;
 
+               /* skip hotpluggable memory regions if needed */
+               if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
+                       continue;
+
                if (!type_b) {
                        if (out_start)
                                *out_start = m_start;
index adeac30..d17f1bc 100644 (file)
@@ -118,6 +118,8 @@ __setup("norandmaps", disable_randmaps);
 unsigned long zero_pfn __read_mostly;
 unsigned long highest_memmap_pfn __read_mostly;
 
+EXPORT_SYMBOL(zero_pfn);
+
 /*
  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
  */
index c1f2ea4..c0a3637 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -369,20 +369,20 @@ static int browse_rb(struct rb_root *root)
                struct vm_area_struct *vma;
                vma = rb_entry(nd, struct vm_area_struct, vm_rb);
                if (vma->vm_start < prev) {
-                       pr_info("vm_start %lx prev %lx\n", vma->vm_start, prev);
+                       pr_emerg("vm_start %lx prev %lx\n", vma->vm_start, prev);
                        bug = 1;
                }
                if (vma->vm_start < pend) {
-                       pr_info("vm_start %lx pend %lx\n", vma->vm_start, pend);
+                       pr_emerg("vm_start %lx pend %lx\n", vma->vm_start, pend);
                        bug = 1;
                }
                if (vma->vm_start > vma->vm_end) {
-                       pr_info("vm_end %lx < vm_start %lx\n",
+                       pr_emerg("vm_end %lx < vm_start %lx\n",
                                vma->vm_end, vma->vm_start);
                        bug = 1;
                }
                if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
-                       pr_info("free gap %lx, correct %lx\n",
+                       pr_emerg("free gap %lx, correct %lx\n",
                               vma->rb_subtree_gap,
                               vma_compute_subtree_gap(vma));
                        bug = 1;
@@ -396,7 +396,7 @@ static int browse_rb(struct rb_root *root)
        for (nd = pn; nd; nd = rb_prev(nd))
                j++;
        if (i != j) {
-               pr_info("backwards %d, forwards %d\n", j, i);
+               pr_emerg("backwards %d, forwards %d\n", j, i);
                bug = 1;
        }
        return bug ? -1 : i;
@@ -431,17 +431,17 @@ static void validate_mm(struct mm_struct *mm)
                i++;
        }
        if (i != mm->map_count) {
-               pr_info("map_count %d vm_next %d\n", mm->map_count, i);
+               pr_emerg("map_count %d vm_next %d\n", mm->map_count, i);
                bug = 1;
        }
        if (highest_address != mm->highest_vm_end) {
-               pr_info("mm->highest_vm_end %lx, found %lx\n",
+               pr_emerg("mm->highest_vm_end %lx, found %lx\n",
                       mm->highest_vm_end, highest_address);
                bug = 1;
        }
        i = browse_rb(&mm->mm_rb);
        if (i != mm->map_count) {
-               pr_info("map_count %d rb %d\n", mm->map_count, i);
+               pr_emerg("map_count %d rb %d\n", mm->map_count, i);
                bug = 1;
        }
        BUG_ON(bug);
index 7ed5860..7c7ab32 100644 (file)
@@ -119,6 +119,8 @@ static unsigned long __init free_low_memory_core_early(void)
        phys_addr_t start, end;
        u64 i;
 
+       memblock_clear_hotplug(0, -1);
+
        for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL)
                count += __free_memory_core(start, end);
 
index 62a7fa2..b6c04cb 100644 (file)
@@ -309,6 +309,9 @@ struct br_input_skb_cb {
        int igmp;
        int mrouters_only;
 #endif
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+       bool vlan_filtered;
+#endif
 };
 
 #define BR_INPUT_SKB_CB(__skb) ((struct br_input_skb_cb *)(__skb)->cb)
index e1bcd65..3ba57fc 100644 (file)
@@ -27,9 +27,13 @@ static void __vlan_add_flags(struct net_port_vlans *v, u16 vid, u16 flags)
 {
        if (flags & BRIDGE_VLAN_INFO_PVID)
                __vlan_add_pvid(v, vid);
+       else
+               __vlan_delete_pvid(v, vid);
 
        if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
                set_bit(vid, v->untagged_bitmap);
+       else
+               clear_bit(vid, v->untagged_bitmap);
 }
 
 static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
@@ -125,7 +129,8 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
 {
        u16 vid;
 
-       if (!br->vlan_enabled)
+       /* If this packet was not filtered at input, let it pass */
+       if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
                goto out;
 
        /* Vlan filter table must be configured at this point.  The
@@ -164,8 +169,10 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
        /* If VLAN filtering is disabled on the bridge, all packets are
         * permitted.
         */
-       if (!br->vlan_enabled)
+       if (!br->vlan_enabled) {
+               BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
                return true;
+       }
 
        /* If there are no vlan in the permitted list, all packets are
         * rejected.
@@ -173,6 +180,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
        if (!v)
                goto drop;
 
+       BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
        proto = br->vlan_proto;
 
        /* If vlan tx offload is disabled on bridge device and frame was
@@ -251,7 +259,8 @@ bool br_allowed_egress(struct net_bridge *br,
 {
        u16 vid;
 
-       if (!br->vlan_enabled)
+       /* If this packet was not filtered at input, let it pass */
+       if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
                return true;
 
        if (!v)
@@ -270,6 +279,7 @@ bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
        struct net_bridge *br = p->br;
        struct net_port_vlans *v;
 
+       /* If filtering was disabled at input, let it pass. */
        if (!br->vlan_enabled)
                return true;
 
index 96238ba..de6662b 100644 (file)
@@ -13,8 +13,6 @@
 #include "auth_x.h"
 #include "auth_x_protocol.h"
 
-#define TEMP_TICKET_BUF_LEN    256
-
 static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed);
 
 static int ceph_x_is_authenticated(struct ceph_auth_client *ac)
@@ -64,7 +62,7 @@ static int ceph_x_encrypt(struct ceph_crypto_key *secret,
 }
 
 static int ceph_x_decrypt(struct ceph_crypto_key *secret,
-                         void **p, void *end, void *obuf, size_t olen)
+                         void **p, void *end, void **obuf, size_t olen)
 {
        struct ceph_x_encrypt_header head;
        size_t head_len = sizeof(head);
@@ -75,8 +73,14 @@ static int ceph_x_decrypt(struct ceph_crypto_key *secret,
                return -EINVAL;
 
        dout("ceph_x_decrypt len %d\n", len);
-       ret = ceph_decrypt2(secret, &head, &head_len, obuf, &olen,
-                           *p, len);
+       if (*obuf == NULL) {
+               *obuf = kmalloc(len, GFP_NOFS);
+               if (!*obuf)
+                       return -ENOMEM;
+               olen = len;
+       }
+
+       ret = ceph_decrypt2(secret, &head, &head_len, *obuf, &olen, *p, len);
        if (ret)
                return ret;
        if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC)
@@ -129,139 +133,120 @@ static void remove_ticket_handler(struct ceph_auth_client *ac,
        kfree(th);
 }
 
-static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
-                                   struct ceph_crypto_key *secret,
-                                   void *buf, void *end)
+static int process_one_ticket(struct ceph_auth_client *ac,
+                             struct ceph_crypto_key *secret,
+                             void **p, void *end)
 {
        struct ceph_x_info *xi = ac->private;
-       int num;
-       void *p = buf;
+       int type;
+       u8 tkt_struct_v, blob_struct_v;
+       struct ceph_x_ticket_handler *th;
+       void *dbuf = NULL;
+       void *dp, *dend;
+       int dlen;
+       char is_enc;
+       struct timespec validity;
+       struct ceph_crypto_key old_key;
+       void *ticket_buf = NULL;
+       void *tp, *tpend;
+       struct ceph_timespec new_validity;
+       struct ceph_crypto_key new_session_key;
+       struct ceph_buffer *new_ticket_blob;
+       unsigned long new_expires, new_renew_after;
+       u64 new_secret_id;
        int ret;
-       char *dbuf;
-       char *ticket_buf;
-       u8 reply_struct_v;
 
-       dbuf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
-       if (!dbuf)
-               return -ENOMEM;
+       ceph_decode_need(p, end, sizeof(u32) + 1, bad);
 
-       ret = -ENOMEM;
-       ticket_buf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
-       if (!ticket_buf)
-               goto out_dbuf;
+       type = ceph_decode_32(p);
+       dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
 
-       ceph_decode_need(&p, end, 1 + sizeof(u32), bad);
-       reply_struct_v = ceph_decode_8(&p);
-       if (reply_struct_v != 1)
+       tkt_struct_v = ceph_decode_8(p);
+       if (tkt_struct_v != 1)
                goto bad;
-       num = ceph_decode_32(&p);
-       dout("%d tickets\n", num);
-       while (num--) {
-               int type;
-               u8 tkt_struct_v, blob_struct_v;
-               struct ceph_x_ticket_handler *th;
-               void *dp, *dend;
-               int dlen;
-               char is_enc;
-               struct timespec validity;
-               struct ceph_crypto_key old_key;
-               void *tp, *tpend;
-               struct ceph_timespec new_validity;
-               struct ceph_crypto_key new_session_key;
-               struct ceph_buffer *new_ticket_blob;
-               unsigned long new_expires, new_renew_after;
-               u64 new_secret_id;
-
-               ceph_decode_need(&p, end, sizeof(u32) + 1, bad);
-
-               type = ceph_decode_32(&p);
-               dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
-
-               tkt_struct_v = ceph_decode_8(&p);
-               if (tkt_struct_v != 1)
-                       goto bad;
-
-               th = get_ticket_handler(ac, type);
-               if (IS_ERR(th)) {
-                       ret = PTR_ERR(th);
-                       goto out;
-               }
 
-               /* blob for me */
-               dlen = ceph_x_decrypt(secret, &p, end, dbuf,
-                                     TEMP_TICKET_BUF_LEN);
-               if (dlen <= 0) {
-                       ret = dlen;
-                       goto out;
-               }
-               dout(" decrypted %d bytes\n", dlen);
-               dend = dbuf + dlen;
-               dp = dbuf;
+       th = get_ticket_handler(ac, type);
+       if (IS_ERR(th)) {
+               ret = PTR_ERR(th);
+               goto out;
+       }
 
-               tkt_struct_v = ceph_decode_8(&dp);
-               if (tkt_struct_v != 1)
-                       goto bad;
+       /* blob for me */
+       dlen = ceph_x_decrypt(secret, p, end, &dbuf, 0);
+       if (dlen <= 0) {
+               ret = dlen;
+               goto out;
+       }
+       dout(" decrypted %d bytes\n", dlen);
+       dp = dbuf;
+       dend = dp + dlen;
 
-               memcpy(&old_key, &th->session_key, sizeof(old_key));
-               ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
-               if (ret)
-                       goto out;
+       tkt_struct_v = ceph_decode_8(&dp);
+       if (tkt_struct_v != 1)
+               goto bad;
 
-               ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
-               ceph_decode_timespec(&validity, &new_validity);
-               new_expires = get_seconds() + validity.tv_sec;
-               new_renew_after = new_expires - (validity.tv_sec / 4);
-               dout(" expires=%lu renew_after=%lu\n", new_expires,
-                    new_renew_after);
+       memcpy(&old_key, &th->session_key, sizeof(old_key));
+       ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
+       if (ret)
+               goto out;
 
-               /* ticket blob for service */
-               ceph_decode_8_safe(&p, end, is_enc, bad);
-               tp = ticket_buf;
-               if (is_enc) {
-                       /* encrypted */
-                       dout(" encrypted ticket\n");
-                       dlen = ceph_x_decrypt(&old_key, &p, end, ticket_buf,
-                                             TEMP_TICKET_BUF_LEN);
-                       if (dlen < 0) {
-                               ret = dlen;
-                               goto out;
-                       }
-                       dlen = ceph_decode_32(&tp);
-               } else {
-                       /* unencrypted */
-                       ceph_decode_32_safe(&p, end, dlen, bad);
-                       ceph_decode_need(&p, end, dlen, bad);
-                       ceph_decode_copy(&p, ticket_buf, dlen);
+       ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
+       ceph_decode_timespec(&validity, &new_validity);
+       new_expires = get_seconds() + validity.tv_sec;
+       new_renew_after = new_expires - (validity.tv_sec / 4);
+       dout(" expires=%lu renew_after=%lu\n", new_expires,
+            new_renew_after);
+
+       /* ticket blob for service */
+       ceph_decode_8_safe(p, end, is_enc, bad);
+       if (is_enc) {
+               /* encrypted */
+               dout(" encrypted ticket\n");
+               dlen = ceph_x_decrypt(&old_key, p, end, &ticket_buf, 0);
+               if (dlen < 0) {
+                       ret = dlen;
+                       goto out;
                }
-               tpend = tp + dlen;
-               dout(" ticket blob is %d bytes\n", dlen);
-               ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
-               blob_struct_v = ceph_decode_8(&tp);
-               new_secret_id = ceph_decode_64(&tp);
-               ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
-               if (ret)
+               tp = ticket_buf;
+               dlen = ceph_decode_32(&tp);
+       } else {
+               /* unencrypted */
+               ceph_decode_32_safe(p, end, dlen, bad);
+               ticket_buf = kmalloc(dlen, GFP_NOFS);
+               if (!ticket_buf) {
+                       ret = -ENOMEM;
                        goto out;
-
-               /* all is well, update our ticket */
-               ceph_crypto_key_destroy(&th->session_key);
-               if (th->ticket_blob)
-                       ceph_buffer_put(th->ticket_blob);
-               th->session_key = new_session_key;
-               th->ticket_blob = new_ticket_blob;
-               th->validity = new_validity;
-               th->secret_id = new_secret_id;
-               th->expires = new_expires;
-               th->renew_after = new_renew_after;
-               dout(" got ticket service %d (%s) secret_id %lld len %d\n",
-                    type, ceph_entity_type_name(type), th->secret_id,
-                    (int)th->ticket_blob->vec.iov_len);
-               xi->have_keys |= th->service;
+               }
+               tp = ticket_buf;
+               ceph_decode_need(p, end, dlen, bad);
+               ceph_decode_copy(p, ticket_buf, dlen);
        }
+       tpend = tp + dlen;
+       dout(" ticket blob is %d bytes\n", dlen);
+       ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
+       blob_struct_v = ceph_decode_8(&tp);
+       new_secret_id = ceph_decode_64(&tp);
+       ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
+       if (ret)
+               goto out;
+
+       /* all is well, update our ticket */
+       ceph_crypto_key_destroy(&th->session_key);
+       if (th->ticket_blob)
+               ceph_buffer_put(th->ticket_blob);
+       th->session_key = new_session_key;
+       th->ticket_blob = new_ticket_blob;
+       th->validity = new_validity;
+       th->secret_id = new_secret_id;
+       th->expires = new_expires;
+       th->renew_after = new_renew_after;
+       dout(" got ticket service %d (%s) secret_id %lld len %d\n",
+            type, ceph_entity_type_name(type), th->secret_id,
+            (int)th->ticket_blob->vec.iov_len);
+       xi->have_keys |= th->service;
 
-       ret = 0;
 out:
        kfree(ticket_buf);
-out_dbuf:
        kfree(dbuf);
        return ret;
 
@@ -270,6 +255,34 @@ bad:
        goto out;
 }
 
+static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
+                                   struct ceph_crypto_key *secret,
+                                   void *buf, void *end)
+{
+       void *p = buf;
+       u8 reply_struct_v;
+       u32 num;
+       int ret;
+
+       ceph_decode_8_safe(&p, end, reply_struct_v, bad);
+       if (reply_struct_v != 1)
+               return -EINVAL;
+
+       ceph_decode_32_safe(&p, end, num, bad);
+       dout("%d tickets\n", num);
+
+       while (num--) {
+               ret = process_one_ticket(ac, secret, &p, end);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+
+bad:
+       return -EINVAL;
+}
+
 static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
                                   struct ceph_x_ticket_handler *th,
                                   struct ceph_x_authorizer *au)
@@ -583,13 +596,14 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
        struct ceph_x_ticket_handler *th;
        int ret = 0;
        struct ceph_x_authorize_reply reply;
+       void *preply = &reply;
        void *p = au->reply_buf;
        void *end = p + sizeof(au->reply_buf);
 
        th = get_ticket_handler(ac, au->service);
        if (IS_ERR(th))
                return PTR_ERR(th);
-       ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply));
+       ret = ceph_x_decrypt(&th->session_key, &p, end, &preply, sizeof(reply));
        if (ret < 0)
                return ret;
        if (ret != sizeof(reply))
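
The reworked ceph_x_decrypt() now takes void **obuf and, when the caller
passes a NULL buffer, allocates one sized from the actual ciphertext length
instead of relying on the old fixed TEMP_TICKET_BUF_LEN scratch buffers. A
minimal user-space sketch of that allocate-on-demand output convention
(hypothetical decode() helper, not the ceph API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Fill *obuf with len decoded bytes, allocating it on demand when the
     * caller passes *obuf == NULL -- the convention the patch adopts. */
    static int decode(const void *in, size_t len, void **obuf, size_t *olen)
    {
            if (*obuf == NULL) {
                    *obuf = malloc(len);
                    if (!*obuf)
                            return -1;
                    *olen = len;
            }
            memcpy(*obuf, in, len < *olen ? len : *olen);  /* stand-in for the decrypt */
            return 0;
    }

    int main(void)
    {
            const char msg[] = "ticket blob";
            void *buf = NULL;       /* let decode() size the buffer */
            size_t olen = 0;

            if (decode(msg, sizeof(msg), &buf, &olen) == 0)
                    printf("%zu bytes: %s\n", olen, (char *)buf);
            free(buf);
            return 0;
    }
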
index 067d3af..61fcfc3 100644 (file)
@@ -1181,7 +1181,15 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
        if (!m) {
                pr_info("alloc_msg unknown type %d\n", type);
                *skip = 1;
+       } else if (front_len > m->front_alloc_len) {
+               pr_warning("mon_alloc_msg front %d > prealloc %d (%u#%llu)\n",
+                          front_len, m->front_alloc_len,
+                          (unsigned int)con->peer_name.type,
+                          le64_to_cpu(con->peer_name.num));
+               ceph_msg_put(m);
+               m = ceph_msg_new(type, front_len, GFP_NOFS, false);
        }
+
        return m;
 }
 
index ab9a165..cf8a95f 100644 (file)
@@ -4809,9 +4809,14 @@ static void netdev_adjacent_sysfs_del(struct net_device *dev,
        sysfs_remove_link(&(dev->dev.kobj), linkname);
 }
 
-#define netdev_adjacent_is_neigh_list(dev, dev_list) \
-               (dev_list == &dev->adj_list.upper || \
-                dev_list == &dev->adj_list.lower)
+static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
+                                                struct net_device *adj_dev,
+                                                struct list_head *dev_list)
+{
+       return (dev_list == &dev->adj_list.upper ||
+               dev_list == &dev->adj_list.lower) &&
+               net_eq(dev_net(dev), dev_net(adj_dev));
+}
 
 static int __netdev_adjacent_dev_insert(struct net_device *dev,
                                        struct net_device *adj_dev,
@@ -4841,7 +4846,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
        pr_debug("dev_hold for %s, because of link added from %s to %s\n",
                 adj_dev->name, dev->name, adj_dev->name);
 
-       if (netdev_adjacent_is_neigh_list(dev, dev_list)) {
+       if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
                ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
                if (ret)
                        goto free_adj;
@@ -4862,7 +4867,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
        return 0;
 
 remove_symlinks:
-       if (netdev_adjacent_is_neigh_list(dev, dev_list))
+       if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
                netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
 free_adj:
        kfree(adj);
@@ -4895,8 +4900,7 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev,
        if (adj->master)
                sysfs_remove_link(&(dev->dev.kobj), "master");
 
-       if (netdev_adjacent_is_neigh_list(dev, dev_list) &&
-           net_eq(dev_net(dev),dev_net(adj_dev)))
+       if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
                netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
 
        list_del_rcu(&adj->list);
index d372b4b..9c3f823 100644 (file)
@@ -1866,7 +1866,7 @@ EXPORT_SYMBOL(sock_alloc_send_skb);
  * skb_page_frag_refill - check that a page_frag contains enough room
  * @sz: minimum size of the fragment we want to get
  * @pfrag: pointer to page_frag
- * @prio: priority for memory allocation
+ * @gfp: priority for memory allocation
  *
  * Note: While this allocator tries to use high order pages, there is
  * no guarantee that allocations succeed. Therefore, @sz MUST be
index afed1aa..bd41dd1 100644 (file)
@@ -79,10 +79,10 @@ static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
        idst->saddr = saddr;
 }
 
-static void tunnel_dst_set(struct ip_tunnel *t,
+static noinline void tunnel_dst_set(struct ip_tunnel *t,
                           struct dst_entry *dst, __be32 saddr)
 {
-       __tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst, saddr);
+       __tunnel_dst_set(raw_cpu_ptr(t->dst_cache), dst, saddr);
 }
 
 static void tunnel_dst_reset(struct ip_tunnel *t)
@@ -106,7 +106,7 @@ static struct rtable *tunnel_rtable_get(struct ip_tunnel *t,
        struct dst_entry *dst;
 
        rcu_read_lock();
-       idst = this_cpu_ptr(t->dst_cache);
+       idst = raw_cpu_ptr(t->dst_cache);
        dst = rcu_dereference(idst->dst);
        if (dst && !atomic_inc_not_zero(&dst->__refcnt))
                dst = NULL;
index eaa4b00..173e7ea 100644 (file)
@@ -2265,9 +2265,9 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
                return rt;
 
        if (flp4->flowi4_proto)
-               rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
-                                                  flowi4_to_flowi(flp4),
-                                                  sk, 0);
+               rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
+                                                       flowi4_to_flowi(flp4),
+                                                       sk, 0);
 
        return rt;
 }
index fc1fac2..3342ee6 100644 (file)
@@ -3094,11 +3094,13 @@ static int addrconf_ifdown(struct net_device *dev, int how)
 
        write_unlock_bh(&idev->lock);
 
-       /* Step 5: Discard multicast list */
-       if (how)
+       /* Step 5: Discard anycast and multicast list */
+       if (how) {
+               ipv6_ac_destroy_dev(idev);
                ipv6_mc_destroy_dev(idev);
-       else
+       } else {
                ipv6_mc_down(idev);
+       }
 
        idev->tstamp = jiffies;
 
index ff2de7d..9a38684 100644 (file)
@@ -351,6 +351,27 @@ static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr)
        return __ipv6_dev_ac_dec(idev, addr);
 }
 
+void ipv6_ac_destroy_dev(struct inet6_dev *idev)
+{
+       struct ifacaddr6 *aca;
+
+       write_lock_bh(&idev->lock);
+       while ((aca = idev->ac_list) != NULL) {
+               idev->ac_list = aca->aca_next;
+               write_unlock_bh(&idev->lock);
+
+               addrconf_leave_solict(idev, &aca->aca_addr);
+
+               dst_hold(&aca->aca_rt->dst);
+               ip6_del_rt(aca->aca_rt);
+
+               aca_put(aca);
+
+               write_lock_bh(&idev->lock);
+       }
+       write_unlock_bh(&idev->lock);
+}
+
 /*
  *     check if the interface has this anycast address
  *     called with rcu_read_lock()
index 315a55d..0a3448b 100644 (file)
@@ -1009,7 +1009,7 @@ struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
        if (final_dst)
                fl6->daddr = *final_dst;
 
-       return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
+       return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
 }
 EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
 
@@ -1041,7 +1041,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
        if (final_dst)
                fl6->daddr = *final_dst;
 
-       return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
+       return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
 }
 EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
 
index 441875f..a1e433b 100644 (file)
@@ -1822,7 +1822,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
                sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE;
        if (sdata->vif.bss_conf.use_short_slot)
                sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
-       sinfo->bss_param.dtim_period = sdata->local->hw.conf.ps_dtim_period;
+       sinfo->bss_param.dtim_period = sdata->vif.bss_conf.dtim_period;
        sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int;
 
        sinfo->sta_flags.set = 0;
index 91d66b7..64dc864 100644 (file)
@@ -78,11 +78,12 @@ static const struct genl_multicast_group ovs_dp_vport_multicast_group = {
 
 /* Check if need to build a reply message.
  * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
-static bool ovs_must_notify(struct genl_info *info,
-                           const struct genl_multicast_group *grp)
+static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
+                           unsigned int group)
 {
        return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
-               netlink_has_listeners(genl_info_net(info)->genl_sock, 0);
+              genl_has_listeners(family, genl_info_net(info)->genl_sock,
+                                 group);
 }
 
 static void ovs_notify(struct genl_family *family,
@@ -763,7 +764,7 @@ static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *act
 {
        struct sk_buff *skb;
 
-       if (!always && !ovs_must_notify(info, &ovs_dp_flow_multicast_group))
+       if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0))
                return NULL;
 
        skb = genlmsg_new_unicast(ovs_flow_cmd_msg_size(acts), info, GFP_KERNEL);
index 02a86a2..5fa54dd 100644 (file)
@@ -163,6 +163,7 @@ static const struct acpi_device_id rfkill_acpi_match[] = {
        { "LNV4752", RFKILL_TYPE_GPS },
        { },
 };
+MODULE_DEVICE_TABLE(acpi, rfkill_acpi_match);
 #endif
 
 static struct platform_driver rfkill_gpio_driver = {
index b45d080..1b24191 100644 (file)
@@ -1143,7 +1143,7 @@ static long rxrpc_read(const struct key *key,
                if (copy_to_user(xdr, (s), _l) != 0)                    \
                        goto fault;                                     \
                if (_l & 3 &&                                           \
-                   copy_to_user((u8 *)xdr + _l, &zero, 4 - (_l & 3)) != 0) \
+                   copy_to_user((u8 __user *)xdr + _l, &zero, 4 - (_l & 3)) != 0) \
                        goto fault;                                     \
                xdr += (_l + 3) >> 2;                                   \
        } while(0)
index ed30e43..fb666d1 100644 (file)
@@ -133,10 +133,16 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
        --sch->q.qlen;
 }
 
+/* private part of skb->cb[] that a qdisc is allowed to use
+ * is limited to QDISC_CB_PRIV_LEN bytes.
+ * As a flow key might be too large, we store a part of it only.
+ */
+#define CHOKE_K_LEN min_t(u32, sizeof(struct flow_keys), QDISC_CB_PRIV_LEN - 3)
+
 struct choke_skb_cb {
        u16                     classid;
        u8                      keys_valid;
-       struct flow_keys        keys;
+       u8                      keys[QDISC_CB_PRIV_LEN - 3];
 };
 
 static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
@@ -163,22 +169,26 @@ static u16 choke_get_classid(const struct sk_buff *skb)
 static bool choke_match_flow(struct sk_buff *skb1,
                             struct sk_buff *skb2)
 {
+       struct flow_keys temp;
+
        if (skb1->protocol != skb2->protocol)
                return false;
 
        if (!choke_skb_cb(skb1)->keys_valid) {
                choke_skb_cb(skb1)->keys_valid = 1;
-               skb_flow_dissect(skb1, &choke_skb_cb(skb1)->keys);
+               skb_flow_dissect(skb1, &temp);
+               memcpy(&choke_skb_cb(skb1)->keys, &temp, CHOKE_K_LEN);
        }
 
        if (!choke_skb_cb(skb2)->keys_valid) {
                choke_skb_cb(skb2)->keys_valid = 1;
-               skb_flow_dissect(skb2, &choke_skb_cb(skb2)->keys);
+               skb_flow_dissect(skb2, &temp);
+               memcpy(&choke_skb_cb(skb2)->keys, &temp, CHOKE_K_LEN);
        }
 
        return !memcmp(&choke_skb_cb(skb1)->keys,
                       &choke_skb_cb(skb2)->keys,
-                      sizeof(struct flow_keys));
+                      CHOKE_K_LEN);
 }
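
Worked size check for the new layout, derived from the definitions above:
classid (u16, 2 bytes) + keys_valid (u8, 1 byte) + keys[QDISC_CB_PRIV_LEN - 3]
(17 bytes) = 20 bytes, exactly the QDISC_CB_PRIV_LEN of qdisc-private cb
space defined in the qdisc_skb_cb hunk earlier, and CHOKE_K_LEN caps the
memcmp() at min(sizeof(struct flow_keys), 17) bytes, so an oversized flow
key is compared only by prefix.
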
 
 /*
index 2e2586e..4cdbc10 100644 (file)
@@ -1996,6 +1996,9 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
        if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
                return -EFAULT;
 
+       if (kmsg->msg_name == NULL)
+               kmsg->msg_namelen = 0;
+
        if (kmsg->msg_namelen < 0)
                return -EINVAL;
 
index df7b133..7257164 100644 (file)
@@ -6969,6 +6969,9 @@ void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp)
        struct nlattr *data = ((void **)skb->cb)[2];
        enum nl80211_multicast_groups mcgrp = NL80211_MCGRP_TESTMODE;
 
+       /* clear CB data for netlink core to own from now on */
+       memset(skb->cb, 0, sizeof(skb->cb));
+
        nla_nest_end(skb, data);
        genlmsg_end(skb, hdr);
 
@@ -9294,6 +9297,9 @@ int cfg80211_vendor_cmd_reply(struct sk_buff *skb)
        void *hdr = ((void **)skb->cb)[1];
        struct nlattr *data = ((void **)skb->cb)[2];
 
+       /* clear CB data for netlink core to own from now on */
+       memset(skb->cb, 0, sizeof(skb->cb));
+
        if (WARN_ON(!rdev->cur_cmd_info)) {
                kfree_skb(skb);
                return -EINVAL;
index beeed60..fdde51f 100644 (file)
 #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
 #define XFRM_MAX_QUEUE_LEN     100
 
+struct xfrm_flo {
+       struct dst_entry *dst_orig;
+       u8 flags;
+};
+
 static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
 static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
                                                __read_mostly;
@@ -1877,13 +1882,14 @@ static int xdst_queue_output(struct sock *sk, struct sk_buff *skb)
 }
 
 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
-                                                struct dst_entry *dst,
+                                                struct xfrm_flo *xflo,
                                                 const struct flowi *fl,
                                                 int num_xfrms,
                                                 u16 family)
 {
        int err;
        struct net_device *dev;
+       struct dst_entry *dst;
        struct dst_entry *dst1;
        struct xfrm_dst *xdst;
 
@@ -1891,9 +1897,12 @@ static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
        if (IS_ERR(xdst))
                return xdst;
 
-       if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0)
+       if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
+           net->xfrm.sysctl_larval_drop ||
+           num_xfrms <= 0)
                return xdst;
 
+       dst = xflo->dst_orig;
        dst1 = &xdst->u.dst;
        dst_hold(dst);
        xdst->route = dst;
@@ -1935,7 +1944,7 @@ static struct flow_cache_object *
 xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
                   struct flow_cache_object *oldflo, void *ctx)
 {
-       struct dst_entry *dst_orig = (struct dst_entry *)ctx;
+       struct xfrm_flo *xflo = (struct xfrm_flo *)ctx;
        struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
        struct xfrm_dst *xdst, *new_xdst;
        int num_pols = 0, num_xfrms = 0, i, err, pol_dead;
@@ -1976,7 +1985,8 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
                        goto make_dummy_bundle;
        }
 
-       new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
+       new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
+                                                 xflo->dst_orig);
        if (IS_ERR(new_xdst)) {
                err = PTR_ERR(new_xdst);
                if (err != -EAGAIN)
@@ -2010,7 +2020,7 @@ make_dummy_bundle:
        /* We found policies, but there's no bundles to instantiate:
         * either because the policy blocks, has no transformations or
         * we could not build template (no xfrm_states).*/
-       xdst = xfrm_create_dummy_bundle(net, dst_orig, fl, num_xfrms, family);
+       xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
        if (IS_ERR(xdst)) {
                xfrm_pols_put(pols, num_pols);
                return ERR_CAST(xdst);
@@ -2104,13 +2114,18 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
        }
 
        if (xdst == NULL) {
+               struct xfrm_flo xflo;
+
+               xflo.dst_orig = dst_orig;
+               xflo.flags = flags;
+
                /* To accelerate a bit...  */
                if ((dst_orig->flags & DST_NOXFRM) ||
                    !net->xfrm.policy_count[XFRM_POLICY_OUT])
                        goto nopol;
 
                flo = flow_cache_lookup(net, fl, family, dir,
-                                       xfrm_bundle_lookup, dst_orig);
+                                       xfrm_bundle_lookup, &xflo);
                if (flo == NULL)
                        goto nopol;
                if (IS_ERR(flo)) {
@@ -2138,7 +2153,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
                        xfrm_pols_put(pols, drop_pols);
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
 
-                       return make_blackhole(net, family, dst_orig);
+                       return ERR_PTR(-EREMOTE);
                }
 
                err = -EAGAIN;
@@ -2195,6 +2210,23 @@ dropdst:
 }
 EXPORT_SYMBOL(xfrm_lookup);
 
+/* Callers of xfrm_lookup_route() must ensure a call to dst_output().
+ * Otherwise we may send out blackholed packets.
+ */
+struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
+                                   const struct flowi *fl,
+                                   struct sock *sk, int flags)
+{
+       struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
+                                           flags | XFRM_LOOKUP_QUEUE);
+
+       if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
+               return make_blackhole(net, dst_orig->ops->family, dst_orig);
+
+       return dst;
+}
+EXPORT_SYMBOL(xfrm_lookup_route);
+
 static inline int
 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
 {
@@ -2460,7 +2492,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
 
        skb_dst_force(skb);
 
-       dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
+       dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
        if (IS_ERR(dst)) {
                res = 0;
                dst = NULL;
index b385bcb..4d08b39 100755 (executable)
@@ -2133,7 +2133,10 @@ sub process {
 # Check for improperly formed commit descriptions
                if ($in_commit_log &&
                    $line =~ /\bcommit\s+[0-9a-f]{5,}/i &&
-                   $line !~ /\b[Cc]ommit [0-9a-f]{12,40} \("/) {
+                   !($line =~ /\b[Cc]ommit [0-9a-f]{12,40} \("/ ||
+                     ($line =~ /\b[Cc]ommit [0-9a-f]{12,40}\s*$/ &&
+                      defined $rawlines[$linenr] &&
+                      $rawlines[$linenr] =~ /^\s*\("/))) {
                        $line =~ /\b(c)ommit\s+([0-9a-f]{5,})/i;
                        my $init_char = $1;
                        my $orig_commit = lc($2);
index 9acc77e..0032278 100644 (file)
@@ -1782,14 +1782,16 @@ static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
 {
        struct snd_pcm_hw_params *params = arg;
        snd_pcm_format_t format;
-       int channels, width;
+       int channels;
+       ssize_t frame_size;
 
        params->fifo_size = substream->runtime->hw.fifo_size;
        if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
                format = params_format(params);
                channels = params_channels(params);
-               width = snd_pcm_format_physical_width(format);
-               params->fifo_size /= width * channels;
+               frame_size = snd_pcm_format_size(format, channels);
+               if (frame_size > 0)
+                       params->fifo_size /= (unsigned)frame_size;
        }
        return 0;
 }
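
Worked example of the fix, with illustrative numbers and assuming the
hardware-reported fifo_size is in bytes (as converting it into frames
implies): for S16_LE stereo, snd_pcm_format_size() gives 2 bytes per sample
x 2 channels = 4 bytes per frame, so a 512-byte FIFO is reported as 128
frames; the old divisor used snd_pcm_format_physical_width(), which is in
bits (16 x 2 = 32), so the same FIFO was reported as only 16 frames, an
eight-fold understatement.
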
index 6e5d0cb..47ccb8f 100644 (file)
@@ -777,6 +777,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
        { .id = CXT_PINCFG_LENOVO_TP410, .name = "tp410" },
        { .id = CXT_FIXUP_THINKPAD_ACPI, .name = "thinkpad" },
        { .id = CXT_PINCFG_LEMOTE_A1004, .name = "lemote-a1004" },
+       { .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" },
        { .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" },
        {}
 };
index ea823e1..98cd190 100644 (file)
@@ -566,8 +566,8 @@ static void stac_init_power_map(struct hda_codec *codec)
                if (snd_hda_jack_tbl_get(codec, nid))
                        continue;
                if (def_conf == AC_JACK_PORT_COMPLEX &&
-                   !(spec->vref_mute_led_nid == nid ||
-                     is_jack_detectable(codec, nid))) {
+                   spec->vref_mute_led_nid != nid &&
+                   is_jack_detectable(codec, nid)) {
                        snd_hda_jack_detect_enable_callback(codec, nid,
                                                            STAC_PWR_EVENT,
                                                            jack_update_power);
@@ -4276,11 +4276,18 @@ static int stac_parse_auto_config(struct hda_codec *codec)
                        return err;
        }
 
-       stac_init_power_map(codec);
-
        return 0;
 }
 
+static int stac_build_controls(struct hda_codec *codec)
+{
+       int err = snd_hda_gen_build_controls(codec);
+
+       if (err < 0)
+               return err;
+       stac_init_power_map(codec);
+       return 0;
+}
 
 static int stac_init(struct hda_codec *codec)
 {
@@ -4392,7 +4399,7 @@ static int stac_suspend(struct hda_codec *codec)
 #endif /* CONFIG_PM */
 
 static const struct hda_codec_ops stac_patch_ops = {
-       .build_controls = snd_hda_gen_build_controls,
+       .build_controls = stac_build_controls,
        .build_pcms = snd_hda_gen_build_pcms,
        .init = stac_init,
        .free = stac_free,
index 9852320..69a8516 100644 (file)
@@ -458,12 +458,12 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream,
                if (params_width(params) == 16) {
                        snd_soc_update_bits(codec, CS4265_DAC_CTL,
                                CS4265_DAC_CTL_DIF, (1 << 5));
-                       snd_soc_update_bits(codec, CS4265_ADC_CTL,
+                       snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
                                CS4265_SPDIF_CTL2_DIF, (1 << 7));
                } else {
                        snd_soc_update_bits(codec, CS4265_DAC_CTL,
                                CS4265_DAC_CTL_DIF, (3 << 5));
-                       snd_soc_update_bits(codec, CS4265_ADC_CTL,
+                       snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
                                CS4265_SPDIF_CTL2_DIF, (1 << 7));
                }
                break;
@@ -472,7 +472,7 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream,
                        CS4265_DAC_CTL_DIF, 0);
                snd_soc_update_bits(codec, CS4265_ADC_CTL,
                        CS4265_ADC_DIF, 0);
-               snd_soc_update_bits(codec, CS4265_ADC_CTL,
+               snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
                        CS4265_SPDIF_CTL2_DIF, (1 << 6));
 
                break;
index 9aa1323..89c748d 100644 (file)
@@ -4,7 +4,7 @@
  * sound/soc/codecs/sta529.c -- spear ALSA Soc codec driver
  *
  * Copyright (C) 2012 ST Microelectronics
- * Rajeev Kumar <rajeev-dlh.kumar@st.com>
+ * Rajeev Kumar <rajeevkumar.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -426,5 +426,5 @@ static struct i2c_driver sta529_i2c_driver = {
 module_i2c_driver(sta529_i2c_driver);
 
 MODULE_DESCRIPTION("ASoC STA529 codec driver");
-MODULE_AUTHOR("Rajeev Kumar <rajeev-dlh.kumar@st.com>");
+MODULE_AUTHOR("Rajeev Kumar <rajeevkumar.linux@gmail.com>");
 MODULE_LICENSE("GPL");
index 0f64c78..aea9e1f 100644 (file)
@@ -189,46 +189,57 @@ static const struct aic31xx_rate_divs aic31xx_divs[] = {
        /* mclk      rate  pll: p  j     d     dosr ndac mdac  aosr nadc madc */
        /* 8k rate */
        {12000000,   8000,      1, 8, 1920,     128,  48,  2,   128,  48,  2},
+       {12000000,   8000,      1, 8, 1920,     128,  32,  3,   128,  32,  3},
        {24000000,   8000,      2, 8, 1920,     128,  48,  2,   128,  48,  2},
        {25000000,   8000,      2, 7, 8643,     128,  48,  2,   128,  48,  2},
        /* 11.025k rate */
        {12000000,  11025,      1, 7, 5264,     128,  32,  2,   128,  32,  2},
+       {12000000,  11025,      1, 8, 4672,     128,  24,  3,   128,  24,  3},
        {24000000,  11025,      2, 7, 5264,     128,  32,  2,   128,  32,  2},
        {25000000,  11025,      2, 7, 2253,     128,  32,  2,   128,  32,  2},
        /* 16k rate */
        {12000000,  16000,      1, 8, 1920,     128,  24,  2,   128,  24,  2},
+       {12000000,  16000,      1, 8, 1920,     128,  16,  3,   128,  16,  3},
        {24000000,  16000,      2, 8, 1920,     128,  24,  2,   128,  24,  2},
        {25000000,  16000,      2, 7, 8643,     128,  24,  2,   128,  24,  2},
        /* 22.05k rate */
        {12000000,  22050,      1, 7, 5264,     128,  16,  2,   128,  16,  2},
+       {12000000,  22050,      1, 8, 4672,     128,  12,  3,   128,  12,  3},
        {24000000,  22050,      2, 7, 5264,     128,  16,  2,   128,  16,  2},
        {25000000,  22050,      2, 7, 2253,     128,  16,  2,   128,  16,  2},
        /* 32k rate */
        {12000000,  32000,      1, 8, 1920,     128,  12,  2,   128,  12,  2},
+       {12000000,  32000,      1, 8, 1920,     128,   8,  3,   128,   8,  3},
        {24000000,  32000,      2, 8, 1920,     128,  12,  2,   128,  12,  2},
        {25000000,  32000,      2, 7, 8643,     128,  12,  2,   128,  12,  2},
        /* 44.1k rate */
        {12000000,  44100,      1, 7, 5264,     128,   8,  2,   128,   8,  2},
+       {12000000,  44100,      1, 8, 4672,     128,   6,  3,   128,   6,  3},
        {24000000,  44100,      2, 7, 5264,     128,   8,  2,   128,   8,  2},
        {25000000,  44100,      2, 7, 2253,     128,   8,  2,   128,   8,  2},
        /* 48k rate */
        {12000000,  48000,      1, 8, 1920,     128,   8,  2,   128,   8,  2},
+       {12000000,  48000,      1, 7, 6800,      96,   5,  4,    96,   5,  4},
        {24000000,  48000,      2, 8, 1920,     128,   8,  2,   128,   8,  2},
        {25000000,  48000,      2, 7, 8643,     128,   8,  2,   128,   8,  2},
        /* 88.2k rate */
        {12000000,  88200,      1, 7, 5264,      64,   8,  2,    64,   8,  2},
+       {12000000,  88200,      1, 8, 4672,      64,   6,  3,    64,   6,  3},
        {24000000,  88200,      2, 7, 5264,      64,   8,  2,    64,   8,  2},
        {25000000,  88200,      2, 7, 2253,      64,   8,  2,    64,   8,  2},
        /* 96k rate */
        {12000000,  96000,      1, 8, 1920,      64,   8,  2,    64,   8,  2},
+       {12000000,  96000,      1, 7, 6800,      48,   5,  4,    48,   5,  4},
        {24000000,  96000,      2, 8, 1920,      64,   8,  2,    64,   8,  2},
        {25000000,  96000,      2, 7, 8643,      64,   8,  2,    64,   8,  2},
        /* 176.4k rate */
        {12000000, 176400,      1, 7, 5264,      32,   8,  2,    32,   8,  2},
+       {12000000, 176400,      1, 8, 4672,      32,   6,  3,    32,   6,  3},
        {24000000, 176400,      2, 7, 5264,      32,   8,  2,    32,   8,  2},
        {25000000, 176400,      2, 7, 2253,      32,   8,  2,    32,   8,  2},
        /* 192k rate */
        {12000000, 192000,      1, 8, 1920,      32,   8,  2,    32,   8,  2},
+       {12000000, 192000,      1, 7, 6800,      24,   5,  4,    24,   5,  4},
        {24000000, 192000,      2, 8, 1920,      32,   8,  2,    32,   8,  2},
        {25000000, 192000,      2, 7, 8643,      32,   8,  2,    32,   8,  2},
 };
@@ -680,7 +691,9 @@ static int aic31xx_setup_pll(struct snd_soc_codec *codec,
                             struct snd_pcm_hw_params *params)
 {
        struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
+       int bclk_score = snd_soc_params_to_frame_size(params);
        int bclk_n = 0;
+       int match = -1;
        int i;
 
        /* Use PLL as CODEC_CLKIN and DAC_CLK as BDIV_CLKIN */
@@ -691,15 +704,37 @@ static int aic31xx_setup_pll(struct snd_soc_codec *codec,
 
        for (i = 0; i < ARRAY_SIZE(aic31xx_divs); i++) {
                if (aic31xx_divs[i].rate == params_rate(params) &&
-                   aic31xx_divs[i].mclk == aic31xx->sysclk)
-                       break;
+                   aic31xx_divs[i].mclk == aic31xx->sysclk) {
+                       int s = (aic31xx_divs[i].dosr * aic31xx_divs[i].mdac) %
+                               snd_soc_params_to_frame_size(params);
+                       int bn = (aic31xx_divs[i].dosr * aic31xx_divs[i].mdac) /
+                               snd_soc_params_to_frame_size(params);
+                       if (s < bclk_score && bn > 0) {
+                               match = i;
+                               bclk_n = bn;
+                               bclk_score = s;
+                       }
+               }
        }
 
-       if (i == ARRAY_SIZE(aic31xx_divs)) {
-               dev_err(codec->dev, "%s: Sampling rate %u not supported\n",
+       if (match == -1) {
+               dev_err(codec->dev,
+                       "%s: Sample rate (%u) and format not supported\n",
                        __func__, params_rate(params));
+               /* See below for details on how to fix this. */
                return -EINVAL;
        }
+       if (bclk_score != 0) {
+               dev_warn(codec->dev, "Cannot produce exact bitclock");
+               /* This is fine when using the DSP format, but with I2S
+                  there may be trouble. To fix the issue, edit the
+                  aic31xx_divs table for your mclk and sample rate.
+                  Details can be found in:
+                  http://www.ti.com/lit/ds/symlink/tlv320aic3100.pdf
+                  Section 5.6, CLOCK Generation and PLL
+               */
+       }
+       i = match;
 
        /* PLL configuration */
        snd_soc_update_bits(codec, AIC31XX_PLLPR, AIC31XX_PLL_MASK,
@@ -729,14 +764,6 @@ static int aic31xx_setup_pll(struct snd_soc_codec *codec,
        snd_soc_write(codec, AIC31XX_AOSR, aic31xx_divs[i].aosr);
 
        /* Bit clock divider configuration. */
-       bclk_n = (aic31xx_divs[i].dosr * aic31xx_divs[i].mdac)
-               / snd_soc_params_to_frame_size(params);
-       if (bclk_n == 0) {
-               dev_err(codec->dev, "%s: Not enough BLCK bandwidth\n",
-                       __func__);
-               return -EINVAL;
-       }
-
        snd_soc_update_bits(codec, AIC31XX_BCLKN,
                            AIC31XX_PLL_MASK, bclk_n);
 
index 6a6b2ff..68347b5 100644 (file)
@@ -467,8 +467,17 @@ static int davinci_config_channel_size(struct davinci_mcasp *mcasp,
 {
        u32 fmt;
        u32 tx_rotate = (word_length / 4) & 0x7;
-       u32 rx_rotate = (32 - word_length) / 4;
        u32 mask = (1ULL << word_length) - 1;
+       /*
+        * For captured data we should not rotate; inversion and masking are
+        * enough to get the data to the right position:
+        * Format         data from bus         after reverse (XRBUF)
+        * S16_LE:      |LSB|MSB|xxx|xxx|       |xxx|xxx|MSB|LSB|
+        * S24_3LE:     |LSB|DAT|MSB|xxx|       |xxx|MSB|DAT|LSB|
+        * S24_LE:      |LSB|DAT|MSB|xxx|       |xxx|MSB|DAT|LSB|
+        * S32_LE:      |LSB|DAT|DAT|MSB|       |MSB|DAT|DAT|LSB|
+        */
+       u32 rx_rotate = 0;
 
        /*
         * if a BCLK-to-LRCLK ratio has been configured via the set_clkdiv()
index 25c31f1..e961388 100644 (file)
@@ -4,7 +4,7 @@
  * sound/soc/dwc/designware_i2s.c
  *
  * Copyright (C) 2010 ST Microelectronics
- * Rajeev Kumar <rajeev-dlh.kumar@st.com>
+ * Rajeev Kumar <rajeevkumar.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -455,7 +455,7 @@ static struct platform_driver dw_i2s_driver = {
 
 module_platform_driver(dw_i2s_driver);
 
-MODULE_AUTHOR("Rajeev Kumar <rajeev-dlh.kumar@st.com>");
+MODULE_AUTHOR("Rajeev Kumar <rajeevkumar.linux@gmail.com>");
 MODULE_DESCRIPTION("DESIGNWARE I2S SoC Interface");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:designware_i2s");
index 8d8e4b5..fb9e05c 100644 (file)
@@ -165,13 +165,14 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
        struct rk_i2s_dev *i2s = to_info(cpu_dai);
        unsigned int mask = 0, val = 0;
 
-       mask = I2S_CKR_MSS_SLAVE;
+       mask = I2S_CKR_MSS_MASK;
        switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
        case SND_SOC_DAIFMT_CBS_CFS:
-               val = I2S_CKR_MSS_SLAVE;
+               /* Set source clock in Master mode */
+               val = I2S_CKR_MSS_MASTER;
                break;
        case SND_SOC_DAIFMT_CBM_CFM:
-               val = I2S_CKR_MSS_MASTER;
+               val = I2S_CKR_MSS_SLAVE;
                break;
        default:
                return -EINVAL;
@@ -361,6 +362,8 @@ static bool rockchip_i2s_rd_reg(struct device *dev, unsigned int reg)
        case I2S_XFER:
        case I2S_CLR:
        case I2S_RXDR:
+       case I2S_FIFOLR:
+       case I2S_INTSR:
                return true;
        default:
                return false;
@@ -370,8 +373,8 @@ static bool rockchip_i2s_rd_reg(struct device *dev, unsigned int reg)
 static bool rockchip_i2s_volatile_reg(struct device *dev, unsigned int reg)
 {
        switch (reg) {
-       case I2S_FIFOLR:
        case I2S_INTSR:
+       case I2S_CLR:
                return true;
        default:
                return false;
@@ -381,8 +384,6 @@ static bool rockchip_i2s_volatile_reg(struct device *dev, unsigned int reg)
 static bool rockchip_i2s_precious_reg(struct device *dev, unsigned int reg)
 {
        switch (reg) {
-       case I2S_FIFOLR:
-               return true;
        default:
                return false;
        }
index 03eec22..9d51347 100644 (file)
@@ -462,7 +462,7 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
                if (dir == SND_SOC_CLOCK_IN)
                        rfs = 0;
 
-               if ((rfs && other->rfs && (other->rfs != rfs)) ||
+               if ((rfs && other && other->rfs && (other->rfs != rfs)) ||
                                (any_active(i2s) &&
                                (((dir == SND_SOC_CLOCK_IN)
                                        && !(mod & MOD_CDCLKCON)) ||
@@ -762,7 +762,8 @@ static void i2s_shutdown(struct snd_pcm_substream *substream,
        } else {
                u32 mod = readl(i2s->addr + I2SMOD);
                i2s->cdclk_out = !(mod & MOD_CDCLKCON);
-               other->cdclk_out = i2s->cdclk_out;
+               if (other)
+                       other->cdclk_out = i2s->cdclk_out;
        }
        /* Reset any constraint on RFS and BFS */
        i2s->rfs = 0;
index 27c06ac..3092b58 100644 (file)
@@ -101,7 +101,11 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
 
        fe->dpcm[stream].runtime = fe_substream->runtime;
 
-       if (dpcm_path_get(fe, stream, &list) <= 0) {
+       ret = dpcm_path_get(fe, stream, &list);
+       if (ret < 0) {
+               mutex_unlock(&fe->card->mutex);
+               goto fe_err;
+       } else if (ret == 0) {
                dev_dbg(fe->dev, "ASoC: %s no valid %s route\n",
                        fe->dai_link->name, stream ? "capture" : "playback");
        }
index 731fdb5..642c862 100644 (file)
@@ -2352,7 +2352,11 @@ static int dpcm_fe_dai_open(struct snd_pcm_substream *fe_substream)
        mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
        fe->dpcm[stream].runtime = fe_substream->runtime;
 
-       if (dpcm_path_get(fe, stream, &list) <= 0) {
+       ret = dpcm_path_get(fe, stream, &list);
+       if (ret < 0) {
+               mutex_unlock(&fe->card->mutex);
+               return ret;
+       } else if (ret == 0) {
                dev_dbg(fe->dev, "ASoC: %s no valid %s route\n",
                        fe->dai_link->name, stream ? "capture" : "playback");
        }
index 0e5a8f3..a7dc3c5 100644 (file)
@@ -4,7 +4,7 @@
  * sound/soc/spear/spear_pcm.c
  *
  * Copyright (C) 2012 ST Microelectronics
- * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ * Rajeev Kumar<rajeevkumar.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -50,6 +50,6 @@ int devm_spear_pcm_platform_register(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(devm_spear_pcm_platform_register);
 
-MODULE_AUTHOR("Rajeev Kumar <rajeev-dlh.kumar@st.com>");
+MODULE_AUTHOR("Rajeev Kumar <rajeevkumar.linux@gmail.com>");
 MODULE_DESCRIPTION("SPEAr PCM DMA module");
 MODULE_LICENSE("GPL");
index f65fc09..b7a7c80 100644 (file)
@@ -100,15 +100,19 @@ static int control_put(struct snd_kcontrol *kcontrol,
        struct snd_usb_caiaqdev *cdev = caiaqdev(chip->card);
        int pos = kcontrol->private_value;
        int v = ucontrol->value.integer.value[0];
-       unsigned char cmd = EP1_CMD_WRITE_IO;
+       unsigned char cmd;
 
-       if (cdev->chip.usb_id ==
-               USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1))
-               cmd = EP1_CMD_DIMM_LEDS;
-
-       if (cdev->chip.usb_id ==
-               USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_MASCHINECONTROLLER))
+       switch (cdev->chip.usb_id) {
+       case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_MASCHINECONTROLLER):
+       case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1):
+       case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_KORECONTROLLER2):
+       case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_KORECONTROLLER):
                cmd = EP1_CMD_DIMM_LEDS;
+               break;
+       default:
+               cmd = EP1_CMD_WRITE_IO;
+               break;
+       }
 
        if (pos & CNT_INTVAL) {
                int i = pos & ~CNT_INTVAL;
index 5a0e95e..15fe792 100644 (file)
@@ -15,7 +15,7 @@
 #include <syslog.h>
 #include <unistd.h>
 #include <linux/usb/ch9.h>
-#include "../../uapi/usbip.h"
+#include <linux/usbip.h>
 
 #ifndef USBIDS_FILE
 #define USBIDS_FILE "/usr/share/hwdata/usb.ids"
index 01124ef..416baed 100644 (file)
@@ -71,7 +71,7 @@ static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
                                  struct vgic_lr lr_desc)
 {
        if (!(lr_desc.state & LR_STATE_MASK))
-               set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
+               __set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
 }
 
 static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
index 33712fb..95519bc 100644 (file)
@@ -110,7 +110,7 @@ static bool largepages_enabled = true;
 bool kvm_is_mmio_pfn(pfn_t pfn)
 {
        if (pfn_valid(pfn))
-               return PageReserved(pfn_to_page(pfn));
+               return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
 
        return true;
 }
@@ -1725,7 +1725,7 @@ int kvm_vcpu_yield_to(struct kvm_vcpu *target)
        rcu_read_lock();
        pid = rcu_dereference(target->pid);
        if (pid)
-               task = get_pid_task(target->pid, PIDTYPE_PID);
+               task = get_pid_task(pid, PIDTYPE_PID);
        rcu_read_unlock();
        if (!task)
                return ret;