Merge branches 'for-4.4/upstream-fixes', 'for-4.5/async-suspend', 'for-4.5/container...
author		Jiri Kosina <jkosina@suse.cz>
		Thu, 14 Jan 2016 15:11:06 +0000 (16:11 +0100)
committer	Jiri Kosina <jkosina@suse.cz>
		Thu, 14 Jan 2016 15:11:06 +0000 (16:11 +0100)
495 files changed:
Documentation/IPMI.txt
Documentation/arm/keystone/Overview.txt
Documentation/block/null_blk.txt
Documentation/devicetree/bindings/thermal/rockchip-thermal.txt
Documentation/i2c/busses/i2c-i801
Documentation/kernel-parameters.txt
MAINTAINERS
Makefile
arch/arc/configs/axs101_defconfig
arch/arc/configs/axs103_defconfig
arch/arc/configs/axs103_smp_defconfig
arch/arc/configs/nsim_hs_defconfig
arch/arc/configs/nsim_hs_smp_defconfig
arch/arc/configs/nsimosci_hs_defconfig
arch/arc/configs/nsimosci_hs_smp_defconfig
arch/arc/configs/vdk_hs38_defconfig
arch/arc/configs/vdk_hs38_smp_defconfig
arch/arc/include/asm/irqflags-arcv2.h
arch/arc/include/asm/irqflags-compact.h
arch/arc/kernel/ctx_sw.c
arch/arc/kernel/ctx_sw_asm.S
arch/arc/kernel/process.c
arch/arc/kernel/unwind.c
arch/arc/mm/tlb.c
arch/arm/Kconfig
arch/arm/boot/dts/am57xx-beagle-x15.dts
arch/arm/boot/dts/animeo_ip.dts
arch/arm/boot/dts/at91-foxg20.dts
arch/arm/boot/dts/at91-kizbox.dts
arch/arm/boot/dts/at91-kizbox2.dts
arch/arm/boot/dts/at91-kizboxmini.dts
arch/arm/boot/dts/at91-qil_a9260.dts
arch/arm/boot/dts/at91-sama5d2_xplained.dts
arch/arm/boot/dts/at91-sama5d3_xplained.dts
arch/arm/boot/dts/at91-sama5d4_xplained.dts
arch/arm/boot/dts/at91-sama5d4ek.dts
arch/arm/boot/dts/at91rm9200ek.dts
arch/arm/boot/dts/at91sam9261ek.dts
arch/arm/boot/dts/at91sam9263ek.dts
arch/arm/boot/dts/at91sam9g20ek_common.dtsi
arch/arm/boot/dts/at91sam9m10g45ek.dts
arch/arm/boot/dts/at91sam9n12ek.dts
arch/arm/boot/dts/at91sam9rlek.dts
arch/arm/boot/dts/at91sam9x5cm.dtsi
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/imx27.dtsi
arch/arm/boot/dts/k2l-netcp.dtsi
arch/arm/boot/dts/kirkwood-ts219.dtsi
arch/arm/boot/dts/rk3288-veyron-minnie.dts
arch/arm/boot/dts/rk3288.dtsi
arch/arm/boot/dts/sama5d35ek.dts
arch/arm/boot/dts/sama5d4.dtsi
arch/arm/boot/dts/usb_a9260_common.dtsi
arch/arm/boot/dts/usb_a9263.dts
arch/arm/boot/dts/vfxxx.dtsi
arch/arm/configs/at91_dt_defconfig
arch/arm/configs/sama5_defconfig
arch/arm/include/asm/irq.h
arch/arm/include/uapi/asm/unistd.h
arch/arm/kernel/bios32.c
arch/arm/kernel/calls.S
arch/arm/kvm/arm.c
arch/arm/kvm/mmu.c
arch/arm/mach-dove/include/mach/entry-macro.S
arch/arm/mach-imx/gpc.c
arch/arm/mach-omap2/omap-smp.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod.h
arch/arm/mach-omap2/omap_hwmod_7xx_data.c
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
arch/arm/mach-omap2/pdata-quirks.c
arch/arm/mach-omap2/pm34xx.c
arch/arm/mach-orion5x/include/mach/entry-macro.S
arch/arm/mach-pxa/palm27x.c
arch/arm/mach-pxa/palmtc.c
arch/arm/mach-shmobile/setup-r8a7793.c
arch/arm/mach-zx/Kconfig
arch/arm64/Kconfig
arch/arm64/crypto/aes-ce-cipher.c
arch/arm64/include/asm/barrier.h
arch/arm64/include/asm/compat.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/dma-mapping.h
arch/arm64/include/asm/hw_breakpoint.h
arch/arm64/include/asm/irq.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/mmu_context.h
arch/arm64/include/asm/pgtable.h
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/cpuinfo.c
arch/arm64/kernel/efi.c
arch/arm64/kernel/suspend.c
arch/arm64/kvm/hyp.S
arch/arm64/kvm/inject_fault.c
arch/arm64/mm/context.c
arch/arm64/mm/dma-mapping.c
arch/arm64/mm/fault.c
arch/arm64/mm/mmu.c
arch/m68k/coldfire/m54xx.c
arch/m68k/include/asm/unistd.h
arch/m68k/include/uapi/asm/unistd.h
arch/m68k/kernel/setup_no.c
arch/m68k/kernel/syscalltable.S
arch/m68k/mm/motorola.c
arch/m68k/sun3/config.c
arch/mips/ath79/setup.c
arch/mips/boot/dts/qca/ar9132.dtsi
arch/mips/include/asm/page.h
arch/mips/kvm/emulate.c
arch/mips/kvm/locore.S
arch/mips/kvm/mips.c
arch/mips/pci/pci-rt2880.c
arch/mips/pmcs-msp71xx/msp_setup.c
arch/mips/sni/reset.c
arch/mn10300/Kconfig
arch/nios2/mm/cacheflush.c
arch/parisc/Kconfig
arch/parisc/include/asm/hugetlb.h [new file with mode: 0644]
arch/parisc/include/asm/page.h
arch/parisc/include/asm/pgalloc.h
arch/parisc/include/asm/pgtable.h
arch/parisc/include/asm/processor.h
arch/parisc/include/uapi/asm/mman.h
arch/parisc/kernel/asm-offsets.c
arch/parisc/kernel/entry.S
arch/parisc/kernel/head.S
arch/parisc/kernel/setup.c
arch/parisc/kernel/syscall.S
arch/parisc/kernel/traps.c
arch/parisc/kernel/vmlinux.lds.S
arch/parisc/mm/Makefile
arch/parisc/mm/hugetlbpage.c [new file with mode: 0644]
arch/parisc/mm/init.c
arch/powerpc/include/asm/reg.h
arch/powerpc/include/asm/systbl.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/include/uapi/asm/unistd.h
arch/powerpc/kernel/process.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/priv.c
arch/s390/kvm/sigp.c
arch/x86/include/asm/msr-index.h
arch/x86/kernel/cpu/common.c
arch/x86/kernel/fpu/signal.c
arch/x86/kernel/fpu/xstate.c
arch/x86/kernel/mcount_64.S
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/mpx.c
block/blk-merge.c
block/blk-mq.c
block/blk-timeout.c
block/blk.h
block/noop-iosched.c
block/partitions/mac.c
drivers/Makefile
drivers/acpi/cppc_acpi.c
drivers/acpi/ec.c
drivers/acpi/sbshc.c
drivers/base/power/wakeirq.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/null_blk.c
drivers/bus/omap-ocp2scp.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/ipmi/ipmi_watchdog.c
drivers/clocksource/Kconfig
drivers/clocksource/fsl_ftm_timer.c
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/Kconfig.x86
drivers/cpufreq/cppc_cpufreq.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/dma/at_hdmac.c
drivers/dma/at_hdmac_regs.h
drivers/dma/at_xdmac.c
drivers/dma/edma.c
drivers/dma/imx-sdma.c
drivers/dma/sh/usb-dmac.c
drivers/gpio/gpio-74xx-mmio.c
drivers/gpio/gpio-omap.c
drivers/gpio/gpio-palmas.c
drivers/gpio/gpio-syscon.c
drivers/gpio/gpio-tegra.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/ci_dpm.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
drivers/gpu/drm/amd/scheduler/sched_fence.c
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_params.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/mgag200/mgag200_cursor.c
drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
drivers/gpu/drm/nouveau/nouveau_acpi.c
drivers/gpu/drm/nouveau/nouveau_drm.h
drivers/gpu/drm/nouveau/nouveau_usif.c
drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/rv730_dpm.c
drivers/gpu/drm/radeon/rv770_dpm.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/vc4/vc4_drv.c
drivers/gpu/drm/vc4/vc4_hvs.c
drivers/gpu/drm/vc4/vc4_plane.c
drivers/hid/hid-core.c
drivers/hid/hid-corsair.c
drivers/hid/hid-cp2112.c
drivers/hid/hid-debug.c
drivers/hid/hid-gt683r.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-lenovo.c
drivers/hid/hid-lg.c
drivers/hid/hid-lg4ff.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-ntrig.c
drivers/hid/hid-picolcd_leds.c
drivers/hid/hid-prodikeys.c
drivers/hid/hid-roccat-arvo.c
drivers/hid/hid-roccat-common.c
drivers/hid/hid-roccat-isku.c
drivers/hid/hid-roccat-kone.c
drivers/hid/hid-roccat-koneplus.c
drivers/hid/hid-roccat-kovaplus.c
drivers/hid/hid-roccat-lua.c
drivers/hid/hid-roccat-pyra.c
drivers/hid/hid-sensor-hub.c
drivers/hid/hid-sony.c
drivers/hid/hid-steelseries.c
drivers/hid/hid-wiimote-modules.c
drivers/hid/hid-wiimote.h
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/usbhid/hid-core.c
drivers/hid/usbhid/hid-quirks.c
drivers/hid/usbhid/usbhid.h
drivers/hid/wacom_sys.c
drivers/hid/wacom_wac.c
drivers/hid/wacom_wac.h
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-xiic.c
drivers/i2c/i2c-core.c
drivers/iio/adc/ad7793.c
drivers/iio/adc/vf610_adc.c
drivers/iio/adc/xilinx-xadc-core.c
drivers/iio/dac/ad5064.c
drivers/iio/humidity/si7020.c
drivers/irqchip/irq-gic-common.c
drivers/irqchip/irq-gic.c
drivers/lightnvm/core.c
drivers/lightnvm/gennvm.c
drivers/lightnvm/gennvm.h
drivers/lightnvm/rrpc.c
drivers/md/dm-crypt.c
drivers/md/dm-mpath.c
drivers/md/dm-thin.c
drivers/md/dm.c
drivers/media/pci/cx23885/cx23885-core.c
drivers/media/pci/cx25821/cx25821-core.c
drivers/media/pci/cx88/cx88-alsa.c
drivers/media/pci/cx88/cx88-mpeg.c
drivers/media/pci/cx88/cx88-video.c
drivers/media/pci/netup_unidvb/netup_unidvb_core.c
drivers/media/pci/saa7134/saa7134-core.c
drivers/media/pci/saa7164/saa7164-core.c
drivers/media/pci/tw68/tw68-core.c
drivers/mtd/nand/jz4740_nand.c
drivers/mtd/nand/nand_base.c
drivers/net/ethernet/amd/pcnet32.c
drivers/nvme/host/lightnvm.c
drivers/nvme/host/pci.c
drivers/pci/host/pcie-designware.c
drivers/pci/host/pcie-hisi.c
drivers/pci/pci-sysfs.c
drivers/pci/pci.h
drivers/pci/probe.c
drivers/pinctrl/Kconfig
drivers/pinctrl/freescale/pinctrl-imx1-core.c
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
drivers/pinctrl/sh-pfc/pfc-sh7734.c
drivers/remoteproc/remoteproc_core.c
drivers/remoteproc/remoteproc_debugfs.c
drivers/rtc/rtc-ds1307.c
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/sh/pm_runtime.c
drivers/soc/mediatek/Kconfig
drivers/soc/ti/knav_qmss_queue.c
drivers/staging/iio/Kconfig
drivers/staging/iio/adc/lpc32xx_adc.c
drivers/staging/wilc1000/coreconfigurator.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_nego.c
drivers/target/iscsi/iscsi_target_parameters.c
drivers/target/target_core_sbc.c
drivers/target/target_core_stat.c
drivers/target/target_core_tmr.c
drivers/target/target_core_transport.c
drivers/target/target_core_user.c
drivers/thermal/Kconfig
drivers/thermal/imx_thermal.c
drivers/thermal/of-thermal.c
drivers/thermal/power_allocator.c
drivers/thermal/rcar_thermal.c
drivers/thermal/rockchip_thermal.c
drivers/tty/n_tty.c
drivers/tty/serial/8250/8250_fsl.c
drivers/tty/serial/8250/Kconfig
drivers/tty/serial/Kconfig
drivers/tty/serial/bcm63xx_uart.c
drivers/tty/serial/etraxfs-uart.c
drivers/tty/tty_audit.c
drivers/tty/tty_io.c
drivers/tty/tty_ioctl.c
drivers/tty/tty_ldisc.c
drivers/usb/chipidea/ci_hdrc_imx.c
drivers/usb/chipidea/debug.c
drivers/usb/chipidea/udc.c
drivers/usb/chipidea/usbmisc_imx.c
drivers/usb/class/usblp.c
drivers/usb/core/Kconfig
drivers/usb/dwc2/hcd.c
drivers/usb/dwc2/platform.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/f_loopback.c
drivers/usb/gadget/udc/atmel_usba_udc.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_host.c
drivers/usb/phy/Kconfig
drivers/usb/phy/phy-mxs-usb.c
drivers/usb/phy/phy-omap-otg.c
drivers/usb/serial/option.c
drivers/usb/serial/qcserial.c
drivers/usb/serial/ti_usb_3410_5052.c
drivers/usb/serial/ti_usb_3410_5052.h
drivers/watchdog/Kconfig
drivers/watchdog/mtk_wdt.c
drivers/watchdog/omap_wdt.c
drivers/watchdog/pnx4008_wdt.c
drivers/watchdog/tegra_wdt.c
drivers/watchdog/w83977f_wdt.c
drivers/xen/events/events_base.c
drivers/xen/evtchn.c
drivers/xen/gntdev.c
fs/Kconfig
fs/block_dev.c
fs/btrfs/backref.c
fs/btrfs/ctree.h
fs/btrfs/extent-tree.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/qgroup.c
fs/btrfs/scrub.c
fs/btrfs/tests/free-space-tests.c
fs/btrfs/transaction.c
fs/btrfs/transaction.h
fs/btrfs/volumes.c
fs/btrfs/volumes.h
fs/configfs/dir.c
fs/dax.c
fs/ext2/super.c
fs/ext4/super.c
fs/fat/dir.c
fs/hugetlbfs/inode.c
fs/ncpfs/ioctl.c
fs/nfs/callback_xdr.c
fs/nfs/inode.c
fs/nfs/nfs42proc.c
fs/nfs/nfs4client.c
fs/nfs/nfs4file.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4xdr.c
fs/nfs/pnfs.c
fs/ocfs2/namei.c
fs/splice.c
fs/sysv/inode.c
include/drm/drm_atomic.h
include/kvm/arm_vgic.h
include/linux/blkdev.h
include/linux/configfs.h
include/linux/gfp.h
include/linux/hid.h
include/linux/kref.h
include/linux/kvm_host.h
include/linux/lightnvm.h
include/linux/nfs_xdr.h
include/linux/of_dma.h
include/linux/pci.h
include/linux/scpi_protocol.h
include/linux/signal.h
include/linux/slab.h
include/linux/syscalls.h
include/linux/thermal.h
include/linux/tty.h
include/linux/types.h
include/target/target_core_base.h
include/uapi/linux/nfs.h
kernel/livepatch/core.c
kernel/panic.c
kernel/pid.c
kernel/signal.c
kernel/trace/ring_buffer.c
mm/huge_memory.c
mm/kasan/kasan.c
mm/memory.c
mm/page-writeback.c
mm/slab.c
mm/slab.h
mm/slab_common.c
mm/slob.c
mm/slub.c
mm/vmalloc.c
net/sunrpc/backchannel_rqst.c
net/sunrpc/svc.c
scripts/kernel-doc
security/keys/encrypted-keys/encrypted.c
security/keys/trusted.c
security/keys/user_defined.c
security/selinux/ss/conditional.c
sound/firewire/dice/dice.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/usb/midi.c
sound/usb/quirks-table.h
sound/usb/quirks.c
sound/usb/usbaudio.h
tools/Makefile
tools/perf/builtin-inject.c
tools/perf/builtin-report.c
tools/perf/ui/browsers/hists.c
tools/perf/util/build-id.c
tools/perf/util/dso.c
tools/perf/util/dso.h
tools/perf/util/machine.c
tools/perf/util/probe-finder.c
tools/perf/util/symbol.c
tools/perf/util/symbol.h
tools/power/x86/turbostat/turbostat.c
tools/testing/selftests/futex/README
tools/testing/selftests/seccomp/seccomp_bpf.c
tools/vm/page-types.c
virt/kvm/arm/arch_timer.c
virt/kvm/arm/vgic.c

index 31d1d65..c0d8788 100644 (file)
@@ -587,7 +587,7 @@ used to control it:
 
   modprobe ipmi_watchdog timeout=<t> pretimeout=<t> action=<action type>
       preaction=<preaction type> preop=<preop type> start_now=x
-      nowayout=x ifnum_to_use=n
+      nowayout=x ifnum_to_use=n panic_wdt_timeout=<t>
 
 ifnum_to_use specifies which interface the watchdog timer should use.
 The default is -1, which means to pick the first one registered.
@@ -597,7 +597,9 @@ is the amount of seconds before the reset that the pre-timeout panic will
 occur (if pretimeout is zero, then pretimeout will not be enabled).  Note
 that the pretimeout is the time before the final timeout.  So if the
 timeout is 50 seconds and the pretimeout is 10 seconds, then the pretimeout
-will occur in 40 second (10 seconds before the timeout).
+will occur in 40 seconds (10 seconds before the timeout).  The panic_wdt_timeout
+is the timeout value that is set when the kernel panics, so that actions such
+as kdump can run during the panic.
 
 The action may be "reset", "power_cycle", or "power_off", and
 specifies what to do when the timer times out, and defaults to
@@ -634,6 +636,7 @@ for configuring the watchdog:
        ipmi_watchdog.preop=<preop type>
        ipmi_watchdog.start_now=x
        ipmi_watchdog.nowayout=x
+       ipmi_watchdog.panic_wdt_timeout=<t>
 
 The options are the same as the module parameter options.
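
As a usage sketch of the new parameter, pulling together only option names that
the text above documents (the numeric values are arbitrary examples, not
recommendations):

  modprobe ipmi_watchdog timeout=20 action=reset panic_wdt_timeout=120

or, for a built-in driver, on the kernel command line:

  ipmi_watchdog.timeout=20 ipmi_watchdog.action=reset ipmi_watchdog.panic_wdt_timeout=120

This keeps the normal 20-second watchdog during operation but stretches the
timeout to 120 seconds once a panic is declared, leaving room for kdump to run.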
 
index f17bc4c..400c0c2 100644 (file)
@@ -49,24 +49,6 @@ specified through DTS. Following are the DTS used:-
 The device tree documentation for the keystone machines are located at
         Documentation/devicetree/bindings/arm/keystone/keystone.txt
 
-Known issues & workaround
--------------------------
-
-Some of the device drivers used on keystone are re-used from that from
-DaVinci and other TI SoCs. These device drivers may use clock APIs directly.
-Some of the keystone specific drivers such as netcp uses run time power
-management API instead to enable clock. As this API has limitations on
-keystone, following workaround is needed to boot Linux.
-
-   Add 'clk_ignore_unused' to the bootargs env variable in u-boot. Otherwise
-   clock frameworks will try to disable clocks that are unused and disable
-   the hardware. This is because netcp related power domain and clock
-   domains are enabled in u-boot as run time power management API currently
-   doesn't enable clocks for netcp due to a limitation. This workaround is
-   expected to be removed in the future when proper API support becomes
-   available. Until then, this work around is needed.
-
-
 Document Author
 ---------------
 Murali Karicheri <m-karicheri2@ti.com>
index 2f6c6ff..d8880ca 100644 (file)
@@ -70,3 +70,6 @@ use_per_node_hctx=[0/1]: Default: 0
      parameter.
   1: The multi-queue block layer is instantiated with a hardware dispatch
      queue for each CPU node in the system.
+
+use_lightnvm=[0/1]: Default: 0
+  Register device with LightNVM. Requires blk-mq to be used.
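
A short usage sketch of the new knob; this assumes the driver's existing
queue_mode parameter, where 2 selects the multi-queue (blk-mq) path required
here:

  modprobe null_blk queue_mode=2 use_lightnvm=1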
index b38200d..0dfa60d 100644 (file)
@@ -1,7 +1,9 @@
 * Temperature Sensor ADC (TSADC) on rockchip SoCs
 
 Required properties:
-- compatible : "rockchip,rk3288-tsadc"
+- compatible : should be "rockchip,<name>-tsadc"
+   "rockchip,rk3288-tsadc": found on RK3288 SoCs
+   "rockchip,rk3368-tsadc": found on RK3368 SoCs
 - reg : physical base address of the controller and length of memory mapped
        region.
 - interrupts : The interrupt number to the cpu. The interrupt specifier format
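
As a hedged illustration of the newly documented compatible, a consumer node
could look like the sketch below; the unit address, register window and
interrupt specifier are placeholders for illustration only, not values taken
from a real rk3368.dtsi (the tshut and sensor-cells properties mirror the
rk3288.dtsi hunk later in this merge):

	tsadc: tsadc@ff280000 {
		compatible = "rockchip,rk3368-tsadc";
		reg = <0x0 0xff280000 0x0 0x100>;		/* placeholder */
		interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;	/* placeholder */
		#thermal-sensor-cells = <1>;
		rockchip,hw-tshut-temp = <95000>;
		status = "disabled";
	};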
index 6a4b1af..1bba38d 100644 (file)
@@ -32,6 +32,7 @@ Supported adapters:
   * Intel Sunrise Point-LP (PCH)
   * Intel DNV (SOC)
   * Intel Broxton (SOC)
+  * Intel Lewisburg (PCH)
    Datasheets: Publicly available at the Intel website
 
 On Intel Patsburg and later chipsets, both the normal host SMBus controller
index f8aae63..742f69d 100644 (file)
@@ -1583,9 +1583,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                hwp_only
                        Only load intel_pstate on systems which support
                        hardware P state control (HWP) if available.
-               no_acpi
-                       Don't use ACPI processor performance control objects
-                       _PSS and _PPC specified limits.
 
        intremap=       [X86-64, Intel-IOMMU]
                        on      enable Interrupt Remapping (default)
index b16bffa..b81565b 100644 (file)
@@ -1931,7 +1931,7 @@ S:        Supported
 F:     drivers/i2c/busses/i2c-at91.c
 
 ATMEL ISI DRIVER
-M:     Josh Wu <josh.wu@atmel.com>
+M:     Ludovic Desroches <ludovic.desroches@atmel.com>
 L:     linux-media@vger.kernel.org
 S:     Supported
 F:     drivers/media/platform/soc_camera/atmel-isi.c
@@ -1950,7 +1950,8 @@ S:        Supported
 F:     drivers/net/ethernet/cadence/
 
 ATMEL NAND DRIVER
-M:     Josh Wu <josh.wu@atmel.com>
+M:     Wenyou Yang <wenyou.yang@atmel.com>
+M:     Josh Wu <rainyfeeling@outlook.com>
 L:     linux-mtd@lists.infradead.org
 S:     Supported
 F:     drivers/mtd/nand/atmel_nand*
@@ -2449,7 +2450,9 @@ F:        drivers/firmware/broadcom/*
 
 BROADCOM STB NAND FLASH DRIVER
 M:     Brian Norris <computersforpeace@gmail.com>
+M:     Kamal Dasu <kdasu.kdev@gmail.com>
 L:     linux-mtd@lists.infradead.org
+L:     bcm-kernel-feedback-list@broadcom.com
 S:     Maintained
 F:     drivers/mtd/nand/brcmnand/
 
@@ -2929,10 +2932,9 @@ S:       Maintained
 F:     drivers/platform/x86/compal-laptop.c
 
 CONEXANT ACCESSRUNNER USB DRIVER
-M:     Simon Arlott <cxacru@fire.lp0.eu>
 L:     accessrunner-general@lists.sourceforge.net
 W:     http://accessrunner.sourceforge.net/
-S:     Maintained
+S:     Orphan
 F:     drivers/usb/atm/cxacru.c
 
 CONFIGFS
@@ -4409,6 +4411,7 @@ K:        fmc_d.*register
 
 FPGA MANAGER FRAMEWORK
 M:     Alan Tull <atull@opensource.altera.com>
+R:     Moritz Fischer <moritz.fischer@ettus.com>
 S:     Maintained
 F:     drivers/fpga/
 F:     include/linux/fpga/fpga-mgr.h
@@ -4981,6 +4984,7 @@ F:        arch/*/include/asm/suspend*.h
 
 HID CORE LAYER
 M:     Jiri Kosina <jikos@kernel.org>
+R:     Benjamin Tissoires <benjamin.tissoires@redhat.com>
 L:     linux-input@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git
 S:     Maintained
@@ -6364,6 +6368,7 @@ F:        arch/*/include/asm/pmem.h
 LIGHTNVM PLATFORM SUPPORT
 M:     Matias Bjorling <mb@lightnvm.io>
 W:     http://github/OpenChannelSSD
+L:     linux-block@vger.kernel.org
 S:     Maintained
 F:     drivers/lightnvm/
 F:     include/linux/lightnvm.h
@@ -7902,6 +7907,18 @@ S:       Maintained
 F:     net/openvswitch/
 F:     include/uapi/linux/openvswitch.h
 
+OPERATING PERFORMANCE POINTS (OPP)
+M:     Viresh Kumar <vireshk@kernel.org>
+M:     Nishanth Menon <nm@ti.com>
+M:     Stephen Boyd <sboyd@codeaurora.org>
+L:     linux-pm@vger.kernel.org
+S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm.git
+F:     drivers/base/power/opp/
+F:     include/linux/pm_opp.h
+F:     Documentation/power/opp.txt
+F:     Documentation/devicetree/bindings/opp/
+
 OPL4 DRIVER
 M:     Clemens Ladisch <clemens@ladisch.de>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -11081,6 +11098,7 @@ F:      include/linux/usb/gadget*
 
 USB HID/HIDBP DRIVERS (USB KEYBOARDS, MICE, REMOTE CONTROLS, ...)
 M:     Jiri Kosina <jikos@kernel.org>
+R:     Benjamin Tissoires <benjamin.tissoires@redhat.com>
 L:     linux-usb@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git
 S:     Maintained
index 3a0234f..904a1d6 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc3
 NAME = Blurry Fish Butt
 
 # *DOCUMENTATION*
index c92c0ef..f1ac981 100644 (file)
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
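
This and every ARC defconfig hunk that follows make the same change: the
uClibc-specific cross-toolchain prefix arc-linux-uclibc- is replaced by the
generic arc-linux- prefix. A hedged build sketch with the new prefix (the
defconfig name is just one of those touched in this merge):

  make ARCH=arc CROSS_COMPILE=arc-linux- axs101_defconfig
  make ARCH=arc CROSS_COMPILE=arc-linux- -j4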
index cfac24e..323486d 100644 (file)
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
index 9922a11..66191cd 100644 (file)
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
index f761a7c..f68838e 100644 (file)
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
index dc6f74f..96bd1c2 100644 (file)
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
index 3fef0a2..fcae666 100644 (file)
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
index 5178483..b01b659 100644 (file)
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
index ef35ef3..a07f20d 100644 (file)
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_CROSS_MEMORY_ATTACH is not set
index 634509e..f36c047 100644 (file)
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_CROSS_MEMORY_ATTACH is not set
index ad481c2..258b0e5 100644 (file)
@@ -37,6 +37,9 @@
 #define ISA_INIT_STATUS_BITS   (STATUS_IE_MASK | STATUS_AD_MASK | \
                                        (ARCV2_IRQ_DEF_PRIO << 1))
 
+/* SLEEP needs default irq priority (<=) which can interrupt the doze */
+#define ISA_SLEEP_ARG          (0x10 | ARCV2_IRQ_DEF_PRIO)
+
 #ifndef __ASSEMBLY__
 
 /*
index d8c6081..c1d3645 100644 (file)
@@ -43,6 +43,8 @@
 
 #define ISA_INIT_STATUS_BITS   STATUS_IE_MASK
 
+#define ISA_SLEEP_ARG          0x3
+
 #ifndef __ASSEMBLY__
 
 /******************************************************************
index c14a5be..5d446df 100644 (file)
@@ -58,8 +58,6 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task)
                "st      sp, [r24]       \n\t"
 #endif
 
-               "sync   \n\t"
-
                /*
                 * setup _current_task with incoming tsk.
                 * optionally, set r25 to that as well
index e248594..e6890b1 100644 (file)
@@ -44,9 +44,6 @@ __switch_to:
        * don't need to do anything special to return it
        */
 
-       /* hardware memory barrier */
-       sync
-
        /*
         * switch to new task, contained in r1
         * Temp reg r3 is required to get the ptr to store val
index 91d5a0f..a3f750e 100644 (file)
@@ -44,11 +44,10 @@ SYSCALL_DEFINE0(arc_gettls)
 void arch_cpu_idle(void)
 {
        /* sleep, but enable all interrupts before committing */
-       if (is_isa_arcompact()) {
-               __asm__("sleep 0x3");
-       } else {
-               __asm__("sleep 0x10");
-       }
+       __asm__ __volatile__(
+               "sleep %0       \n"
+               :
+               :"I"(ISA_SLEEP_ARG)); /* can't be "r" has to be embedded const */
 }
 
 asmlinkage void ret_from_fork(void);
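
For readers outside arch/arc, the terse comment above means: SLEEP encodes its
argument as an instruction immediate, so the operand must be a compile-time
constant; the "I" asm constraint asks the compiler for such an embeddable
constant, whereas an "r" (register) operand could not be encoded. A reduced,
illustrative sketch of the same pattern (not the kernel's actual helper):

	/* sketch: fold a compile-time constant into the SLEEP immediate */
	#define MY_SLEEP_ARG	0x3	/* illustrative, mirrors ISA_SLEEP_ARG above */

	static inline void doze(void)
	{
		__asm__ __volatile__("sleep %0" : : "I"(MY_SLEEP_ARG));
	}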
index 93c6ea5..7352475 100644 (file)
@@ -986,42 +986,13 @@ int arc_unwind(struct unwind_frame_info *frame)
                                                            (const u8 *)(fde +
                                                                         1) +
                                                            *fde, ptrType);
-                               if (pc >= endLoc)
+                               if (pc >= endLoc) {
                                        fde = NULL;
-                       } else
-                               fde = NULL;
-               }
-               if (fde == NULL) {
-                       for (fde = table->address, tableSize = table->size;
-                            cie = NULL, tableSize > sizeof(*fde)
-                            && tableSize - sizeof(*fde) >= *fde;
-                            tableSize -= sizeof(*fde) + *fde,
-                            fde += 1 + *fde / sizeof(*fde)) {
-                               cie = cie_for_fde(fde, table);
-                               if (cie == &bad_cie) {
                                        cie = NULL;
-                                       break;
                                }
-                               if (cie == NULL
-                                   || cie == &not_fde
-                                   || (ptrType = fde_pointer_type(cie)) < 0)
-                                       continue;
-                               ptr = (const u8 *)(fde + 2);
-                               startLoc = read_pointer(&ptr,
-                                                       (const u8 *)(fde + 1) +
-                                                       *fde, ptrType);
-                               if (!startLoc)
-                                       continue;
-                               if (!(ptrType & DW_EH_PE_indirect))
-                                       ptrType &=
-                                           DW_EH_PE_FORM | DW_EH_PE_signed;
-                               endLoc =
-                                   startLoc + read_pointer(&ptr,
-                                                           (const u8 *)(fde +
-                                                                        1) +
-                                                           *fde, ptrType);
-                               if (pc >= startLoc && pc < endLoc)
-                                       break;
+                       } else {
+                               fde = NULL;
+                               cie = NULL;
                        }
                }
        }
index 0ee7398..daf2bf5 100644 (file)
@@ -619,10 +619,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
 
                int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
                if (dirty) {
-                       /* wback + inv dcache lines */
+                       /* wback + inv dcache lines (K-mapping) */
                        __flush_dcache_page(paddr, paddr);
 
-                       /* invalidate any existing icache lines */
+                       /* invalidate any existing icache lines (U-mapping) */
                        if (vma->vm_flags & VM_EXEC)
                                __inv_icache_page(paddr, vaddr);
                }
index 0365cbb..34e1569 100644 (file)
@@ -76,6 +76,8 @@ config ARM
        select IRQ_FORCED_THREADING
        select MODULES_USE_ELF_REL
        select NO_BOOTMEM
+       select OF_EARLY_FLATTREE if OF
+       select OF_RESERVED_MEM if OF
        select OLD_SIGACTION
        select OLD_SIGSUSPEND3
        select PERF_USE_VMALLOC
@@ -1822,8 +1824,6 @@ config USE_OF
        bool "Flattened Device Tree support"
        select IRQ_DOMAIN
        select OF
-       select OF_EARLY_FLATTREE
-       select OF_RESERVED_MEM
        help
          Include support for flattened device tree machine descriptions.
 
index d9ba6b8..00352e7 100644 (file)
                reg = <0x6f>;
                interrupts-extended = <&crossbar_mpu GIC_SPI 2 IRQ_TYPE_EDGE_RISING>,
                                      <&dra7_pmx_core 0x424>;
+               interrupt-names = "irq", "wakeup";
 
                pinctrl-names = "default";
                pinctrl-0 = <&mcp79410_pins_default>;
index 4e0ad3b..0962f2f 100644 (file)
                        label = "keyswitch_in";
                        gpios = <&pioB 1 GPIO_ACTIVE_HIGH>;
                        linux,code = <28>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                error_in {
                        label = "error_in";
                        gpios = <&pioB 2 GPIO_ACTIVE_HIGH>;
                        linux,code = <29>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                btn {
                        label = "btn";
                        gpios = <&pioC 23 GPIO_ACTIVE_HIGH>;
                        linux,code = <31>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 };
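
The same mechanical conversion repeats across the Atmel board files that
follow: the older gpio-key,wakeup property gives way to the generic
wakeup-source property, so each converted key node ends up in the form shown
here (reproduced from the hunk above for clarity):

		keyswitch_in {
			label = "keyswitch_in";
			gpios = <&pioB 1 GPIO_ACTIVE_HIGH>;
			linux,code = <28>;
			wakeup-source;
		};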
index f89598a..6bf873e 100644 (file)
                        label = "Button";
                        gpios = <&pioC 4 GPIO_ACTIVE_LOW>;
                        linux,code = <0x103>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 };
index bf18ece..229e989 100644 (file)
        };
 
        clocks {
-               #address-cells = <1>;
-               #size-cells = <1>;
-               ranges;
-
-               main_clock: clock@0 {
-                       compatible = "atmel,osc", "fixed-clock";
-                       clock-frequency = <18432000>;
-               };
-
                main_xtal {
                        clock-frequency = <18432000>;
                };
                        label = "PB_RST";
                        gpios = <&pioB 30 GPIO_ACTIVE_HIGH>;
                        linux,code = <0x100>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                user {
                        label = "PB_USER";
                        gpios = <&pioB 31 GPIO_ACTIVE_HIGH>;
                        linux,code = <0x101>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
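
A second recurring cleanup starts with this board and repeats below: the
hard-coded main_clock fixed-clock override is dropped from the clocks node,
apparently leaving the crystal description (main_xtal / slow_xtal frequencies)
as the single source of the main clock rate. The surviving form, taken from the
hunk above, is simply:

	clocks {
		main_xtal {
			clock-frequency = <18432000>;
		};
	};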
        };
 
index f0b1563..50a1456 100644 (file)
                        label = "PB_PROG";
                        gpios = <&pioE 27 GPIO_ACTIVE_LOW>;
                        linux,code = <0x102>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                reset {
                        label = "PB_RST";
                        gpios = <&pioE 29 GPIO_ACTIVE_LOW>;
                        linux,code = <0x100>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                user {
                        label = "PB_USER";
                        gpios = <&pioE 31 GPIO_ACTIVE_HIGH>;
                        linux,code = <0x101>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index 9f72b49..9682d10 100644 (file)
                        label = "PB_PROG";
                        gpios = <&pioC 17 GPIO_ACTIVE_LOW>;
                        linux,code = <0x102>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                reset {
                        label = "PB_RST";
                        gpios = <&pioC 16 GPIO_ACTIVE_LOW>;
                        linux,code = <0x100>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index a9aef53..4f2eebf 100644 (file)
                        label = "user_pb";
                        gpios = <&pioB 10 GPIO_ACTIVE_LOW>;
                        linux,code = <28>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index e07c2b2..ad6de73 100644 (file)
@@ -45,6 +45,7 @@
 /dts-v1/;
 #include "sama5d2.dtsi"
 #include "sama5d2-pinfunc.h"
+#include <dt-bindings/mfd/atmel-flexcom.h>
 
 / {
        model = "Atmel SAMA5D2 Xplained";
        };
 
        clocks {
-               #address-cells = <1>;
-               #size-cells = <1>;
-               ranges;
-
-               main_clock: clock@0 {
-                       compatible = "atmel,osc", "fixed-clock";
-                       clock-frequency = <12000000>;
-               };
-
                slow_xtal {
                        clock-frequency = <32768>;
                };
                        status = "okay";
                };
 
+               sdmmc0: sdio-host@a0000000 {
+                       bus-width = <8>;
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&pinctrl_sdmmc0_default>;
+                       non-removable;
+                       mmc-ddr-1_8v;
+                       status = "okay";
+               };
+
+               sdmmc1: sdio-host@b0000000 {
+                       bus-width = <4>;
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&pinctrl_sdmmc1_default>;
+                       status = "okay"; /* conflict with qspi0 */
+               };
+
                apb {
                        spi0: spi@f8000000 {
                                pinctrl-names = "default";
                                };
                        };
 
+                       flx0: flexcom@f8034000 {
+                               atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_USART>;
+                               status = "disabled"; /* conflict with ISC_D2 & ISC_D3 data pins */
+
+                               uart5: serial@200 {
+                                       compatible = "atmel,at91sam9260-usart";
+                                       reg = <0x200 0x200>;
+                                       interrupts = <19 IRQ_TYPE_LEVEL_HIGH 7>;
+                                       clocks = <&flx0_clk>;
+                                       clock-names = "usart";
+                                       pinctrl-names = "default";
+                                       pinctrl-0 = <&pinctrl_flx0_default>;
+                                       atmel,fifo-size = <32>;
+                                       status = "okay";
+                               };
+                       };
+
                        uart3: serial@fc008000 {
                                pinctrl-names = "default";
                                pinctrl-0 = <&pinctrl_uart3_default>;
                                status = "okay";
                        };
 
+                       flx4: flexcom@fc018000 {
+                               atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_TWI>;
+                               status = "okay";
+
+                               i2c2: i2c@600 {
+                                       compatible = "atmel,sama5d2-i2c";
+                                       reg = <0x600 0x200>;
+                                       interrupts = <23 IRQ_TYPE_LEVEL_HIGH 7>;
+                                       dmas = <0>, <0>;
+                                       dma-names = "tx", "rx";
+                                       #address-cells = <1>;
+                                       #size-cells = <0>;
+                                       clocks = <&flx4_clk>;
+                                       pinctrl-names = "default";
+                                       pinctrl-0 = <&pinctrl_flx4_default>;
+                                       atmel,fifo-size = <16>;
+                                       status = "okay";
+                               };
+                       };
+
                        i2c1: i2c@fc028000 {
                                dmas = <0>, <0>;
                                pinctrl-names = "default";
                        };
 
                        pinctrl@fc038000 {
+                               pinctrl_flx0_default: flx0_default {
+                                       pinmux = <PIN_PB28__FLEXCOM0_IO0>,
+                                                <PIN_PB29__FLEXCOM0_IO1>;
+                                       bias-disable;
+                               };
+
+                               pinctrl_flx4_default: flx4_default {
+                                       pinmux = <PIN_PD12__FLEXCOM4_IO0>,
+                                                <PIN_PD13__FLEXCOM4_IO1>;
+                                       bias-disable;
+                               };
+
                                pinctrl_i2c0_default: i2c0_default {
                                        pinmux = <PIN_PD21__TWD0>,
                                                 <PIN_PD22__TWCK0>;
                                        bias-disable;
                                };
 
+                               pinctrl_sdmmc0_default: sdmmc0_default {
+                                       cmd_data {
+                                               pinmux = <PIN_PA1__SDMMC0_CMD>,
+                                                        <PIN_PA2__SDMMC0_DAT0>,
+                                                        <PIN_PA3__SDMMC0_DAT1>,
+                                                        <PIN_PA4__SDMMC0_DAT2>,
+                                                        <PIN_PA5__SDMMC0_DAT3>,
+                                                        <PIN_PA6__SDMMC0_DAT4>,
+                                                        <PIN_PA7__SDMMC0_DAT5>,
+                                                        <PIN_PA8__SDMMC0_DAT6>,
+                                                        <PIN_PA9__SDMMC0_DAT7>;
+                                               bias-pull-up;
+                                       };
+
+                                       ck_cd_rstn_vddsel {
+                                               pinmux = <PIN_PA0__SDMMC0_CK>,
+                                                        <PIN_PA10__SDMMC0_RSTN>,
+                                                        <PIN_PA11__SDMMC0_VDDSEL>,
+                                                        <PIN_PA13__SDMMC0_CD>;
+                                               bias-disable;
+                                       };
+                               };
+
+                               pinctrl_sdmmc1_default: sdmmc1_default {
+                                       cmd_data {
+                                               pinmux = <PIN_PA28__SDMMC1_CMD>,
+                                                        <PIN_PA18__SDMMC1_DAT0>,
+                                                        <PIN_PA19__SDMMC1_DAT1>,
+                                                        <PIN_PA20__SDMMC1_DAT2>,
+                                                        <PIN_PA21__SDMMC1_DAT3>;
+                                               bias-pull-up;
+                                       };
+
+                                       conf-ck_cd {
+                                               pinmux = <PIN_PA22__SDMMC1_CK>,
+                                                        <PIN_PA30__SDMMC1_CD>;
+                                               bias-disable;
+                                       };
+                               };
+
                                pinctrl_spi0_default: spi0_default {
                                        pinmux = <PIN_PA14__SPI0_SPCK>,
                                                 <PIN_PA15__SPI0_MOSI>,
index 8488ac5..ff888d2 100644 (file)
                        label = "PB_USER";
                        gpios = <&pioE 29 GPIO_ACTIVE_LOW>;
                        linux,code = <0x104>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index 45371a1..131614f 100644 (file)
@@ -50,7 +50,6 @@
        compatible = "atmel,sama5d4-xplained", "atmel,sama5d4", "atmel,sama5";
 
        chosen {
-               bootargs = "ignore_loglevel earlyprintk";
                stdout-path = "serial0:115200n8";
        };
 
        };
 
        clocks {
-               #address-cells = <1>;
-               #size-cells = <1>;
-               ranges;
-
-               main_clock: clock@0 {
-                       compatible = "atmel,osc", "fixed-clock";
-                       clock-frequency = <12000000>;
-               };
-
                slow_xtal {
                        clock-frequency = <32768>;
                };
                        label = "pb_user1";
                        gpios = <&pioE 8 GPIO_ACTIVE_HIGH>;
                        linux,code = <0x100>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index 6d272c0..2d4a331 100644 (file)
@@ -50,7 +50,6 @@
        compatible = "atmel,sama5d4ek", "atmel,sama5d4", "atmel,sama5";
 
        chosen {
-               bootargs = "ignore_loglevel earlyprintk";
                stdout-path = "serial0:115200n8";
        };
 
        };
 
        clocks {
-               #address-cells = <1>;
-               #size-cells = <1>;
-               ranges;
-
-               main_clock: clock@0 {
-                       compatible = "atmel,osc", "fixed-clock";
-                       clock-frequency = <12000000>;
-               };
-
                slow_xtal {
                        clock-frequency = <32768>;
                };
                        label = "pb_user1";
                        gpios = <&pioE 13 GPIO_ACTIVE_HIGH>;
                        linux,code = <0x100>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index 8dab4b7..f90e1c2 100644 (file)
        };
 
        clocks {
-               #address-cells = <1>;
-               #size-cells = <1>;
-               ranges;
-
-               main_clock: clock@0 {
-                       compatible = "atmel,osc", "fixed-clock";
-                       clock-frequency = <18432000>;
-               };
-
                slow_xtal {
                        clock-frequency = <32768>;
                };
index 2e92ac0..55bd51f 100644 (file)
        };
 
        clocks {
-               #address-cells = <1>;
-               #size-cells = <1>;
-               ranges;
-
-               main_clock: clock@0 {
-                       compatible = "atmel,osc", "fixed-clock";
-                       clock-frequency = <18432000>;
-               };
-
                slow_xtal {
                        clock-frequency = <32768>;
                };
                                        ti,debounce-tol = /bits/ 16 <65535>;
                                        ti,debounce-max = /bits/ 16 <1>;
 
-                                       linux,wakeup;
+                                       wakeup-source;
                                };
                        };
 
                        label = "button_0";
                        gpios = <&pioA 27 GPIO_ACTIVE_LOW>;
                        linux,code = <256>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                button_1 {
                        label = "button_1";
                        gpios = <&pioA 26 GPIO_ACTIVE_LOW>;
                        linux,code = <257>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                button_2 {
                        label = "button_2";
                        gpios = <&pioA 25 GPIO_ACTIVE_LOW>;
                        linux,code = <258>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                button_3 {
                        label = "button_3";
                        gpios = <&pioA 24 GPIO_ACTIVE_LOW>;
                        linux,code = <259>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 };
index 2338127..59df9d7 100644 (file)
        };
 
        clocks {
-               #address-cells = <1>;
-               #size-cells = <1>;
-               ranges;
-
-               main_clock: clock@0 {
-                       compatible = "atmel,osc", "fixed-clock";
-                       clock-frequency = <16367660>;
-               };
-
                slow_xtal {
                        clock-frequency = <32768>;
                };
                        label = "left_click";
                        gpios = <&pioC 5 GPIO_ACTIVE_LOW>;
                        linux,code = <272>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                right_click {
                        label = "right_click";
                        gpios = <&pioC 4 GPIO_ACTIVE_LOW>;
                        linux,code = <273>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index 57548a2..e9cc99b 100644 (file)
        };
 
        clocks {
-               #address-cells = <1>;
-               #size-cells = <1>;
-               ranges;
-
-               main_clock: clock@0 {
-                       compatible = "atmel,osc", "fixed-clock";
-                       clock-frequency = <18432000>;
-               };
-
                slow_xtal {
                        clock-frequency = <32768>;
                };
                        label = "Button 3";
                        gpios = <&pioA 30 GPIO_ACTIVE_LOW>;
                        linux,code = <0x103>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                btn4 {
                        label = "Button 4";
                        gpios = <&pioA 31 GPIO_ACTIVE_LOW>;
                        linux,code = <0x104>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index 9d16ef8..2400c99 100644 (file)
        };
 
        clocks {
-               #address-cells = <1>;
-               #size-cells = <1>;
-               ranges;
-
-               main_clock: clock@0 {
-                       compatible = "atmel,osc", "fixed-clock";
-                       clock-frequency = <12000000>;
-               };
-
                slow_xtal {
                      clock-frequency = <32768>;
                };
                        label = "left_click";
                        gpios = <&pioB 6 GPIO_ACTIVE_LOW>;
                        linux,code = <272>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                right_click {
                        label = "right_click";
                        gpios = <&pioB 7 GPIO_ACTIVE_LOW>;
                        linux,code = <273>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                left {
index acf3451..ca4ddf8 100644 (file)
        };
 
        clocks {
-               #address-cells = <1>;
-               #size-cells = <1>;
-               ranges;
-
-               main_clock: clock@0 {
-                       compatible = "atmel,osc", "fixed-clock";
-                       clock-frequency = <16000000>;
-               };
-
                slow_xtal {
                        clock-frequency = <32768>;
                };
                        label = "Enter";
                        gpios = <&pioB 3 GPIO_ACTIVE_LOW>;
                        linux,code = <28>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index 558c9f2..f10566f 100644 (file)
        };
 
        clocks {
-               #address-cells = <1>;
-               #size-cells = <1>;
-               ranges;
-
-               main_clock: clock {
-                       compatible = "atmel,osc", "fixed-clock";
-                       clock-frequency = <12000000>;
-               };
-
                slow_xtal {
                        clock-frequency = <32768>;
                };
                        label = "right_click";
                        gpios = <&pioB 0 GPIO_ACTIVE_LOW>;
                        linux,code = <273>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                left_click {
                        label = "left_click";
                        gpios = <&pioB 1 GPIO_ACTIVE_LOW>;
                        linux,code = <272>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index 26112eb..b098ad8 100644 (file)
                reg = <0x20000000 0x8000000>;
        };
 
-       clocks {
-               #address-cells = <1>;
-               #size-cells = <1>;
-               ranges;
-
-               main_clock: clock@0 {
-                       compatible = "atmel,osc", "fixed-clock";
-                       clock-frequency = <12000000>;
-               };
-       };
-
        clocks {
                slow_xtal {
                        clock-frequency = <32768>;
index bc672fb..fe99231 100644 (file)
                        interrupt-names = "tx", "rx";
                        dmas = <&sdma_xbar 133>, <&sdma_xbar 132>;
                        dma-names = "tx", "rx";
-                       clocks = <&mcasp3_ahclkx_mux>;
-                       clock-names = "fck";
+                       clocks = <&mcasp3_aux_gfclk_mux>, <&mcasp3_ahclkx_mux>;
+                       clock-names = "fck", "ahclkx";
                        status = "disabled";
                };
 
index feb9d34..f818ea4 100644 (file)
                                compatible = "fsl,imx27-usb";
                                reg = <0x10024000 0x200>;
                                interrupts = <56>;
-                               clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
+                               clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
+                                       <&clks IMX27_CLK_USB_AHB_GATE>,
+                                       <&clks IMX27_CLK_USB_DIV>;
+                               clock-names = "ipg", "ahb", "per";
                                fsl,usbmisc = <&usbmisc 0>;
                                status = "disabled";
                        };
                                compatible = "fsl,imx27-usb";
                                reg = <0x10024200 0x200>;
                                interrupts = <54>;
-                               clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
+                               clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
+                                       <&clks IMX27_CLK_USB_AHB_GATE>,
+                                       <&clks IMX27_CLK_USB_DIV>;
+                               clock-names = "ipg", "ahb", "per";
                                fsl,usbmisc = <&usbmisc 1>;
                                dr_mode = "host";
                                status = "disabled";
                                compatible = "fsl,imx27-usb";
                                reg = <0x10024400 0x200>;
                                interrupts = <55>;
-                               clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
+                               clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
+                                       <&clks IMX27_CLK_USB_AHB_GATE>,
+                                       <&clks IMX27_CLK_USB_DIV>;
+                               clock-names = "ipg", "ahb", "per";
                                fsl,usbmisc = <&usbmisc 2>;
                                dr_mode = "host";
                                status = "disabled";
                                #index-cells = <1>;
                                compatible = "fsl,imx27-usbmisc";
                                reg = <0x10024600 0x200>;
-                               clocks = <&clks IMX27_CLK_USB_AHB_GATE>;
                        };
 
                        sahara2: sahara@10025000 {
index 01aef23..5acbd0d 100644 (file)
@@ -137,7 +137,7 @@ netcp: netcp@26000000 {
        /* NetCP address range */
        ranges = <0 0x26000000 0x1000000>;
 
-       clocks = <&papllclk>, <&clkcpgmac>, <&chipclk12>;
+       clocks = <&clkosr>, <&papllclk>, <&clkcpgmac>, <&chipclk12>;
        dma-coherent;
 
        ti,navigator-dmas = <&dma_gbe 0>,
index c56ab6b..0e46560 100644 (file)
@@ -40,7 +40,7 @@
                };
                poweroff@12100 {
                        compatible = "qnap,power-off";
-                       reg = <0x12000 0x100>;
+                       reg = <0x12100 0x100>;
                        clocks = <&gate_clk 7>;
                };
                spi@10600 {
index 8fd8ef2..85f0373 100644 (file)
        };
 };
 
+&emmc {
+       /delete-property/mmc-hs200-1_8v;
+};
+
 &gpio_keys {
        pinctrl-0 = <&pwr_key_l &ap_lid_int_l &volum_down_l &volum_up_l>;
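
For readers unfamiliar with the &emmc override above: /delete-property/ removes
a property that an included .dtsi already set on the node, so this board opts
back out of the inherited mmc-hs200-1_8v capability. The general pattern (node
and property names below are placeholders) is:

	&some_node {
		/delete-property/ some-inherited-property;
	};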
 
index 6a79c9c..04ea209 100644 (file)
                clock-names = "tsadc", "apb_pclk";
                resets = <&cru SRST_TSADC>;
                reset-names = "tsadc-apb";
-               pinctrl-names = "default";
-               pinctrl-0 = <&otp_out>;
+               pinctrl-names = "init", "default", "sleep";
+               pinctrl-0 = <&otp_gpio>;
+               pinctrl-1 = <&otp_out>;
+               pinctrl-2 = <&otp_gpio>;
                #thermal-sensor-cells = <1>;
                rockchip,hw-tshut-temp = <95000>;
                status = "disabled";
                };
 
                tsadc {
+                       otp_gpio: otp-gpio {
+                               rockchip,pins = <0 10 RK_FUNC_GPIO &pcfg_pull_none>;
+                       };
+
                        otp_out: otp-out {
                                rockchip,pins = <0 10 RK_FUNC_1 &pcfg_pull_none>;
                        };
index d9a9aca..e812f5c 100644 (file)
@@ -49,7 +49,7 @@
                        label = "pb_user1";
                        gpios = <&pioE 27 GPIO_ACTIVE_HIGH>;
                        linux,code = <0x100>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 };
index 15bbaf6..2193637 100644 (file)
                        };
 
                        watchdog@fc068640 {
-                               compatible = "atmel,at91sam9260-wdt";
+                               compatible = "atmel,sama5d4-wdt";
                                reg = <0xfc068640 0x10>;
                                clocks = <&clk32k>;
                                status = "disabled";
index 12edafe..9beea89 100644 (file)
                        label = "user_pb";
                        gpios = <&pioB 10 GPIO_ACTIVE_LOW>;
                        linux,code = <28>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index 68c0de3..8cc6edb 100644 (file)
                        label = "user_pb";
                        gpios = <&pioB 10 GPIO_ACTIVE_LOW>;
                        linux,code = <28>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index 6736bae..0d5acc2 100644 (file)
                                interrupts = <67 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clks VF610_CLK_DSPI0>;
                                clock-names = "dspi";
-                               spi-num-chipselects = <5>;
+                               spi-num-chipselects = <6>;
                                status = "disabled";
                        };
 
                                interrupts = <68 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clks VF610_CLK_DSPI1>;
                                clock-names = "dspi";
-                               spi-num-chipselects = <5>;
+                               spi-num-chipselects = <4>;
                                status = "disabled";
                        };
 
                                clock-names = "adc";
                                #io-channel-cells = <1>;
                                status = "disabled";
+                               fsl,adck-max-frequency = <30000000>, <40000000>,
+                                                       <20000000>;
                        };
 
                        esdhc0: esdhc@400b1000 {
                                        <&clks VF610_CLK_ESDHC0>;
                                clock-names = "ipg", "ahb", "per";
                                status = "disabled";
-                               fsl,adck-max-frequency = <30000000>, <40000000>,
-                                                       <20000000>;
                        };
 
                        esdhc1: esdhc@400b2000 {
index 1b1e5ac..e4b1be6 100644 (file)
@@ -125,7 +125,6 @@ CONFIG_POWER_RESET=y
 # CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_AT91SAM9X_WATCHDOG=y
-CONFIG_SSB=m
 CONFIG_MFD_ATMEL_HLCDC=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
index a0c57ac..63f7e6c 100644 (file)
@@ -129,7 +129,6 @@ CONFIG_GPIO_SYSFS=y
 CONFIG_POWER_SUPPLY=y
 CONFIG_POWER_RESET=y
 # CONFIG_HWMON is not set
-CONFIG_SSB=m
 CONFIG_MFD_ATMEL_FLEXCOM=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
index be1d07d..1bd9510 100644 (file)
@@ -40,6 +40,11 @@ extern void arch_trigger_all_cpu_backtrace(bool);
 #define arch_trigger_all_cpu_backtrace(x) arch_trigger_all_cpu_backtrace(x)
 #endif
 
+static inline int nr_legacy_irqs(void)
+{
+       return NR_IRQS_LEGACY;
+}
+
 #endif
 
 #endif
index 7a2a32a..ede692f 100644 (file)
 #define __NR_execveat                  (__NR_SYSCALL_BASE+387)
 #define __NR_userfaultfd               (__NR_SYSCALL_BASE+388)
 #define __NR_membarrier                        (__NR_SYSCALL_BASE+389)
+#define __NR_mlock2                    (__NR_SYSCALL_BASE+390)
 
 /*
  * The following SWIs are ARM private.
index 6551d28..066f7f9 100644 (file)
 #include <asm/mach/pci.h>
 
 static int debug_pci;
-static resource_size_t (*align_resource)(struct pci_dev *dev,
-                 const struct resource *res,
-                 resource_size_t start,
-                 resource_size_t size,
-                 resource_size_t align) = NULL;
 
 /*
  * We can't use pci_get_device() here since we are
@@ -461,7 +456,6 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
                sys->busnr   = busnr;
                sys->swizzle = hw->swizzle;
                sys->map_irq = hw->map_irq;
-               align_resource = hw->align_resource;
                INIT_LIST_HEAD(&sys->resources);
 
                if (hw->private_data)
@@ -470,6 +464,8 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
                ret = hw->setup(nr, sys);
 
                if (ret > 0) {
+                       struct pci_host_bridge *host_bridge;
+
                        ret = pcibios_init_resources(nr, sys);
                        if (ret)  {
                                kfree(sys);
@@ -491,6 +487,9 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
                        busnr = sys->bus->busn_res.end + 1;
 
                        list_add(&sys->node, head);
+
+                       host_bridge = pci_find_host_bridge(sys->bus);
+                       host_bridge->align_resource = hw->align_resource;
                } else {
                        kfree(sys);
                        if (ret < 0)
@@ -578,14 +577,18 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
 {
        struct pci_dev *dev = data;
        resource_size_t start = res->start;
+       struct pci_host_bridge *host_bridge;
 
        if (res->flags & IORESOURCE_IO && start & 0x300)
                start = (start + 0x3ff) & ~0x3ff;
 
        start = (start + align - 1) & ~(align - 1);
 
-       if (align_resource)
-               return align_resource(dev, res, start, size, align);
+       host_bridge = pci_find_host_bridge(dev->bus);
+
+       if (host_bridge->align_resource)
+               return host_bridge->align_resource(dev, res,
+                               start, size, align);
 
        return start;
 }
index fde6c88..ac368bb 100644 (file)
                CALL(sys_execveat)
                CALL(sys_userfaultfd)
                CALL(sys_membarrier)
+               CALL(sys_mlock2)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
index eab83b2..e06fd29 100644 (file)
@@ -563,18 +563,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                if (vcpu->arch.power_off || vcpu->arch.pause)
                        vcpu_sleep(vcpu);
 
-               /*
-                * Disarming the background timer must be done in a
-                * preemptible context, as this call may sleep.
-                */
-               kvm_timer_flush_hwstate(vcpu);
-
                /*
                 * Preparing the interrupts to be injected also
                 * involves poking the GIC, which must be done in a
                 * non-preemptible context.
                 */
                preempt_disable();
+               kvm_timer_flush_hwstate(vcpu);
                kvm_vgic_flush_hwstate(vcpu);
 
                local_irq_disable();
index 6984342..7dace90 100644 (file)
@@ -98,6 +98,11 @@ static void kvm_flush_dcache_pud(pud_t pud)
        __kvm_flush_dcache_pud(pud);
 }
 
+static bool kvm_is_device_pfn(unsigned long pfn)
+{
+       return !pfn_valid(pfn);
+}
+
 /**
  * stage2_dissolve_pmd() - clear and flush huge PMD entry
  * @kvm:       pointer to kvm structure.
@@ -213,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
                        kvm_tlb_flush_vmid_ipa(kvm, addr);
 
                        /* No need to invalidate the cache for device mappings */
-                       if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+                       if (!kvm_is_device_pfn(__phys_to_pfn(addr)))
                                kvm_flush_dcache_pte(old_pte);
 
                        put_page(virt_to_page(pte));
@@ -305,8 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
 
        pte = pte_offset_kernel(pmd, addr);
        do {
-               if (!pte_none(*pte) &&
-                   (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+               if (!pte_none(*pte) && !kvm_is_device_pfn(__phys_to_pfn(addr)))
                        kvm_flush_dcache_pte(*pte);
        } while (pte++, addr += PAGE_SIZE, addr != end);
 }
@@ -1037,11 +1041,6 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
        return kvm_vcpu_dabt_iswrite(vcpu);
 }
 
-static bool kvm_is_device_pfn(unsigned long pfn)
-{
-       return !pfn_valid(pfn);
-}
-
 /**
  * stage2_wp_ptes - write protect PMD range
  * @pmd:       pointer to pmd entry
index 72d622b..df1d44b 100644 (file)
        @ check low interrupts
        ldr     \irqstat, [\base, #IRQ_CAUSE_LOW_OFF]
        ldr     \tmp, [\base, #IRQ_MASK_LOW_OFF]
-       mov     \irqnr, #31
+       mov     \irqnr, #32
        ands    \irqstat, \irqstat, \tmp
 
        @ if no low interrupts set, check high interrupts
        ldreq   \irqstat, [\base, #IRQ_CAUSE_HIGH_OFF]
        ldreq   \tmp, [\base, #IRQ_MASK_HIGH_OFF]
-       moveq   \irqnr, #63
+       moveq   \irqnr, #64
        andeqs  \irqstat, \irqstat, \tmp
 
        @ find first active interrupt source
index 8e7976a..cfc696b 100644 (file)
@@ -177,6 +177,7 @@ static struct irq_chip imx_gpc_chip = {
        .irq_unmask             = imx_gpc_irq_unmask,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .irq_set_wake           = imx_gpc_irq_set_wake,
+       .irq_set_type           = irq_chip_set_type_parent,
 #ifdef CONFIG_SMP
        .irq_set_affinity       = irq_chip_set_affinity_parent,
 #endif
index 5305ec7..79e1f87 100644 (file)
@@ -143,9 +143,9 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
                 * Ensure that CPU power state is set to ON to avoid CPU
                 * powerdomain transition on wfi
                 */
-               clkdm_wakeup(cpu1_clkdm);
-               omap_set_pwrdm_state(cpu1_pwrdm, PWRDM_POWER_ON);
-               clkdm_allow_idle(cpu1_clkdm);
+               clkdm_wakeup_nolock(cpu1_clkdm);
+               pwrdm_set_next_pwrst(cpu1_pwrdm, PWRDM_POWER_ON);
+               clkdm_allow_idle_nolock(cpu1_clkdm);
 
                if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) {
                        while (gic_dist_disabled()) {
index cc8a987..48495ad 100644 (file)
@@ -890,6 +890,36 @@ static int _init_opt_clks(struct omap_hwmod *oh)
        return ret;
 }
 
+static void _enable_optional_clocks(struct omap_hwmod *oh)
+{
+       struct omap_hwmod_opt_clk *oc;
+       int i;
+
+       pr_debug("omap_hwmod: %s: enabling optional clocks\n", oh->name);
+
+       for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
+               if (oc->_clk) {
+                       pr_debug("omap_hwmod: enable %s:%s\n", oc->role,
+                                __clk_get_name(oc->_clk));
+                       clk_enable(oc->_clk);
+               }
+}
+
+static void _disable_optional_clocks(struct omap_hwmod *oh)
+{
+       struct omap_hwmod_opt_clk *oc;
+       int i;
+
+       pr_debug("omap_hwmod: %s: disabling optional clocks\n", oh->name);
+
+       for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
+               if (oc->_clk) {
+                       pr_debug("omap_hwmod: disable %s:%s\n", oc->role,
+                                __clk_get_name(oc->_clk));
+                       clk_disable(oc->_clk);
+               }
+}
+
 /**
  * _enable_clocks - enable hwmod main clock and interface clocks
  * @oh: struct omap_hwmod *
@@ -917,6 +947,9 @@ static int _enable_clocks(struct omap_hwmod *oh)
                        clk_enable(os->_clk);
        }
 
+       if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
+               _enable_optional_clocks(oh);
+
        /* The opt clocks are controlled by the device driver. */
 
        return 0;
@@ -948,41 +981,14 @@ static int _disable_clocks(struct omap_hwmod *oh)
                        clk_disable(os->_clk);
        }
 
+       if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
+               _disable_optional_clocks(oh);
+
        /* The opt clocks are controlled by the device driver. */
 
        return 0;
 }
 
-static void _enable_optional_clocks(struct omap_hwmod *oh)
-{
-       struct omap_hwmod_opt_clk *oc;
-       int i;
-
-       pr_debug("omap_hwmod: %s: enabling optional clocks\n", oh->name);
-
-       for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
-               if (oc->_clk) {
-                       pr_debug("omap_hwmod: enable %s:%s\n", oc->role,
-                                __clk_get_name(oc->_clk));
-                       clk_enable(oc->_clk);
-               }
-}
-
-static void _disable_optional_clocks(struct omap_hwmod *oh)
-{
-       struct omap_hwmod_opt_clk *oc;
-       int i;
-
-       pr_debug("omap_hwmod: %s: disabling optional clocks\n", oh->name);
-
-       for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
-               if (oc->_clk) {
-                       pr_debug("omap_hwmod: disable %s:%s\n", oc->role,
-                                __clk_get_name(oc->_clk));
-                       clk_disable(oc->_clk);
-               }
-}
-
 /**
  * _omap4_enable_module - enable CLKCTRL modulemode on OMAP4
  * @oh: struct omap_hwmod *
index ca6df1a..76bce11 100644 (file)
@@ -523,6 +523,8 @@ struct omap_hwmod_omap4_prcm {
  * HWMOD_RECONFIG_IO_CHAIN: omap_hwmod code needs to reconfigure wake-up 
  *     events by calling _reconfigure_io_chain() when a device is enabled
  *     or idled.
+ * HWMOD_OPT_CLKS_NEEDED: The optional clocks are needed for the module to
+ *     operate and they need to be handled at the same time as the main_clk.
  */
 #define HWMOD_SWSUP_SIDLE                      (1 << 0)
 #define HWMOD_SWSUP_MSTANDBY                   (1 << 1)
@@ -538,6 +540,7 @@ struct omap_hwmod_omap4_prcm {
 #define HWMOD_FORCE_MSTANDBY                   (1 << 11)
 #define HWMOD_SWSUP_SIDLE_ACT                  (1 << 12)
 #define HWMOD_RECONFIG_IO_CHAIN                        (1 << 13)
+#define HWMOD_OPT_CLKS_NEEDED                  (1 << 14)
 
 /*
  * omap_hwmod._int_flags definitions
index 51d1ecb..ee4e044 100644 (file)
@@ -1297,6 +1297,44 @@ static struct omap_hwmod dra7xx_mcspi4_hwmod = {
        .dev_attr       = &mcspi4_dev_attr,
 };
 
+/*
+ * 'mcasp' class
+ *
+ */
+static struct omap_hwmod_class_sysconfig dra7xx_mcasp_sysc = {
+       .sysc_offs      = 0x0004,
+       .sysc_flags     = SYSC_HAS_SIDLEMODE,
+       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+       .sysc_fields    = &omap_hwmod_sysc_type3,
+};
+
+static struct omap_hwmod_class dra7xx_mcasp_hwmod_class = {
+       .name   = "mcasp",
+       .sysc   = &dra7xx_mcasp_sysc,
+};
+
+/* mcasp3 */
+static struct omap_hwmod_opt_clk mcasp3_opt_clks[] = {
+       { .role = "ahclkx", .clk = "mcasp3_ahclkx_mux" },
+};
+
+static struct omap_hwmod dra7xx_mcasp3_hwmod = {
+       .name           = "mcasp3",
+       .class          = &dra7xx_mcasp_hwmod_class,
+       .clkdm_name     = "l4per2_clkdm",
+       .main_clk       = "mcasp3_aux_gfclk_mux",
+       .flags          = HWMOD_OPT_CLKS_NEEDED,
+       .prcm = {
+               .omap4 = {
+                       .clkctrl_offs = DRA7XX_CM_L4PER2_MCASP3_CLKCTRL_OFFSET,
+                       .context_offs = DRA7XX_RM_L4PER2_MCASP3_CONTEXT_OFFSET,
+                       .modulemode   = MODULEMODE_SWCTRL,
+               },
+       },
+       .opt_clks       = mcasp3_opt_clks,
+       .opt_clks_cnt   = ARRAY_SIZE(mcasp3_opt_clks),
+};
+
 /*
  * 'mmc' class
  *
@@ -2566,6 +2604,22 @@ static struct omap_hwmod_ocp_if dra7xx_l3_main_1__hdmi = {
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
+/* l4_per2 -> mcasp3 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per2__mcasp3 = {
+       .master         = &dra7xx_l4_per2_hwmod,
+       .slave          = &dra7xx_mcasp3_hwmod,
+       .clk            = "l4_root_clk_div",
+       .user           = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l3_main_1 -> mcasp3 */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__mcasp3 = {
+       .master         = &dra7xx_l3_main_1_hwmod,
+       .slave          = &dra7xx_mcasp3_hwmod,
+       .clk            = "l3_iclk_div",
+       .user           = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
 /* l4_per1 -> elm */
 static struct omap_hwmod_ocp_if dra7xx_l4_per1__elm = {
        .master         = &dra7xx_l4_per1_hwmod,
@@ -3308,6 +3362,8 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
        &dra7xx_l4_wkup__dcan1,
        &dra7xx_l4_per2__dcan2,
        &dra7xx_l4_per2__cpgmac0,
+       &dra7xx_l4_per2__mcasp3,
+       &dra7xx_l3_main_1__mcasp3,
        &dra7xx_gmac__mdio,
        &dra7xx_l4_cfg__dma_system,
        &dra7xx_l3_main_1__dss,
index b1288f5..6256052 100644 (file)
@@ -144,6 +144,7 @@ static struct omap_hwmod dm81xx_l4_ls_hwmod = {
        .name           = "l4_ls",
        .clkdm_name     = "alwon_l3s_clkdm",
        .class          = &l4_hwmod_class,
+       .flags          = HWMOD_NO_IDLEST,
 };
 
 /*
@@ -155,6 +156,7 @@ static struct omap_hwmod dm81xx_l4_hs_hwmod = {
        .name           = "l4_hs",
        .clkdm_name     = "alwon_l3_med_clkdm",
        .class          = &l4_hwmod_class,
+       .flags          = HWMOD_NO_IDLEST,
 };
 
 /* L3 slow -> L4 ls peripheral interface running at 125MHz */
@@ -850,6 +852,7 @@ static struct omap_hwmod dm816x_emac0_hwmod = {
        .name           = "emac0",
        .clkdm_name     = "alwon_ethernet_clkdm",
        .class          = &dm816x_emac_hwmod_class,
+       .flags          = HWMOD_NO_IDLEST,
 };
 
 static struct omap_hwmod_ocp_if dm81xx_l4_hs__emac0 = {
index 1dfe346..5814477 100644 (file)
@@ -24,9 +24,6 @@
 #include <linux/platform_data/iommu-omap.h>
 #include <linux/platform_data/wkup_m3.h>
 
-#include <asm/siginfo.h>
-#include <asm/signal.h>
-
 #include "common.h"
 #include "common-board-devices.h"
 #include "dss-common.h"
@@ -385,29 +382,6 @@ static void __init omap3_pandora_legacy_init(void)
 }
 #endif /* CONFIG_ARCH_OMAP3 */
 
-#ifdef CONFIG_SOC_TI81XX
-static int fault_fixed_up;
-
-static int t410_abort_handler(unsigned long addr, unsigned int fsr,
-                             struct pt_regs *regs)
-{
-       if ((fsr == 0x406 || fsr == 0xc06) && !fault_fixed_up) {
-               pr_warn("External imprecise Data abort at addr=%#lx, fsr=%#x ignored.\n",
-                       addr, fsr);
-               fault_fixed_up = 1;
-               return 0;
-       }
-
-       return 1;
-}
-
-static void __init t410_abort_init(void)
-{
-       hook_fault_code(16 + 6, t410_abort_handler, SIGBUS, BUS_OBJERR,
-                       "imprecise external abort");
-}
-#endif
-
 #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5)
 static struct iommu_platform_data omap4_iommu_pdata = {
        .reset_name = "mmu_cache",
@@ -536,9 +510,6 @@ static struct pdata_init pdata_quirks[] __initdata = {
        { "openpandora,omap3-pandora-600mhz", omap3_pandora_legacy_init, },
        { "openpandora,omap3-pandora-1ghz", omap3_pandora_legacy_init, },
 #endif
-#ifdef CONFIG_SOC_TI81XX
-       { "hp,t410", t410_abort_init, },
-#endif
 #ifdef CONFIG_SOC_OMAP5
        { "ti,omap5-uevm", omap5_uevm_legacy_init, },
 #endif
index 87b98bf..2dbd378 100644 (file)
@@ -301,11 +301,11 @@ static void omap3_pm_idle(void)
        if (omap_irq_pending())
                return;
 
-       trace_cpu_idle(1, smp_processor_id());
+       trace_cpu_idle_rcuidle(1, smp_processor_id());
 
        omap_sram_idle();
 
-       trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+       trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
 #ifdef CONFIG_SUSPEND
index 79eb502..73919a3 100644 (file)
@@ -21,5 +21,5 @@
        @ find cause bits that are unmasked
        ands    \irqstat, \irqstat, \tmp        @ clear Z flag if any
        clzne   \irqnr, \irqstat                @ calc irqnr
-       rsbne   \irqnr, \irqnr, #31
+       rsbne   \irqnr, \irqnr, #32
        .endm
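
The two constant bumps above (dove and orion5x) change the final rsb so the macro now yields (index of the highest pending cause bit) + 1 rather than the raw bit index. A hypothetical C helper, purely to illustrate the clz/rsb arithmetic and not the macro's full control flow:

#include <stdint.h>

/* Sketch of the "rsb from #32" arithmetic: 1-based position of the
 * highest set bit in irqstat, or 0 when nothing is pending. */
static inline int highest_pending_plus_one(uint32_t irqstat)
{
	if (!irqstat)
		return 0;
	return 32 - __builtin_clz(irqstat);	/* clz, then rsb from 32 */
}

For example, a pending source on cause bit 5 now yields 6 where the old #31 constant produced 5.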
index 13eba2b..8fbfb10 100644 (file)
@@ -344,7 +344,7 @@ void __init palm27x_pwm_init(int bl, int lcd)
 {
        palm_bl_power   = bl;
        palm_lcd_power  = lcd;
-       pwm_add_lookup(palm27x_pwm_lookup, ARRAY_SIZE(palm27x_pwm_lookup));
+       pwm_add_table(palm27x_pwm_lookup, ARRAY_SIZE(palm27x_pwm_lookup));
        platform_device_register(&palm27x_backlight);
 }
 #endif
index aebf6de..0b5c387 100644 (file)
@@ -169,7 +169,7 @@ static inline void palmtc_keys_init(void) {}
 #if defined(CONFIG_BACKLIGHT_PWM) || defined(CONFIG_BACKLIGHT_PWM_MODULE)
 static struct pwm_lookup palmtc_pwm_lookup[] = {
        PWM_LOOKUP("pxa25x-pwm.1", 0, "pwm-backlight.0", NULL, PALMTC_PERIOD_NS,
-                  PWM_PERIOD_NORMAL),
+                  PWM_POLARITY_NORMAL),
 };
 
 static struct platform_pwm_backlight_data palmtc_backlight_data = {
index 1d2825c..5fce87f 100644 (file)
@@ -19,7 +19,7 @@
 #include "common.h"
 #include "rcar-gen2.h"
 
-static const char *r8a7793_boards_compat_dt[] __initconst = {
+static const char * const r8a7793_boards_compat_dt[] __initconst = {
        "renesas,r8a7793",
        NULL,
 };
index 7fdc5bf..446334a 100644 (file)
@@ -13,7 +13,7 @@ config SOC_ZX296702
        select ARM_GLOBAL_TIMER
        select HAVE_ARM_SCU if SMP
        select HAVE_ARM_TWD if SMP
-       select PM_GENERIC_DOMAINS
+       select PM_GENERIC_DOMAINS if PM
        help
          Support for ZTE ZX296702 SoC which is a dual core CortexA9MP
 endif
index 9ac16a4..871f217 100644 (file)
@@ -49,7 +49,7 @@ config ARM64
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_ARCH_BITREVERSE
        select HAVE_ARCH_JUMP_LABEL
-       select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP
+       select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
@@ -316,6 +316,27 @@ config ARM64_ERRATUM_832075
 
          If unsure, say Y.
 
+config ARM64_ERRATUM_834220
+       bool "Cortex-A57: 834220: Stage 2 translation fault might be incorrectly reported in presence of a Stage 1 fault"
+       depends on KVM
+       default y
+       help
+         This option adds an alternative code sequence to work around ARM
+         erratum 834220 on Cortex-A57 parts up to r1p2.
+
+         Affected Cortex-A57 parts might report a Stage 2 translation
+         fault as the result of a Stage 1 fault for load crossing a
+         fault as the result of a Stage 1 fault for a load crossing a
+         alignment fault at Stage 1 and a translation fault at Stage 2.
+
+         The workaround is to verify that the Stage 1 translation
+         doesn't generate a fault before handling the Stage 2 fault.
+         Please note that this does not necessarily enable the workaround,
+         as it depends on the alternative framework, which will only patch
+         the kernel if an affected CPU is detected.
+
+         If unsure, say Y.
+
 config ARM64_ERRATUM_845719
        bool "Cortex-A53: 845719: a load might read incorrect data"
        depends on COMPAT
index ce47792..f7bd9bf 100644 (file)
@@ -237,7 +237,7 @@ EXPORT_SYMBOL(ce_aes_setkey);
 static struct crypto_alg aes_alg = {
        .cra_name               = "aes",
        .cra_driver_name        = "aes-ce",
-       .cra_priority           = 300,
+       .cra_priority           = 250,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx),
index 624f967..9622eb4 100644 (file)
@@ -64,27 +64,31 @@ do {                                                                        \
 
 #define smp_load_acquire(p)                                            \
 ({                                                                     \
-       typeof(*p) ___p1;                                               \
+       union { typeof(*p) __val; char __c[1]; } __u;                   \
        compiletime_assert_atomic_type(*p);                             \
        switch (sizeof(*p)) {                                           \
        case 1:                                                         \
                asm volatile ("ldarb %w0, %1"                           \
-                       : "=r" (___p1) : "Q" (*p) : "memory");          \
+                       : "=r" (*(__u8 *)__u.__c)                       \
+                       : "Q" (*p) : "memory");                         \
                break;                                                  \
        case 2:                                                         \
                asm volatile ("ldarh %w0, %1"                           \
-                       : "=r" (___p1) : "Q" (*p) : "memory");          \
+                       : "=r" (*(__u16 *)__u.__c)                      \
+                       : "Q" (*p) : "memory");                         \
                break;                                                  \
        case 4:                                                         \
                asm volatile ("ldar %w0, %1"                            \
-                       : "=r" (___p1) : "Q" (*p) : "memory");          \
+                       : "=r" (*(__u32 *)__u.__c)                      \
+                       : "Q" (*p) : "memory");                         \
                break;                                                  \
        case 8:                                                         \
                asm volatile ("ldar %0, %1"                             \
-                       : "=r" (___p1) : "Q" (*p) : "memory");          \
+                       : "=r" (*(__u64 *)__u.__c)                      \
+                       : "Q" (*p) : "memory");                         \
                break;                                                  \
        }                                                               \
-       ___p1;                                                          \
+       __u.__val;                                                      \
 })
 
 #define read_barrier_depends()         do { } while(0)
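
The smp_load_acquire() rework above routes each asm output through a small union instead of a typeof(*p) temporary, so the store from the asm goes through an lvalue of the exact access width while the caller still gets a result of the original type. A compiler-only sketch of that union pattern, with plain volatile loads standing in for ldarb/ldarh/ldar and illustrative names throughout:

#include <stdint.h>
#include <stdio.h>

/* Not the kernel macro: no acquire ordering here, only the
 * type-preserving union trick used by the hunk above. */
#define load_once_sketch(p)						\
({									\
	union { __typeof__(*(p)) __val; char __c[sizeof(*(p))]; } __u;	\
	switch (sizeof(*(p))) {						\
	case 1: *(uint8_t  *)__u.__c = *(volatile uint8_t  *)(p); break; \
	case 2: *(uint16_t *)__u.__c = *(volatile uint16_t *)(p); break; \
	case 4: *(uint32_t *)__u.__c = *(volatile uint32_t *)(p); break; \
	case 8: *(uint64_t *)__u.__c = *(volatile uint64_t *)(p); break; \
	}								\
	__u.__val;							\
})

int main(void)
{
	uint32_t flag = 42;
	uint64_t wide = 1ULL << 40;

	printf("%u %llu\n", load_once_sketch(&flag),
	       (unsigned long long)load_once_sketch(&wide));
	return 0;
}

Compiled with gcc, both loads come back with their original types; the acquire semantics in the real macro come solely from the ldar family of instructions.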
index 7fbed69..eb8432b 100644 (file)
@@ -23,7 +23,6 @@
  */
 #include <linux/types.h>
 #include <linux/sched.h>
-#include <linux/ptrace.h>
 
 #define COMPAT_USER_HZ         100
 #ifdef __AARCH64EB__
@@ -234,7 +233,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
        return (u32)(unsigned long)uptr;
 }
 
-#define compat_user_stack_pointer() (user_stack_pointer(current_pt_regs()))
+#define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
 
 static inline void __user *arch_compat_alloc_user_space(long len)
 {
index 11d5bb0..8f271b8 100644 (file)
@@ -29,8 +29,9 @@
 #define ARM64_HAS_PAN                          4
 #define ARM64_HAS_LSE_ATOMICS                  5
 #define ARM64_WORKAROUND_CAVIUM_23154          6
+#define ARM64_WORKAROUND_834220                        7
 
-#define ARM64_NCAPS                            7
+#define ARM64_NCAPS                            8
 
 #ifndef __ASSEMBLY__
 
@@ -46,8 +47,12 @@ enum ftr_type {
 #define FTR_STRICT     true    /* SANITY check strict matching required */
 #define FTR_NONSTRICT  false   /* SANITY check ignored */
 
+#define FTR_SIGNED     true    /* Value should be treated as signed */
+#define FTR_UNSIGNED   false   /* Value should be treated as unsigned */
+
 struct arm64_ftr_bits {
-       bool            strict;   /* CPU Sanity check: strict matching required ? */
+       bool            sign;   /* Value is signed ? */
+       bool            strict; /* CPU Sanity check: strict matching required ? */
        enum ftr_type   type;
        u8              shift;
        u8              width;
@@ -123,6 +128,18 @@ cpuid_feature_extract_field(u64 features, int field)
        return cpuid_feature_extract_field_width(features, field, 4);
 }
 
+static inline unsigned int __attribute_const__
+cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
+{
+       return (u64)(features << (64 - width - field)) >> (64 - width);
+}
+
+static inline unsigned int __attribute_const__
+cpuid_feature_extract_unsigned_field(u64 features, int field)
+{
+       return cpuid_feature_extract_unsigned_field_width(features, field, 4);
+}
+
 static inline u64 arm64_ftr_mask(struct arm64_ftr_bits *ftrp)
 {
        return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
@@ -130,7 +147,9 @@ static inline u64 arm64_ftr_mask(struct arm64_ftr_bits *ftrp)
 
 static inline s64 arm64_ftr_value(struct arm64_ftr_bits *ftrp, u64 val)
 {
-       return cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width);
+       return ftrp->sign ?
+               cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width) :
+               cpuid_feature_extract_unsigned_field_width(val, ftrp->shift, ftrp->width);
 }
 
 static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
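
The new cpuid_feature_extract_unsigned_field*() helpers above differ from the existing signed extractor (not shown in this hunk) essentially in using a logical rather than sign-extending final shift, which changes the result for any field whose top bit is set. A stand-alone sketch with user-space stdint types and illustrative names:

#include <stdint.h>
#include <stdio.h>

static int64_t extract_signed(uint64_t features, int field, int width)
{
	/* arithmetic right shift sign-extends the field (gcc/clang behaviour) */
	return (int64_t)(features << (64 - width - field)) >> (64 - width);
}

static uint64_t extract_unsigned(uint64_t features, int field, int width)
{
	/* logical right shift keeps the raw field value */
	return (uint64_t)(features << (64 - width - field)) >> (64 - width);
}

int main(void)
{
	uint64_t reg = 0xEULL << 12;	/* a 4-bit field at bit 12 holding 0xE */

	printf("signed=%lld unsigned=%llu\n",
	       (long long)extract_signed(reg, 12, 4),		  /* -2 */
	       (unsigned long long)extract_unsigned(reg, 12, 4)); /* 14 */
	return 0;
}

With the signed helper, 4-bit counters such as BRPS/WRPS or the CTR_EL0 line-size fields would read back negative once they reach 0x8, which is presumably what the FTR_UNSIGNED plumbing in the cpufeature.c hunks further down is guarding against.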
index 54d0ead..61e08f3 100644 (file)
@@ -18,7 +18,6 @@
 
 #ifdef __KERNEL__
 
-#include <linux/acpi.h>
 #include <linux/types.h>
 #include <linux/vmalloc.h>
 
 #include <asm/xen/hypervisor.h>
 
 #define DMA_ERROR_CODE (~(dma_addr_t)0)
-extern struct dma_map_ops *dma_ops;
 extern struct dma_map_ops dummy_dma_ops;
 
 static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
-       if (unlikely(!dev))
-               return dma_ops;
-       else if (dev->archdata.dma_ops)
+       if (dev && dev->archdata.dma_ops)
                return dev->archdata.dma_ops;
-       else if (acpi_disabled)
-               return dma_ops;
 
        /*
-        * When ACPI is enabled, if arch_set_dma_ops is not called,
-        * we will disable device DMA capability by setting it
-        * to dummy_dma_ops.
+        * We expect no ISA devices, and all other DMA masters are expected to
+        * have someone call arch_setup_dma_ops at device creation time.
         */
        return &dummy_dma_ops;
 }
index e54415e..9732908 100644 (file)
@@ -138,16 +138,18 @@ extern struct pmu perf_ops_bp;
 /* Determine number of BRP registers available. */
 static inline int get_num_brps(void)
 {
+       u64 dfr0 = read_system_reg(SYS_ID_AA64DFR0_EL1);
        return 1 +
-               cpuid_feature_extract_field(read_system_reg(SYS_ID_AA64DFR0_EL1),
+               cpuid_feature_extract_unsigned_field(dfr0,
                                                ID_AA64DFR0_BRPS_SHIFT);
 }
 
 /* Determine number of WRP registers available. */
 static inline int get_num_wrps(void)
 {
+       u64 dfr0 = read_system_reg(SYS_ID_AA64DFR0_EL1);
        return 1 +
-               cpuid_feature_extract_field(read_system_reg(SYS_ID_AA64DFR0_EL1),
+               cpuid_feature_extract_unsigned_field(dfr0,
                                                ID_AA64DFR0_WRPS_SHIFT);
 }
 
index 23eb450..8e8d306 100644 (file)
@@ -7,4 +7,9 @@ struct pt_regs;
 
 extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
 
+static inline int nr_legacy_irqs(void)
+{
+       return 0;
+}
+
 #endif
index 17e92f0..3ca894e 100644 (file)
@@ -99,11 +99,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
        *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
 }
 
+/*
+ * vcpu_reg should always be passed a register number coming from a
+ * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32
+ * with banked registers.
+ */
 static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
 {
-       if (vcpu_mode_is_32bit(vcpu))
-               return vcpu_reg32(vcpu, reg_num);
-
        return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
 }
 
index c0e8789..2416578 100644 (file)
@@ -101,7 +101,7 @@ static inline void cpu_set_default_tcr_t0sz(void)
 #define destroy_context(mm)            do { } while(0)
 void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
 
-#define init_new_context(tsk,mm)       ({ atomic64_set(&mm->context.id, 0); 0; })
+#define init_new_context(tsk,mm)       ({ atomic64_set(&(mm)->context.id, 0); 0; })
 
 /*
  * This is called when "tsk" is about to enter lazy TLB mode.
index 9819a94..7e074f9 100644 (file)
@@ -81,6 +81,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 
 #define PAGE_KERNEL            __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_RO         __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
+#define PAGE_KERNEL_ROX        __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
 #define PAGE_KERNEL_EXEC       __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_EXEC_CONT  __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
 
index 24926f2..feb6b4e 100644 (file)
@@ -75,6 +75,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                           (1 << MIDR_VARIANT_SHIFT) | 2),
        },
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_834220
+       {
+       /* Cortex-A57 r0p0 - r1p2 */
+               .desc = "ARM erratum 834220",
+               .capability = ARM64_WORKAROUND_834220,
+               MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
+                          (1 << MIDR_VARIANT_SHIFT) | 2),
+       },
+#endif
 #ifdef CONFIG_ARM64_ERRATUM_845719
        {
        /* Cortex-A53 r0p[01234] */
index c8cf892..0669c63 100644 (file)
@@ -44,8 +44,9 @@ unsigned int compat_elf_hwcap2 __read_mostly;
 
 DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 
-#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
+#define __ARM64_FTR_BITS(SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
        {                                               \
+               .sign = SIGNED,                         \
                .strict = STRICT,                       \
                .type = TYPE,                           \
                .shift = SHIFT,                         \
@@ -53,6 +54,14 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
                .safe_val = SAFE_VAL,                   \
        }
 
+/* Define a feature with signed values */
+#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
+       __ARM64_FTR_BITS(FTR_SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
+
+/* Define a feature with unsigned value */
+#define U_ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
+       __ARM64_FTR_BITS(FTR_UNSIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
+
 #define ARM64_FTR_END                                  \
        {                                               \
                .width = 0,                             \
@@ -99,7 +108,7 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
         * Differing PARange is fine as long as all peripherals and memory are mapped
         * within the minimum PARange of all CPUs
         */
-       ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
+       U_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
        ARM64_FTR_END,
 };
 
@@ -115,18 +124,18 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
 };
 
 static struct arm64_ftr_bits ftr_ctr[] = {
-       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),        /* RAO */
+       U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),      /* RAO */
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
-       ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),  /* CWG */
-       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),   /* ERG */
-       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),   /* DminLine */
+       U_ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),        /* CWG */
+       U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */
+       U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */
        /*
         * Linux can handle differing I-cache policies. Userspace JITs will
         * make use of *minLine
         */
-       ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0),     /* L1Ip */
+       U_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0),   /* L1Ip */
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0),        /* RAZ */
-       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),    /* IminLine */
+       U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),  /* IminLine */
        ARM64_FTR_END,
 };
 
@@ -144,12 +153,12 @@ static struct arm64_ftr_bits ftr_id_mmfr0[] = {
 
 static struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
-       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
+       U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
+       U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
+       U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
+       U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
+       U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
+       U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
        ARM64_FTR_END,
 };
 
index 706679d..212ae63 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/seq_file.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
+#include <linux/delay.h>
 
 /*
  * In case the boot CPU is hotpluggable, we record its initial state and
@@ -112,6 +113,10 @@ static int c_show(struct seq_file *m, void *v)
                 */
                seq_printf(m, "processor\t: %d\n", i);
 
+               seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+                          loops_per_jiffy / (500000UL/HZ),
+                          loops_per_jiffy / (5000UL/HZ) % 100);
+
                /*
                 * Dump out the common processor features in a single line.
                 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
index de46b50..4eeb171 100644 (file)
@@ -127,7 +127,11 @@ static int __init uefi_init(void)
        table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables;
        config_tables = early_memremap(efi_to_phys(efi.systab->tables),
                                       table_size);
-
+       if (config_tables == NULL) {
+               pr_warn("Unable to map EFI config table array.\n");
+               retval = -ENOMEM;
+               goto out;
+       }
        retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
                                         sizeof(efi_config_table_64_t), NULL);
 
@@ -209,6 +213,14 @@ void __init efi_init(void)
                         PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK)));
        memmap.phys_map = params.mmap;
        memmap.map = early_memremap(params.mmap, params.mmap_size);
+       if (memmap.map == NULL) {
+               /*
+               * If we are booting via UEFI, the UEFI memory map is the only
+               * description of memory we have, so there is little point in
+               * proceeding if we cannot access it.
+               */
+               panic("Unable to map EFI memory map.\n");
+       }
        memmap.map_end = memmap.map + params.mmap_size;
        memmap.desc_size = params.desc_size;
        memmap.desc_version = params.desc_ver;
@@ -224,8 +236,9 @@ static bool __init efi_virtmap_init(void)
 {
        efi_memory_desc_t *md;
 
+       init_new_context(NULL, &efi_mm);
+
        for_each_efi_memory_desc(&memmap, md) {
-               u64 paddr, npages, size;
                pgprot_t prot;
 
                if (!(md->attribute & EFI_MEMORY_RUNTIME))
@@ -233,11 +246,6 @@ static bool __init efi_virtmap_init(void)
                if (md->virt_addr == 0)
                        return false;
 
-               paddr = md->phys_addr;
-               npages = md->num_pages;
-               memrange_efi_to_native(&paddr, &npages);
-               size = npages << PAGE_SHIFT;
-
                pr_info("  EFI remap 0x%016llx => %p\n",
                        md->phys_addr, (void *)md->virt_addr);
 
@@ -254,7 +262,9 @@ static bool __init efi_virtmap_init(void)
                else
                        prot = PAGE_KERNEL;
 
-               create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot);
+               create_pgd_mapping(&efi_mm, md->phys_addr, md->virt_addr,
+                                  md->num_pages << EFI_PAGE_SHIFT, 
+                                  __pgprot(pgprot_val(prot) | PTE_NG));
        }
        return true;
 }
@@ -270,12 +280,12 @@ static int __init arm64_enable_runtime_services(void)
 
        if (!efi_enabled(EFI_BOOT)) {
                pr_info("EFI services will not be available.\n");
-               return -1;
+               return 0;
        }
 
        if (efi_runtime_disabled()) {
                pr_info("EFI runtime services will be disabled.\n");
-               return -1;
+               return 0;
        }
 
        pr_info("Remapping and enabling EFI services.\n");
@@ -285,7 +295,7 @@ static int __init arm64_enable_runtime_services(void)
                                                   mapsize);
        if (!memmap.map) {
                pr_err("Failed to remap EFI memory map\n");
-               return -1;
+               return -ENOMEM;
        }
        memmap.map_end = memmap.map + mapsize;
        efi.memmap = &memmap;
@@ -294,13 +304,13 @@ static int __init arm64_enable_runtime_services(void)
                                                   sizeof(efi_system_table_t));
        if (!efi.systab) {
                pr_err("Failed to remap EFI System Table\n");
-               return -1;
+               return -ENOMEM;
        }
        set_bit(EFI_SYSTEM_TABLES, &efi.flags);
 
        if (!efi_virtmap_init()) {
                pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
-               return -1;
+               return -ENOMEM;
        }
 
        /* Set up runtime services function pointers */
@@ -329,14 +339,7 @@ core_initcall(arm64_dmi_init);
 
 static void efi_set_pgd(struct mm_struct *mm)
 {
-       if (mm == &init_mm)
-               cpu_set_reserved_ttbr0();
-       else
-               cpu_switch_mm(mm->pgd, mm);
-
-       local_flush_tlb_all();
-       if (icache_is_aivivt())
-               __local_flush_icache_all();
+       switch_mm(NULL, mm, NULL);
 }
 
 void efi_virtmap_load(void)
index fce95e1..1095aa4 100644 (file)
@@ -1,3 +1,4 @@
+#include <linux/ftrace.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
 #include <asm/cacheflush.h>
@@ -70,6 +71,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
         */
        local_dbg_save(flags);
 
+       /*
+        * Function graph tracer state gets inconsistent when the kernel
+        * calls functions that never return (aka suspend finishers) hence
+        * disable graph tracing during their execution.
+        */
+       pause_graph_tracing();
+
        /*
         * mm context saved on the stack, it will be restored when
         * the cpu comes out of reset through the identity mapped
@@ -111,6 +119,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
                        hw_breakpoint_restore(NULL);
        }
 
+       unpause_graph_tracing();
+
        /*
         * Restore pstate flags. OS lock and mdscr have been already
         * restored, so from this point onwards, debugging is fully
index 1599701..86c2898 100644 (file)
@@ -864,6 +864,10 @@ ENTRY(__kvm_flush_vm_context)
 ENDPROC(__kvm_flush_vm_context)
 
 __kvm_hyp_panic:
+       // Stash PAR_EL1 before corrupting it in __restore_sysregs
+       mrs     x0, par_el1
+       push    x0, xzr
+
        // Guess the context by looking at VTTBR:
        // If zero, then we're already a host.
        // Otherwise restore a minimal host context before panicing.
@@ -898,7 +902,7 @@ __kvm_hyp_panic:
        mrs     x3, esr_el2
        mrs     x4, far_el2
        mrs     x5, hpfar_el2
-       mrs     x6, par_el1
+       pop     x6, xzr         // active context PAR_EL1
        mrs     x7, tpidr_el2
 
        mov     lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
@@ -914,7 +918,7 @@ __kvm_hyp_panic:
 ENDPROC(__kvm_hyp_panic)
 
 __hyp_panic_str:
-       .ascii  "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"
+       .ascii  "HYP panic:\nPS:%08x PC:%016x ESR:%08x\nFAR:%016x HPFAR:%016x PAR:%016x\nVCPU:%p\n\0"
 
        .align  2
 
@@ -1015,9 +1019,15 @@ el1_trap:
        b.ne    1f              // Not an abort we care about
 
        /* This is an abort. Check for permission fault */
+alternative_if_not ARM64_WORKAROUND_834220
        and     x2, x1, #ESR_ELx_FSC_TYPE
        cmp     x2, #FSC_PERM
        b.ne    1f              // Not a permission fault
+alternative_else
+       nop                     // Use the permission fault path to
+       nop                     // check for a valid S1 translation,
+       nop                     // regardless of the ESR value.
+alternative_endif
 
        /*
         * Check for Stage-1 page table walk, which is guaranteed
index 85c5715..648112e 100644 (file)
@@ -48,7 +48,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 
        /* Note: These now point to the banked copies */
        *vcpu_spsr(vcpu) = new_spsr_value;
-       *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
+       *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
 
        /* Branch to exception vector */
        if (sctlr & (1 << 13))
index f636a26..e87f53f 100644 (file)
@@ -76,13 +76,28 @@ static void flush_context(unsigned int cpu)
                __flush_icache_all();
 }
 
-static int is_reserved_asid(u64 asid)
+static bool check_update_reserved_asid(u64 asid, u64 newasid)
 {
        int cpu;
-       for_each_possible_cpu(cpu)
-               if (per_cpu(reserved_asids, cpu) == asid)
-                       return 1;
-       return 0;
+       bool hit = false;
+
+       /*
+        * Iterate over the set of reserved ASIDs looking for a match.
+        * If we find one, then we can update our mm to use newasid
+        * (i.e. the same ASID in the current generation) but we can't
+        * exit the loop early, since we need to ensure that all copies
+        * of the old ASID are updated to reflect the mm. Failure to do
+        * so could result in us missing the reserved ASID in a future
+        * generation.
+        */
+       for_each_possible_cpu(cpu) {
+               if (per_cpu(reserved_asids, cpu) == asid) {
+                       hit = true;
+                       per_cpu(reserved_asids, cpu) = newasid;
+               }
+       }
+
+       return hit;
 }
 
 static u64 new_context(struct mm_struct *mm, unsigned int cpu)
@@ -92,12 +107,14 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
        u64 generation = atomic64_read(&asid_generation);
 
        if (asid != 0) {
+               u64 newasid = generation | (asid & ~ASID_MASK);
+
                /*
                 * If our current ASID was active during a rollover, we
                 * can continue to use it and this was just a false alarm.
                 */
-               if (is_reserved_asid(asid))
-                       return generation | (asid & ~ASID_MASK);
+               if (check_update_reserved_asid(asid, newasid))
+                       return newasid;
 
                /*
                 * We had a valid ASID in a previous life, so try to re-use
@@ -105,7 +122,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
                 */
                asid &= ~ASID_MASK;
                if (!__test_and_set_bit(asid, asid_map))
-                       goto bump_gen;
+                       return newasid;
        }
 
        /*
@@ -129,10 +146,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 set_asid:
        __set_bit(asid, asid_map);
        cur_idx = asid;
-
-bump_gen:
-       asid |= generation;
-       return asid;
+       return asid | generation;
 }
 
 void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
index 131a199..7963aa4 100644 (file)
@@ -18,6 +18,7 @@
  */
 
 #include <linux/gfp.h>
+#include <linux/acpi.h>
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/genalloc.h>
@@ -28,9 +29,6 @@
 
 #include <asm/cacheflush.h>
 
-struct dma_map_ops *dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
 static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
                                 bool coherent)
 {
@@ -515,13 +513,7 @@ EXPORT_SYMBOL(dummy_dma_ops);
 
 static int __init arm64_dma_init(void)
 {
-       int ret;
-
-       dma_ops = &swiotlb_dma_ops;
-
-       ret = atomic_pool_init();
-
-       return ret;
+       return atomic_pool_init();
 }
 arch_initcall(arm64_dma_init);
 
@@ -552,10 +544,14 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 {
        bool coherent = is_device_dma_coherent(dev);
        int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
+       size_t iosize = size;
        void *addr;
 
        if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
                return NULL;
+
+       size = PAGE_ALIGN(size);
+
        /*
         * Some drivers rely on this, and we probably don't want the
         * possibility of stale kernel data being read by devices anyway.
@@ -566,7 +562,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                struct page **pages;
                pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
 
-               pages = iommu_dma_alloc(dev, size, gfp, ioprot, handle,
+               pages = iommu_dma_alloc(dev, iosize, gfp, ioprot, handle,
                                        flush_page);
                if (!pages)
                        return NULL;
@@ -574,7 +570,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
                                              __builtin_return_address(0));
                if (!addr)
-                       iommu_dma_free(dev, pages, size, handle);
+                       iommu_dma_free(dev, pages, iosize, handle);
        } else {
                struct page *page;
                /*
@@ -591,7 +587,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                if (!addr)
                        return NULL;
 
-               *handle = iommu_dma_map_page(dev, page, 0, size, ioprot);
+               *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
                if (iommu_dma_mapping_error(dev, *handle)) {
                        if (coherent)
                                __free_pages(page, get_order(size));
@@ -606,6 +602,9 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                               dma_addr_t handle, struct dma_attrs *attrs)
 {
+       size_t iosize = size;
+
+       size = PAGE_ALIGN(size);
        /*
         * @cpu_addr will be one of 3 things depending on how it was allocated:
         * - A remapped array of pages from iommu_dma_alloc(), for all
@@ -617,17 +616,17 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
         * Hence how dodgy the below logic looks...
         */
        if (__in_atomic_pool(cpu_addr, size)) {
-               iommu_dma_unmap_page(dev, handle, size, 0, NULL);
+               iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
                __free_from_pool(cpu_addr, size);
        } else if (is_vmalloc_addr(cpu_addr)){
                struct vm_struct *area = find_vm_area(cpu_addr);
 
                if (WARN_ON(!area || !area->pages))
                        return;
-               iommu_dma_free(dev, area->pages, size, &handle);
+               iommu_dma_free(dev, area->pages, iosize, &handle);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else {
-               iommu_dma_unmap_page(dev, handle, size, 0, NULL);
+               iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
                __free_pages(virt_to_page(cpu_addr), get_order(size));
        }
 }
@@ -984,8 +983,8 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                        struct iommu_ops *iommu, bool coherent)
 {
-       if (!acpi_disabled && !dev->archdata.dma_ops)
-               dev->archdata.dma_ops = dma_ops;
+       if (!dev->archdata.dma_ops)
+               dev->archdata.dma_ops = &swiotlb_dma_ops;
 
        dev->archdata.dma_coherent = coherent;
        __iommu_setup_dma_ops(dev, dma_base, size, iommu);
index 19211c4..92ddac1 100644 (file)
@@ -393,16 +393,16 @@ static struct fault_info {
        { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "level 1 translation fault"     },
        { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "level 2 translation fault"     },
        { do_page_fault,        SIGSEGV, SEGV_MAPERR,   "level 3 translation fault"     },
-       { do_bad,               SIGBUS,  0,             "reserved access flag fault"    },
+       { do_bad,               SIGBUS,  0,             "unknown 8"                     },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 1 access flag fault"     },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 2 access flag fault"     },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 3 access flag fault"     },
-       { do_bad,               SIGBUS,  0,             "reserved permission fault"     },
+       { do_bad,               SIGBUS,  0,             "unknown 12"                    },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 1 permission fault"      },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 2 permission fault"      },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 3 permission fault"      },
        { do_bad,               SIGBUS,  0,             "synchronous external abort"    },
-       { do_bad,               SIGBUS,  0,             "asynchronous external abort"   },
+       { do_bad,               SIGBUS,  0,             "unknown 17"                    },
        { do_bad,               SIGBUS,  0,             "unknown 18"                    },
        { do_bad,               SIGBUS,  0,             "unknown 19"                    },
        { do_bad,               SIGBUS,  0,             "synchronous abort (translation table walk)" },
@@ -410,16 +410,16 @@ static struct fault_info {
        { do_bad,               SIGBUS,  0,             "synchronous abort (translation table walk)" },
        { do_bad,               SIGBUS,  0,             "synchronous abort (translation table walk)" },
        { do_bad,               SIGBUS,  0,             "synchronous parity error"      },
-       { do_bad,               SIGBUS,  0,             "asynchronous parity error"     },
+       { do_bad,               SIGBUS,  0,             "unknown 25"                    },
        { do_bad,               SIGBUS,  0,             "unknown 26"                    },
        { do_bad,               SIGBUS,  0,             "unknown 27"                    },
-       { do_bad,               SIGBUS,  0,             "synchronous parity error (translation table walk" },
-       { do_bad,               SIGBUS,  0,             "synchronous parity error (translation table walk" },
-       { do_bad,               SIGBUS,  0,             "synchronous parity error (translation table walk" },
-       { do_bad,               SIGBUS,  0,             "synchronous parity error (translation table walk" },
+       { do_bad,               SIGBUS,  0,             "synchronous parity error (translation table walk)" },
+       { do_bad,               SIGBUS,  0,             "synchronous parity error (translation table walk)" },
+       { do_bad,               SIGBUS,  0,             "synchronous parity error (translation table walk)" },
+       { do_bad,               SIGBUS,  0,             "synchronous parity error (translation table walk)" },
        { do_bad,               SIGBUS,  0,             "unknown 32"                    },
        { do_bad,               SIGBUS,  BUS_ADRALN,    "alignment fault"               },
-       { do_bad,               SIGBUS,  0,             "debug event"                   },
+       { do_bad,               SIGBUS,  0,             "unknown 34"                    },
        { do_bad,               SIGBUS,  0,             "unknown 35"                    },
        { do_bad,               SIGBUS,  0,             "unknown 36"                    },
        { do_bad,               SIGBUS,  0,             "unknown 37"                    },
@@ -433,21 +433,21 @@ static struct fault_info {
        { do_bad,               SIGBUS,  0,             "unknown 45"                    },
        { do_bad,               SIGBUS,  0,             "unknown 46"                    },
        { do_bad,               SIGBUS,  0,             "unknown 47"                    },
-       { do_bad,               SIGBUS,  0,             "unknown 48"                    },
+       { do_bad,               SIGBUS,  0,             "TLB conflict abort"            },
        { do_bad,               SIGBUS,  0,             "unknown 49"                    },
        { do_bad,               SIGBUS,  0,             "unknown 50"                    },
        { do_bad,               SIGBUS,  0,             "unknown 51"                    },
        { do_bad,               SIGBUS,  0,             "implementation fault (lockdown abort)" },
-       { do_bad,               SIGBUS,  0,             "unknown 53"                    },
+       { do_bad,               SIGBUS,  0,             "implementation fault (unsupported exclusive)" },
        { do_bad,               SIGBUS,  0,             "unknown 54"                    },
        { do_bad,               SIGBUS,  0,             "unknown 55"                    },
        { do_bad,               SIGBUS,  0,             "unknown 56"                    },
        { do_bad,               SIGBUS,  0,             "unknown 57"                    },
-       { do_bad,               SIGBUS,  0,             "implementation fault (coprocessor abort)" },
+       { do_bad,               SIGBUS,  0,             "unknown 58"                    },
        { do_bad,               SIGBUS,  0,             "unknown 59"                    },
        { do_bad,               SIGBUS,  0,             "unknown 60"                    },
-       { do_bad,               SIGBUS,  0,             "unknown 61"                    },
-       { do_bad,               SIGBUS,  0,             "unknown 62"                    },
+       { do_bad,               SIGBUS,  0,             "section domain fault"          },
+       { do_bad,               SIGBUS,  0,             "page domain fault"             },
        { do_bad,               SIGBUS,  0,             "unknown 63"                    },
 };
 
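For context on the table above: the handler is picked by the fault status code carried in the low six bits of the exception syndrome register. The following is a minimal stand-alone sketch of that lookup; the struct layout is simplified and the helper name is an assumption, not necessarily the kernel's exact code.

struct pt_regs;

struct fault_info {
	int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
	int sig;
	int code;
	const char *name;
};

/* A 64-entry table like the one in the hunk above, one row per fault status code. */
extern const struct fault_info fault_info[64];

static inline const struct fault_info *esr_to_fault_info(unsigned int esr)
{
	/* Masking with 63 keeps the index inside the table for any syndrome value. */
	return &fault_info[esr & 63];
}
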
index e3f563c..873e363 100644 (file)
@@ -64,8 +64,12 @@ EXPORT_SYMBOL(phys_mem_access_prot);
 
 static void __init *early_alloc(unsigned long sz)
 {
-       void *ptr = __va(memblock_alloc(sz, sz));
-       BUG_ON(!ptr);
+       phys_addr_t phys;
+       void *ptr;
+
+       phys = memblock_alloc(sz, sz);
+       BUG_ON(!phys);
+       ptr = __va(phys);
        memset(ptr, 0, sz);
        return ptr;
 }
@@ -81,55 +85,19 @@ static void split_pmd(pmd_t *pmd, pte_t *pte)
        do {
                /*
                 * Need to have the least restrictive permissions available
-                * permissions will be fixed up later. Default the new page
-                * range as contiguous ptes.
+                * permissions will be fixed up later
                 */
-               set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC_CONT));
+               set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
                pfn++;
        } while (pte++, i++, i < PTRS_PER_PTE);
 }
 
-/*
- * Given a PTE with the CONT bit set, determine where the CONT range
- * starts, and clear the entire range of PTE CONT bits.
- */
-static void clear_cont_pte_range(pte_t *pte, unsigned long addr)
-{
-       int i;
-
-       pte -= CONT_RANGE_OFFSET(addr);
-       for (i = 0; i < CONT_PTES; i++) {
-               set_pte(pte, pte_mknoncont(*pte));
-               pte++;
-       }
-       flush_tlb_all();
-}
-
-/*
- * Given a range of PTEs set the pfn and provided page protection flags
- */
-static void __populate_init_pte(pte_t *pte, unsigned long addr,
-                               unsigned long end, phys_addr_t phys,
-                               pgprot_t prot)
-{
-       unsigned long pfn = __phys_to_pfn(phys);
-
-       do {
-               /* clear all the bits except the pfn, then apply the prot */
-               set_pte(pte, pfn_pte(pfn, prot));
-               pte++;
-               pfn++;
-               addr += PAGE_SIZE;
-       } while (addr != end);
-}
-
 static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
-                                 unsigned long end, phys_addr_t phys,
+                                 unsigned long end, unsigned long pfn,
                                  pgprot_t prot,
                                  void *(*alloc)(unsigned long size))
 {
        pte_t *pte;
-       unsigned long next;
 
        if (pmd_none(*pmd) || pmd_sect(*pmd)) {
                pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
@@ -142,27 +110,9 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
 
        pte = pte_offset_kernel(pmd, addr);
        do {
-               next = min(end, (addr + CONT_SIZE) & CONT_MASK);
-               if (((addr | next | phys) & ~CONT_MASK) == 0) {
-                       /* a block of CONT_PTES  */
-                       __populate_init_pte(pte, addr, next, phys,
-                                           __pgprot(pgprot_val(prot) | PTE_CONT));
-               } else {
-                       /*
-                        * If the range being split is already inside of a
-                        * contiguous range but this PTE isn't going to be
-                        * contiguous, then we want to unmark the adjacent
-                        * ranges, then update the portion of the range we
-                        * are interrested in.
-                        */
-                        clear_cont_pte_range(pte, addr);
-                        __populate_init_pte(pte, addr, next, phys, prot);
-               }
-
-               pte += (next - addr) >> PAGE_SHIFT;
-               phys += next - addr;
-               addr = next;
-       } while (addr != end);
+               set_pte(pte, pfn_pte(pfn, prot));
+               pfn++;
+       } while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
 static void split_pud(pud_t *old_pud, pmd_t *pmd)
@@ -223,7 +173,8 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
                                }
                        }
                } else {
-                       alloc_init_pte(pmd, addr, next, phys, prot, alloc);
+                       alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
+                                      prot, alloc);
                }
                phys += next - addr;
        } while (pmd++, addr = next, addr != end);
@@ -362,8 +313,8 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
         * for now. This will get more fine grained later once all memory
         * is mapped
         */
-       unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
-       unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+       unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
+       unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);
 
        if (end < kernel_x_start) {
                create_mapping(start, __phys_to_virt(start),
@@ -451,18 +402,18 @@ static void __init fixup_executable(void)
 {
 #ifdef CONFIG_DEBUG_RODATA
        /* now that we are actually fully mapped, make the start/end more fine grained */
-       if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
+       if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
                unsigned long aligned_start = round_down(__pa(_stext),
-                                                       SECTION_SIZE);
+                                                        SWAPPER_BLOCK_SIZE);
 
                create_mapping(aligned_start, __phys_to_virt(aligned_start),
                                __pa(_stext) - aligned_start,
                                PAGE_KERNEL);
        }
 
-       if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
+       if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
                unsigned long aligned_end = round_up(__pa(__init_end),
-                                                       SECTION_SIZE);
+                                                         SWAPPER_BLOCK_SIZE);
                create_mapping(__pa(__init_end), (unsigned long)__init_end,
                                aligned_end - __pa(__init_end),
                                PAGE_KERNEL);
@@ -475,7 +426,7 @@ void mark_rodata_ro(void)
 {
        create_mapping_late(__pa(_stext), (unsigned long)_stext,
                                (unsigned long)_etext - (unsigned long)_stext,
-                               PAGE_KERNEL_EXEC | PTE_RDONLY);
+                               PAGE_KERNEL_ROX);
 
 }
 #endif
index f7836c6..c32f767 100644 (file)
@@ -98,7 +98,7 @@ static void __init mcf54xx_bootmem_alloc(void)
        memstart = PAGE_ALIGN(_ramstart);
        min_low_pfn = PFN_DOWN(_rambase);
        start_pfn = PFN_DOWN(memstart);
-       max_low_pfn = PFN_DOWN(_ramend);
+       max_pfn = max_low_pfn = PFN_DOWN(_ramend);
        high_memory = (void *)_ramend;
 
        m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6;
index 0793a7f..f9d96bf 100644 (file)
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls            375
+#define NR_syscalls            376
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
index 5e6fae6..36cf129 100644 (file)
 #define __NR_sendmmsg          372
 #define __NR_userfaultfd       373
 #define __NR_membarrier                374
+#define __NR_mlock2            375
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
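The hunks above and below only wire the new mlock2 syscall into the m68k tables. A hedged user-space sketch of invoking it by number (375 is the m68k value defined above; MLOCK_ONFAULT is assumed from the generic uapi headers in case the libc headers do not expose it yet):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef MLOCK_ONFAULT
#define MLOCK_ONFAULT 0x01	/* assumption: value from asm-generic/mman-common.h */
#endif

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	void *buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	/* 375 is m68k-specific; other architectures use different numbers. */
	if (syscall(375, buf, page, MLOCK_ONFAULT) != 0)
		perror("mlock2");

	munmap(buf, page);
	return 0;
}
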
index 88c27d9..76b9113 100644 (file)
@@ -238,11 +238,14 @@ void __init setup_arch(char **cmdline_p)
         * Give all the memory to the bootmap allocator, tell it to put the
         * boot mem_map at the start of memory.
         */
+       min_low_pfn = PFN_DOWN(memory_start);
+       max_pfn = max_low_pfn = PFN_DOWN(memory_end);
+
        bootmap_size = init_bootmem_node(
                        NODE_DATA(0),
-                       memory_start >> PAGE_SHIFT, /* map goes here */
-                       PAGE_OFFSET >> PAGE_SHIFT,      /* 0 on coldfire */
-                       memory_end >> PAGE_SHIFT);
+                       min_low_pfn,            /* map goes here */
+                       PFN_DOWN(PAGE_OFFSET),
+                       max_pfn);
        /*
         * Free the usable memory, we have to make sure we do not free
         * the bootmem bitmap so we then reserve it after freeing it :-)
index 5dd0e80..282cd90 100644 (file)
@@ -395,3 +395,4 @@ ENTRY(sys_call_table)
        .long sys_sendmmsg
        .long sys_userfaultfd
        .long sys_membarrier
+       .long sys_mlock2                /* 375 */
index b958916..8f37fdd 100644 (file)
@@ -250,7 +250,7 @@ void __init paging_init(void)
        high_memory = phys_to_virt(max_addr);
 
        min_low_pfn = availmem >> PAGE_SHIFT;
-       max_low_pfn = max_addr >> PAGE_SHIFT;
+       max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT;
 
        for (i = 0; i < m68k_num_memory; i++) {
                addr = m68k_memory[i].addr;
index a8b942b..2a5f43a 100644 (file)
@@ -118,13 +118,13 @@ static void __init sun3_bootmem_alloc(unsigned long memory_start,
        memory_end = memory_end & PAGE_MASK;
 
        start_page = __pa(memory_start) >> PAGE_SHIFT;
-       num_pages = __pa(memory_end) >> PAGE_SHIFT;
+       max_pfn = num_pages = __pa(memory_end) >> PAGE_SHIFT;
 
        high_memory = (void *)memory_end;
        availmem = memory_start;
 
        m68k_setup_node(0);
-       availmem += init_bootmem_node(NODE_DATA(0), start_page, 0, num_pages);
+       availmem += init_bootmem(start_page, num_pages);
        availmem = (availmem + (PAGE_SIZE-1)) & PAGE_MASK;
 
        free_bootmem(__pa(availmem), memory_end - (availmem));
index 1ba2120..8755d61 100644 (file)
@@ -216,9 +216,9 @@ void __init plat_mem_setup(void)
                                           AR71XX_RESET_SIZE);
        ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE,
                                         AR71XX_PLL_SIZE);
+       ath79_detect_sys_type();
        ath79_ddr_ctrl_init();
 
-       ath79_detect_sys_type();
        if (mips_machtype != ATH79_MACH_GENERIC_OF)
                detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX);
 
@@ -281,3 +281,8 @@ MIPS_MACHINE(ATH79_MACH_GENERIC,
             "Generic",
             "Generic AR71XX/AR724X/AR913X based board",
             ath79_generic_init);
+
+MIPS_MACHINE(ATH79_MACH_GENERIC_OF,
+            "DTB",
+            "Generic AR71XX/AR724X/AR913X based board (DT)",
+            NULL);
index fb7734e..13d0439 100644 (file)
                        miscintc: interrupt-controller@18060010 {
                                compatible = "qca,ar9132-misc-intc",
                                           "qca,ar7100-misc-intc";
-                               reg = <0x18060010 0x4>;
+                               reg = <0x18060010 0x8>;
 
                                interrupt-parent = <&cpuintc>;
                                interrupts = <6>;
index ad1fccd..2046c02 100644 (file)
@@ -200,8 +200,9 @@ static inline int pfn_valid(unsigned long pfn)
 {
        /* avoid <linux/mm.h> include hell */
        extern unsigned long max_mapnr;
+       unsigned long pfn_offset = ARCH_PFN_OFFSET;
 
-       return pfn >= ARCH_PFN_OFFSET && pfn < max_mapnr;
+       return pfn >= pfn_offset && pfn < max_mapnr;
 }
 
 #elif defined(CONFIG_SPARSEMEM)
index d5fa3ea..41b1b09 100644 (file)
@@ -1581,7 +1581,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
 
        base = (inst >> 21) & 0x1f;
        op_inst = (inst >> 16) & 0x1f;
-       offset = inst & 0xffff;
+       offset = (int16_t)inst;
        cache = (inst >> 16) & 0x3;
        op = (inst >> 18) & 0x7;
 
index 7bab3a4..7e22108 100644 (file)
@@ -157,9 +157,11 @@ FEXPORT(__kvm_mips_vcpu_run)
 
 FEXPORT(__kvm_mips_load_asid)
        /* Set the ASID for the Guest Kernel */
-       INT_SLL t0, t0, 1       /* with kseg0 @ 0x40000000, kernel */
-                               /* addresses shift to 0x80000000 */
-       bltz    t0, 1f          /* If kernel */
+       PTR_L   t0, VCPU_COP0(k1)
+       LONG_L  t0, COP0_STATUS(t0)
+       andi    t0, KSU_USER | ST0_ERL | ST0_EXL
+       xori    t0, KSU_USER
+       bnez    t0, 1f          /* If kernel */
         INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
        INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID    /* else user */
 1:
@@ -474,9 +476,11 @@ __kvm_mips_return_to_guest:
        mtc0    t0, CP0_EPC
 
        /* Set the ASID for the Guest Kernel */
-       INT_SLL t0, t0, 1       /* with kseg0 @ 0x40000000, kernel */
-                               /* addresses shift to 0x80000000 */
-       bltz    t0, 1f          /* If kernel */
+       PTR_L   t0, VCPU_COP0(k1)
+       LONG_L  t0, COP0_STATUS(t0)
+       andi    t0, KSU_USER | ST0_ERL | ST0_EXL
+       xori    t0, KSU_USER
+       bnez    t0, 1f          /* If kernel */
         INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
        INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID    /* else user */
 1:
index 49ff3bf..b9b803f 100644 (file)
@@ -279,7 +279,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 
        if (!gebase) {
                err = -ENOMEM;
-               goto out_free_cpu;
+               goto out_uninit_cpu;
        }
        kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
                  ALIGN(size, PAGE_SIZE), gebase);
@@ -343,6 +343,9 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 out_free_gebase:
        kfree(gebase);
 
+out_uninit_cpu:
+       kvm_vcpu_uninit(vcpu);
+
 out_free_cpu:
        kfree(vcpu);
 
index 8a97802..dbbeccc 100644 (file)
@@ -11,6 +11,7 @@
  *  by the Free Software Foundation.
  */
 
+#include <linux/delay.h>
 #include <linux/types.h>
 #include <linux/pci.h>
 #include <linux/io.h>
@@ -232,8 +233,7 @@ static int rt288x_pci_probe(struct platform_device *pdev)
        ioport_resource.end = RT2880_PCI_IO_BASE + RT2880_PCI_IO_SIZE - 1;
 
        rt2880_pci_reg_write(0, RT2880_PCI_REG_PCICFG_ADDR);
-       for (i = 0; i < 0xfffff; i++)
-               ;
+       udelay(1);
 
        rt2880_pci_reg_write(0x79, RT2880_PCI_REG_ARBCTL);
        rt2880_pci_reg_write(0x07FF0001, RT2880_PCI_REG_BAR0SETUP_ADDR);
index 4f925e0..78b2ef4 100644 (file)
@@ -10,6 +10,8 @@
  * option) any later version.
  */
 
+#include <linux/delay.h>
+
 #include <asm/bootinfo.h>
 #include <asm/cacheflush.h>
 #include <asm/idle.h>
@@ -77,7 +79,7 @@ void msp7120_reset(void)
         */
 
        /* Wait a bit for the DDRC to settle */
-       for (i = 0; i < 100000000; i++);
+       mdelay(125);
 
 #if defined(CONFIG_PMC_MSP7120_GW)
        /*
index 244f942..db8f88b 100644 (file)
@@ -3,6 +3,8 @@
  *
  *  Reset a SNI machine.
  */
+#include <linux/delay.h>
+
 #include <asm/io.h>
 #include <asm/reboot.h>
 #include <asm/sni.h>
@@ -32,9 +34,9 @@ void sni_machine_restart(char *command)
        for (;;) {
                for (i = 0; i < 100; i++) {
                        kb_wait();
-                       for (j = 0; j < 100000 ; j++)
-                               /* nothing */;
+                       udelay(50);
                        outb_p(0xfe, 0x64);      /* pulse reset low */
+                       udelay(50);
                }
        }
 }
index 4434b54..78ae555 100644 (file)
@@ -1,6 +1,7 @@
 config MN10300
        def_bool y
        select HAVE_OPROFILE
+       select HAVE_UID16
        select GENERIC_IRQ_SHOW
        select ARCH_WANT_IPC_PARSE_VERSION
        select HAVE_ARCH_TRACEHOOK
@@ -37,9 +38,6 @@ config HIGHMEM
 config NUMA
        def_bool n
 
-config UID16
-       def_bool y
-
 config RWSEM_GENERIC_SPINLOCK
        def_bool y
 
index 223cdcc..87bf88e 100644 (file)
@@ -23,22 +23,6 @@ static void __flush_dcache(unsigned long start, unsigned long end)
        end += (cpuinfo.dcache_line_size - 1);
        end &= ~(cpuinfo.dcache_line_size - 1);
 
-       for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
-               __asm__ __volatile__ ("   flushda 0(%0)\n"
-                                       : /* Outputs */
-                                       : /* Inputs  */ "r"(addr)
-                                       /* : No clobber */);
-       }
-}
-
-static void __flush_dcache_all(unsigned long start, unsigned long end)
-{
-       unsigned long addr;
-
-       start &= ~(cpuinfo.dcache_line_size - 1);
-       end += (cpuinfo.dcache_line_size - 1);
-       end &= ~(cpuinfo.dcache_line_size - 1);
-
        if (end > start + cpuinfo.dcache_size)
                end = start + cpuinfo.dcache_size;
 
@@ -112,7 +96,7 @@ static void flush_aliases(struct address_space *mapping, struct page *page)
 
 void flush_cache_all(void)
 {
-       __flush_dcache_all(0, cpuinfo.dcache_size);
+       __flush_dcache(0, cpuinfo.dcache_size);
        __flush_icache(0, cpuinfo.icache_size);
 }
 
@@ -182,7 +166,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
         */
        unsigned long start = (unsigned long)page_address(page);
 
-       __flush_dcache_all(start, start + PAGE_SIZE);
+       __flush_dcache(start, start + PAGE_SIZE);
 }
 
 void flush_dcache_page(struct page *page)
@@ -268,7 +252,7 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
 {
        flush_cache_page(vma, user_vaddr, page_to_pfn(page));
        memcpy(dst, src, len);
-       __flush_dcache_all((unsigned long)src, (unsigned long)src + len);
+       __flush_dcache((unsigned long)src, (unsigned long)src + len);
        if (vma->vm_flags & VM_EXEC)
                __flush_icache((unsigned long)src, (unsigned long)src + len);
 }
@@ -279,7 +263,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 {
        flush_cache_page(vma, user_vaddr, page_to_pfn(page));
        memcpy(dst, src, len);
-       __flush_dcache_all((unsigned long)dst, (unsigned long)dst + len);
+       __flush_dcache((unsigned long)dst, (unsigned long)dst + len);
        if (vma->vm_flags & VM_EXEC)
                __flush_icache((unsigned long)dst, (unsigned long)dst + len);
 }
index c365469..729f891 100644 (file)
@@ -108,6 +108,9 @@ config PGTABLE_LEVELS
        default 3 if 64BIT && PARISC_PAGE_SIZE_4KB
        default 2
 
+config SYS_SUPPORTS_HUGETLBFS
+       def_bool y if PA20
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
diff --git a/arch/parisc/include/asm/hugetlb.h b/arch/parisc/include/asm/hugetlb.h
new file mode 100644 (file)
index 0000000..7d56a9c
--- /dev/null
@@ -0,0 +1,85 @@
+#ifndef _ASM_PARISC64_HUGETLB_H
+#define _ASM_PARISC64_HUGETLB_H
+
+#include <asm/page.h>
+#include <asm-generic/hugetlb.h>
+
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+                    pte_t *ptep, pte_t pte);
+
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+                             pte_t *ptep);
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+                                        unsigned long addr,
+                                        unsigned long len) {
+       return 0;
+}
+
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(struct file *file,
+                       unsigned long addr, unsigned long len)
+{
+       if (len & ~HPAGE_MASK)
+               return -EINVAL;
+       if (addr & ~HPAGE_MASK)
+               return -EINVAL;
+       return 0;
+}
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+                                         unsigned long addr, unsigned long end,
+                                         unsigned long floor,
+                                         unsigned long ceiling)
+{
+       free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+                                        unsigned long addr, pte_t *ptep)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+       return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+       return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+                                          unsigned long addr, pte_t *ptep)
+{
+       pte_t old_pte = *ptep;
+       set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+                                            unsigned long addr, pte_t *ptep,
+                                            pte_t pte, int dirty)
+{
+       int changed = !pte_same(*ptep, pte);
+       if (changed) {
+               set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+               flush_tlb_page(vma, addr);
+       }
+       return changed;
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+       return *ptep;
+}
+
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
+#endif /* _ASM_PARISC64_HUGETLB_H */
index 60d5d17..80e742a 100644 (file)
@@ -145,11 +145,22 @@ extern int npmem_ranges;
 #endif /* CONFIG_DISCONTIGMEM */
 
 #ifdef CONFIG_HUGETLB_PAGE
-#define HPAGE_SHIFT            22      /* 4MB (is this fixed?) */
+#define HPAGE_SHIFT            PMD_SHIFT /* fixed for transparent huge pages */
 #define HPAGE_SIZE             ((1UL) << HPAGE_SHIFT)
 #define HPAGE_MASK             (~(HPAGE_SIZE - 1))
 #define HUGETLB_PAGE_ORDER     (HPAGE_SHIFT - PAGE_SHIFT)
+
+#if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+# define REAL_HPAGE_SHIFT      20 /* 20 = 1MB */
+# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_1M
+#elif !defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+# define REAL_HPAGE_SHIFT      22 /* 22 = 4MB */
+# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4M
+#else
+# define REAL_HPAGE_SHIFT      24 /* 24 = 16MB */
+# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16M
 #endif
+#endif /* CONFIG_HUGETLB_PAGE */
 
 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 
index 3edbb9f..f2fd327 100644 (file)
@@ -35,7 +35,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
                                        PxD_FLAG_VALID | 
                                        PxD_FLAG_ATTACHED) 
                        + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
-               /* The first pmd entry also is marked with _PAGE_GATEWAY as
+               /* The first pmd entry also is marked with PxD_FLAG_ATTACHED as
                 * a signal that this pmd may not be freed */
                __pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
 #endif
index f93c4a4..d8534f9 100644 (file)
@@ -83,7 +83,11 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
        printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
 
 /* This is the size of the initially mapped kernel memory */
-#define KERNEL_INITIAL_ORDER   24      /* 0 to 1<<24 = 16MB */
+#ifdef CONFIG_64BIT
+#define KERNEL_INITIAL_ORDER   25      /* 1<<25 = 32MB */
+#else
+#define KERNEL_INITIAL_ORDER   24      /* 1<<24 = 16MB */
+#endif
 #define KERNEL_INITIAL_SIZE    (1 << KERNEL_INITIAL_ORDER)
 
 #if CONFIG_PGTABLE_LEVELS == 3
@@ -167,7 +171,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #define _PAGE_NO_CACHE_BIT 24   /* (0x080) Uncached Page (U bit) */
 #define _PAGE_ACCESSED_BIT 23   /* (0x100) Software: Page Accessed */
 #define _PAGE_PRESENT_BIT  22   /* (0x200) Software: translation valid */
-/* bit 21 was formerly the FLUSH bit but is now unused */
+#define _PAGE_HPAGE_BIT    21   /* (0x400) Software: Huge Page */
 #define _PAGE_USER_BIT     20   /* (0x800) Software: User accessible page */
 
 /* N.B. The bits are defined in terms of a 32 bit word above, so the */
@@ -194,6 +198,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
 #define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))
 #define _PAGE_PRESENT  (1 << xlate_pabit(_PAGE_PRESENT_BIT))
+#define _PAGE_HUGE     (1 << xlate_pabit(_PAGE_HPAGE_BIT))
 #define _PAGE_USER     (1 << xlate_pabit(_PAGE_USER_BIT))
 
 #define _PAGE_TABLE    (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE |  _PAGE_DIRTY | _PAGE_ACCESSED)
@@ -217,7 +222,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #define PxD_FLAG_VALID    (1 << xlate_pabit(_PxD_VALID_BIT))
 #define PxD_FLAG_MASK     (0xf)
 #define PxD_FLAG_SHIFT    (4)
-#define PxD_VALUE_SHIFT   (8) /* (PAGE_SHIFT-PxD_FLAG_SHIFT) */
+#define PxD_VALUE_SHIFT   (PFN_PTE_SHIFT-PxD_FLAG_SHIFT)
 
 #ifndef __ASSEMBLY__
 
@@ -362,6 +367,18 @@ static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; ret
 static inline pte_t pte_mkwrite(pte_t pte)     { pte_val(pte) |= _PAGE_WRITE; return pte; }
 static inline pte_t pte_mkspecial(pte_t pte)   { return pte; }
 
+/*
+ * Huge pte definitions.
+ */
+#ifdef CONFIG_HUGETLB_PAGE
+#define pte_huge(pte)           (pte_val(pte) & _PAGE_HUGE)
+#define pte_mkhuge(pte)         (__pte(pte_val(pte) | _PAGE_HUGE))
+#else
+#define pte_huge(pte)           (0)
+#define pte_mkhuge(pte)         (pte)
+#endif
+
+
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
@@ -410,8 +427,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 /* Find an entry in the second-level page table.. */
 
 #if CONFIG_PGTABLE_LEVELS == 3
+#define pmd_index(addr)         (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
 #define pmd_offset(dir,address) \
-((pmd_t *) pgd_page_vaddr(*(dir)) + (((address)>>PMD_SHIFT) & (PTRS_PER_PMD-1)))
+((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(address))
 #else
 #define pmd_offset(dir,addr) ((pmd_t *) dir)
 #endif
index 54adb60..7e759ec 100644 (file)
@@ -192,33 +192,6 @@ void show_trace(struct task_struct *task, unsigned long *stack);
  */
 typedef unsigned int elf_caddr_t;
 
-#define start_thread_som(regs, new_pc, new_sp) do {    \
-       unsigned long *sp = (unsigned long *)new_sp;    \
-       __u32 spaceid = (__u32)current->mm->context;    \
-       unsigned long pc = (unsigned long)new_pc;       \
-       /* offset pc for priv. level */                 \
-       pc |= 3;                                        \
-                                                       \
-       regs->iasq[0] = spaceid;                        \
-       regs->iasq[1] = spaceid;                        \
-       regs->iaoq[0] = pc;                             \
-       regs->iaoq[1] = pc + 4;                         \
-       regs->sr[2] = LINUX_GATEWAY_SPACE;              \
-       regs->sr[3] = 0xffff;                           \
-       regs->sr[4] = spaceid;                          \
-       regs->sr[5] = spaceid;                          \
-       regs->sr[6] = spaceid;                          \
-       regs->sr[7] = spaceid;                          \
-       regs->gr[ 0] = USER_PSW;                        \
-       regs->gr[30] = ((new_sp)+63)&~63;               \
-       regs->gr[31] = pc;                              \
-                                                       \
-       get_user(regs->gr[26],&sp[0]);                  \
-       get_user(regs->gr[25],&sp[-1]);                 \
-       get_user(regs->gr[24],&sp[-2]);                 \
-       get_user(regs->gr[23],&sp[-3]);                 \
-} while(0)
-
 /* The ELF abi wants things done a "wee bit" differently than
  * som does.  Supporting this behavior here avoids
  * having our own version of create_elf_tables.
index ecc3ae1..dd4d187 100644 (file)
 #define MADV_DONTFORK  10              /* don't inherit across fork */
 #define MADV_DOFORK    11              /* do inherit across fork */
 
-/* The range 12-64 is reserved for page size specification. */
-#define MADV_4K_PAGES   12              /* Use 4K pages  */
-#define MADV_16K_PAGES  14              /* Use 16K pages */
-#define MADV_64K_PAGES  16              /* Use 64K pages */
-#define MADV_256K_PAGES 18              /* Use 256K pages */
-#define MADV_1M_PAGES   20              /* Use 1 Megabyte pages */
-#define MADV_4M_PAGES   22              /* Use 4 Megabyte pages */
-#define MADV_16M_PAGES  24              /* Use 16 Megabyte pages */
-#define MADV_64M_PAGES  26              /* Use 64 Megabyte pages */
-
 #define MADV_MERGEABLE   65            /* KSM may merge identical pages */
 #define MADV_UNMERGEABLE 66            /* KSM may not merge identical pages */
 
index 59001ce..d2f6257 100644 (file)
@@ -289,6 +289,14 @@ int main(void)
        DEFINE(ASM_PTE_ENTRY_SIZE, PTE_ENTRY_SIZE);
        DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT);
        DEFINE(ASM_PT_INITIAL, PT_INITIAL);
+       BLANK();
+       /* HUGEPAGE_SIZE is only used in vmlinux.lds.S to align kernel text
+        * and kernel data on physical huge pages */
+#ifdef CONFIG_HUGETLB_PAGE
+       DEFINE(HUGEPAGE_SIZE, 1UL << REAL_HPAGE_SHIFT);
+#else
+       DEFINE(HUGEPAGE_SIZE, PAGE_SIZE);
+#endif
        BLANK();
        DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
        DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
index c5ef408..623496c 100644 (file)
        STREG           \pte,0(\ptp)
        .endm
 
+       /* We have (depending on the page size):
+        * - 38 to 52-bit Physical Page Number
+        * - 12 to 26-bit page offset
+        */
        /* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
         * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
-       #define PAGE_ADD_SHIFT  (PAGE_SHIFT-12)
+       #define PAGE_ADD_SHIFT          (PAGE_SHIFT-12)
+       #define PAGE_ADD_HUGE_SHIFT     (REAL_HPAGE_SHIFT-12)
 
        /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
-       .macro          convert_for_tlb_insert20 pte
+       .macro          convert_for_tlb_insert20 pte,tmp
+#ifdef CONFIG_HUGETLB_PAGE
+       copy            \pte,\tmp
+       extrd,u         \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
+                               64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
+
+       depdi           _PAGE_SIZE_ENCODING_DEFAULT,63,\
+                               (63-58)+PAGE_ADD_SHIFT,\pte
+       extrd,u,*=      \tmp,_PAGE_HPAGE_BIT+32,1,%r0
+       depdi           _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
+                               (63-58)+PAGE_ADD_HUGE_SHIFT,\pte
+#else /* Huge pages disabled */
        extrd,u         \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
                                64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
        depdi           _PAGE_SIZE_ENCODING_DEFAULT,63,\
                                (63-58)+PAGE_ADD_SHIFT,\pte
+#endif
        .endm
 
        /* Convert the pte and prot to tlb insertion values.  How
         * this happens is quite subtle, read below */
-       .macro          make_insert_tlb spc,pte,prot
+       .macro          make_insert_tlb spc,pte,prot,tmp
        space_to_prot   \spc \prot        /* create prot id from space */
        /* The following is the real subtlety.  This is depositing
         * T <-> _PAGE_REFTRAP
        depdi           1,12,1,\prot
 
        /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
-       convert_for_tlb_insert20 \pte
+       convert_for_tlb_insert20 \pte \tmp
        .endm
 
        /* Identical macro to make_insert_tlb above, except it
 
 
        /*
-        * Align fault_vector_20 on 4K boundary so that both
-        * fault_vector_11 and fault_vector_20 are on the
-        * same page. This is only necessary as long as we
-        * write protect the kernel text, which we may stop
-        * doing once we use large page translations to cover
-        * the static part of the kernel address space.
+        * Fault_vectors are architecturally required to be aligned on a 2K
+        * boundary
         */
 
        .text
-
-       .align 4096
+       .align 2048
 
 ENTRY(fault_vector_20)
        /* First vector is invalid (0) */
@@ -1147,7 +1159,7 @@ dtlb_miss_20w:
        tlb_lock        spc,ptp,pte,t0,t1,dtlb_check_alias_20w
        update_accessed ptp,pte,t0,t1
 
-       make_insert_tlb spc,pte,prot
+       make_insert_tlb spc,pte,prot,t1
        
        idtlbt          pte,prot
 
@@ -1173,7 +1185,7 @@ nadtlb_miss_20w:
        tlb_lock        spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
        update_accessed ptp,pte,t0,t1
 
-       make_insert_tlb spc,pte,prot
+       make_insert_tlb spc,pte,prot,t1
 
        idtlbt          pte,prot
 
@@ -1267,7 +1279,7 @@ dtlb_miss_20:
        tlb_lock        spc,ptp,pte,t0,t1,dtlb_check_alias_20
        update_accessed ptp,pte,t0,t1
 
-       make_insert_tlb spc,pte,prot
+       make_insert_tlb spc,pte,prot,t1
 
        f_extend        pte,t1
 
@@ -1295,7 +1307,7 @@ nadtlb_miss_20:
        tlb_lock        spc,ptp,pte,t0,t1,nadtlb_check_alias_20
        update_accessed ptp,pte,t0,t1
 
-       make_insert_tlb spc,pte,prot
+       make_insert_tlb spc,pte,prot,t1
 
        f_extend        pte,t1
        
@@ -1404,7 +1416,7 @@ itlb_miss_20w:
        tlb_lock        spc,ptp,pte,t0,t1,itlb_fault
        update_accessed ptp,pte,t0,t1
 
-       make_insert_tlb spc,pte,prot
+       make_insert_tlb spc,pte,prot,t1
        
        iitlbt          pte,prot
 
@@ -1428,7 +1440,7 @@ naitlb_miss_20w:
        tlb_lock        spc,ptp,pte,t0,t1,naitlb_check_alias_20w
        update_accessed ptp,pte,t0,t1
 
-       make_insert_tlb spc,pte,prot
+       make_insert_tlb spc,pte,prot,t1
 
        iitlbt          pte,prot
 
@@ -1514,7 +1526,7 @@ itlb_miss_20:
        tlb_lock        spc,ptp,pte,t0,t1,itlb_fault
        update_accessed ptp,pte,t0,t1
 
-       make_insert_tlb spc,pte,prot
+       make_insert_tlb spc,pte,prot,t1
 
        f_extend        pte,t1
 
@@ -1534,7 +1546,7 @@ naitlb_miss_20:
        tlb_lock        spc,ptp,pte,t0,t1,naitlb_check_alias_20
        update_accessed ptp,pte,t0,t1
 
-       make_insert_tlb spc,pte,prot
+       make_insert_tlb spc,pte,prot,t1
 
        f_extend        pte,t1
 
@@ -1566,7 +1578,7 @@ dbit_trap_20w:
        tlb_lock        spc,ptp,pte,t0,t1,dbit_fault
        update_dirty    ptp,pte,t1
 
-       make_insert_tlb spc,pte,prot
+       make_insert_tlb spc,pte,prot,t1
                
        idtlbt          pte,prot
 
@@ -1610,7 +1622,7 @@ dbit_trap_20:
        tlb_lock        spc,ptp,pte,t0,t1,dbit_fault
        update_dirty    ptp,pte,t1
 
-       make_insert_tlb spc,pte,prot
+       make_insert_tlb spc,pte,prot,t1
 
        f_extend        pte,t1
        
index e7d6452..75aa0db 100644 (file)
@@ -69,7 +69,7 @@ $bss_loop:
        stw,ma          %arg2,4(%r1)
        stw,ma          %arg3,4(%r1)
 
-       /* Initialize startup VM. Just map first 8/16 MB of memory */
+       /* Initialize startup VM. Just map first 16/32 MB of memory */
        load32          PA(swapper_pg_dir),%r4
        mtctl           %r4,%cr24       /* Initialize kernel root pointer */
        mtctl           %r4,%cr25       /* Initialize user root pointer */
@@ -107,7 +107,7 @@ $bss_loop:
        /* Now initialize the PTEs themselves.  We use RWX for
         * everything ... it will get remapped correctly later */
        ldo             0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
-       ldi             (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
+       load32          (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
        load32          PA(pg0),%r1
 
 $pgt_fill_loop:
index 72a3c65..f7ea626 100644 (file)
@@ -130,7 +130,16 @@ void __init setup_arch(char **cmdline_p)
        printk(KERN_INFO "The 32-bit Kernel has started...\n");
 #endif
 
-       printk(KERN_INFO "Default page size is %dKB.\n", (int)(PAGE_SIZE / 1024));
+       printk(KERN_INFO "Kernel default page size is %d KB. Huge pages ",
+               (int)(PAGE_SIZE / 1024));
+#ifdef CONFIG_HUGETLB_PAGE
+       printk(KERN_CONT "enabled with %d MB physical and %d MB virtual size",
+                1 << (REAL_HPAGE_SHIFT - 20), 1 << (HPAGE_SHIFT - 20));
+#else
+       printk(KERN_CONT "disabled");
+#endif
+       printk(KERN_CONT ".\n");
+
 
        pdc_console_init();
 
@@ -377,6 +386,7 @@ arch_initcall(parisc_init);
 void start_parisc(void)
 {
        extern void start_kernel(void);
+       extern void early_trap_init(void);
 
        int ret, cpunum;
        struct pdc_coproc_cfg coproc_cfg;
@@ -397,6 +407,8 @@ void start_parisc(void)
                panic("must have an fpu to boot linux");
        }
 
+       early_trap_init(); /* initialize checksum of fault_vector */
+
        start_kernel();
        // not reached
 }
index 0b8d26d..3fbd725 100644 (file)
@@ -369,7 +369,7 @@ tracesys_exit:
        ldo     -16(%r30),%r29                  /* Reference param save area */
 #endif
        ldo     TASK_REGS(%r1),%r26
-       bl      do_syscall_trace_exit,%r2
+       BL      do_syscall_trace_exit,%r2
        STREG   %r28,TASK_PT_GR28(%r1)          /* save return value now */
        ldo     -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1      /* get task ptr */
        LDREG   TI_TASK(%r1), %r1
@@ -390,7 +390,7 @@ tracesys_sigexit:
 #ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29                  /* Reference param save area */
 #endif
-       bl      do_syscall_trace_exit,%r2
+       BL      do_syscall_trace_exit,%r2
        ldo     TASK_REGS(%r1),%r26
 
        ldil    L%syscall_exit_rfi,%r1
index b99b39f..553b098 100644 (file)
@@ -807,7 +807,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
 }
 
 
-int __init check_ivt(void *iva)
+void __init initialize_ivt(const void *iva)
 {
        extern u32 os_hpmc_size;
        extern const u32 os_hpmc[];
@@ -818,8 +818,8 @@ int __init check_ivt(void *iva)
        u32 *hpmcp;
        u32 length;
 
-       if (strcmp((char *)iva, "cows can fly"))
-               return -1;
+       if (strcmp((const char *)iva, "cows can fly"))
+               panic("IVT invalid");
 
        ivap = (u32 *)iva;
 
@@ -839,28 +839,23 @@ int __init check_ivt(void *iva)
            check += ivap[i];
 
        ivap[5] = -check;
-
-       return 0;
 }
        
-#ifndef CONFIG_64BIT
-extern const void fault_vector_11;
-#endif
-extern const void fault_vector_20;
 
-void __init trap_init(void)
+/* early_trap_init() is called before we set up kernel mappings and
+ * write-protect the kernel */
+void  __init early_trap_init(void)
 {
-       void *iva;
+       extern const void fault_vector_20;
 
-       if (boot_cpu_data.cpu_type >= pcxu)
-               iva = (void *) &fault_vector_20;
-       else
-#ifdef CONFIG_64BIT
-               panic("Can't boot 64-bit OS on PA1.1 processor!");
-#else
-               iva = (void *) &fault_vector_11;
+#ifndef CONFIG_64BIT
+       extern const void fault_vector_11;
+       initialize_ivt(&fault_vector_11);
 #endif
 
-       if (check_ivt(iva))
-               panic("IVT invalid");
+       initialize_ivt(&fault_vector_20);
+}
+
+void __init trap_init(void)
+{
 }
index 0dacc5c..308f290 100644 (file)
@@ -60,7 +60,7 @@ SECTIONS
                EXIT_DATA
        }
        PERCPU_SECTION(8)
-       . = ALIGN(PAGE_SIZE);
+       . = ALIGN(HUGEPAGE_SIZE);
        __init_end = .;
        /* freed after init ends here */
 
@@ -116,7 +116,7 @@ SECTIONS
         * that we can properly leave these
         * as writable
         */
-       . = ALIGN(PAGE_SIZE);
+       . = ALIGN(HUGEPAGE_SIZE);
        data_start = .;
 
        EXCEPTION_TABLE(8)
@@ -135,8 +135,11 @@ SECTIONS
        _edata = .;
 
        /* BSS */
-       BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 8)
+       BSS_SECTION(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE)
+
+       /* bootmap is allocated in setup_bootmem() directly behind bss. */
 
+       . = ALIGN(HUGEPAGE_SIZE);
        _end = . ;
 
        STABS_DEBUG
index 758ceef..134393d 100644 (file)
@@ -3,3 +3,4 @@
 #
 
 obj-y   := init.o fault.o ioremap.o
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
new file mode 100644 (file)
index 0000000..f6fdc77
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+ * PARISC64 Huge TLB page support.
+ *
+ * This parisc implementation is heavily based on the SPARC and x86 code.
+ *
+ * Copyright (C) 2015 Helge Deller <deller@gmx.de>
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/pagemap.h>
+#include <linux/sysctl.h>
+
+#include <asm/mman.h>
+#include <asm/pgalloc.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+
+unsigned long
+hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+               unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+       struct hstate *h = hstate_file(file);
+
+       if (len & ~huge_page_mask(h))
+               return -EINVAL;
+       if (len > TASK_SIZE)
+               return -ENOMEM;
+
+       if (flags & MAP_FIXED)
+               if (prepare_hugepage_range(file, addr, len))
+                       return -EINVAL;
+
+       if (addr)
+               addr = ALIGN(addr, huge_page_size(h));
+
+       /* we need to make sure the colouring is OK */
+       return arch_get_unmapped_area(file, addr, len, pgoff, flags);
+}
+
+
+pte_t *huge_pte_alloc(struct mm_struct *mm,
+                       unsigned long addr, unsigned long sz)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte = NULL;
+
+       /* We must align the address, because our caller will run
+        * set_huge_pte_at() on whatever we return, which writes out
+        * all of the sub-ptes for the hugepage range.  So we have
+        * to give it the first such sub-pte.
+        */
+       addr &= HPAGE_MASK;
+
+       pgd = pgd_offset(mm, addr);
+       pud = pud_alloc(mm, pgd, addr);
+       if (pud) {
+               pmd = pmd_alloc(mm, pud, addr);
+               if (pmd)
+                       pte = pte_alloc_map(mm, NULL, pmd, addr);
+       }
+       return pte;
+}
+
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte = NULL;
+
+       addr &= HPAGE_MASK;
+
+       pgd = pgd_offset(mm, addr);
+       if (!pgd_none(*pgd)) {
+               pud = pud_offset(pgd, addr);
+               if (!pud_none(*pud)) {
+                       pmd = pmd_offset(pud, addr);
+                       if (!pmd_none(*pmd))
+                               pte = pte_offset_map(pmd, addr);
+               }
+       }
+       return pte;
+}
+
+/* Purge data and instruction TLB entries.  Must be called holding
+ * the pa_tlb_lock.  The TLB purge instructions are slow on SMP
+ * machines since the purge must be broadcast to all CPUs.
+ */
+static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
+{
+       int i;
+
+       /* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
+        * Linux standard huge pages (e.g. 2 MB) */
+       BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);
+
+       addr &= HPAGE_MASK;
+       addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;
+
+       for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
+               mtsp(mm->context, 1);
+               pdtlb(addr);
+               if (unlikely(split_tlb))
+                       pitlb(addr);
+               addr += (1UL << REAL_HPAGE_SHIFT);
+       }
+}
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+                    pte_t *ptep, pte_t entry)
+{
+       unsigned long addr_start;
+       int i;
+
+       addr &= HPAGE_MASK;
+       addr_start = addr;
+
+       for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
+               /* Directly write pte entry.  We could call set_pte_at(mm, addr, ptep, entry)
+                * instead, but then we get double locking on pa_tlb_lock. */
+               *ptep = entry;
+               ptep++;
+
+               /* Drop the PAGE_SIZE/non-huge tlb entry */
+               purge_tlb_entries(mm, addr);
+
+               addr += PAGE_SIZE;
+               pte_val(entry) += PAGE_SIZE;
+       }
+
+       purge_tlb_entries_huge(mm, addr_start);
+}
+
+
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+                             pte_t *ptep)
+{
+       pte_t entry;
+
+       entry = *ptep;
+       set_huge_pte_at(mm, addr, ptep, __pte(0));
+
+       return entry;
+}
+
+int pmd_huge(pmd_t pmd)
+{
+       return 0;
+}
+
+int pud_huge(pud_t pud)
+{
+       return 0;
+}
index c5fec48..1b366c4 100644 (file)
@@ -409,15 +409,11 @@ static void __init map_pages(unsigned long start_vaddr,
        unsigned long vaddr;
        unsigned long ro_start;
        unsigned long ro_end;
-       unsigned long fv_addr;
-       unsigned long gw_addr;
-       extern const unsigned long fault_vector_20;
-       extern void * const linux_gateway_page;
+       unsigned long kernel_end;
 
        ro_start = __pa((unsigned long)_text);
        ro_end   = __pa((unsigned long)&data_start);
-       fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
-       gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
+       kernel_end  = __pa((unsigned long)&_end);
 
        end_paddr = start_paddr + size;
 
@@ -475,24 +471,25 @@ static void __init map_pages(unsigned long start_vaddr,
                        for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
                                pte_t pte;
 
-                               /*
-                                * Map the fault vector writable so we can
-                                * write the HPMC checksum.
-                                */
                                if (force)
                                        pte =  __mk_pte(address, pgprot);
-                               else if (parisc_text_address(vaddr) &&
-                                        address != fv_addr)
+                               else if (parisc_text_address(vaddr)) {
                                        pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+                                       if (address >= ro_start && address < kernel_end)
+                                               pte = pte_mkhuge(pte);
+                               }
                                else
 #if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
-                               if (address >= ro_start && address < ro_end
-                                                       && address != fv_addr
-                                                       && address != gw_addr)
-                                       pte = __mk_pte(address, PAGE_KERNEL_RO);
-                               else
+                               if (address >= ro_start && address < ro_end) {
+                                       pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+                                       pte = pte_mkhuge(pte);
+                               } else
 #endif
+                               {
                                        pte = __mk_pte(address, pgprot);
+                                       if (address >= ro_start && address < kernel_end)
+                                               pte = pte_mkhuge(pte);
+                               }
 
                                if (address >= end_paddr) {
                                        if (force)
@@ -536,15 +533,12 @@ void free_initmem(void)
 
        /* force the kernel to see the new TLB entries */
        __flush_tlb_range(0, init_begin, init_end);
-       /* Attempt to catch anyone trying to execute code here
-        * by filling the page with BRK insns.
-        */
-       memset((void *)init_begin, 0x00, init_end - init_begin);
+
        /* finally dump all the instructions which were cached, since the
         * pages are no-longer executable */
        flush_icache_range(init_begin, init_end);
        
-       free_initmem_default(-1);
+       free_initmem_default(POISON_FREE_INITMEM);
 
        /* set up a new led state on systems shipped LED State panel */
        pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
@@ -728,8 +722,8 @@ static void __init pagetable_init(void)
                unsigned long size;
 
                start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
-               end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
                size = pmem_ranges[range].pages << PAGE_SHIFT;
+               end_paddr = start_paddr + size;
 
                map_pages((unsigned long)__va(start_paddr), start_paddr,
                          size, PAGE_KERNEL, 0);
index a908ada..2220f7a 100644 (file)
 #define MSR_TS_T       __MASK(MSR_TS_T_LG)     /*  Transaction Transactional */
 #define MSR_TS_MASK    (MSR_TS_T | MSR_TS_S)   /* Transaction State bits */
 #define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
+#define MSR_TM_RESV(x) (((x) & MSR_TS_MASK) == MSR_TS_MASK) /* Reserved */
 #define MSR_TM_TRANSACTIONAL(x)        (((x) & MSR_TS_MASK) == MSR_TS_T)
 #define MSR_TM_SUSPENDED(x)    (((x) & MSR_TS_MASK) == MSR_TS_S)
 
index c9e26cb..f2b0b1b 100644 (file)
@@ -382,3 +382,4 @@ COMPAT_SYS(shmat)
 SYSCALL(shmdt)
 SYSCALL(shmget)
 COMPAT_SYS(shmctl)
+SYSCALL(mlock2)
index 6d8f802..4b6b8ac 100644 (file)
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define __NR_syscalls          378
+#define __NR_syscalls          379
 
 #define __NR__exit __NR_exit
 #define NR_syscalls    __NR_syscalls
index 81579e9..1effea5 100644 (file)
 #define __NR_shmdt             375
 #define __NR_shmget            376
 #define __NR_shmctl            377
+#define __NR_mlock2            378
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
index 75b6676..646bf4d 100644 (file)
@@ -551,6 +551,24 @@ static void tm_reclaim_thread(struct thread_struct *thr,
                msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
        }
 
+       /*
+        * Use the current MSR TM suspended bit to track if we have
+        * checkpointed state outstanding.
+        * On signal delivery, we'd normally reclaim the checkpointed
+        * state to obtain stack pointer (see: get_tm_stackpointer()).
+        * This will then directly return to userspace without going
+        * through __switch_to(). However, if the stack frame is bad,
+        * we need to exit this thread which calls __switch_to() which
+        * will again attempt to reclaim the already saved tm state.
+        * Hence we need to check that we've not already reclaimed
+        * this state.
+        * We do this using the current MSR, rather than tracking it in
+        * some specific thread_struct bit, as it has the additional
+        * benefit of checking for a potential TM bad thing exception.
+        */
+       if (!MSR_TM_SUSPENDED(mfmsr()))
+               return;
+
        tm_reclaim(thr, thr->regs->msr, cause);
 
        /* Having done the reclaim, we now have the checkpointed
index 0dbee46..ef7c24e 100644 (file)
@@ -875,6 +875,15 @@ static long restore_tm_user_regs(struct pt_regs *regs,
                return 1;
 #endif /* CONFIG_SPE */
 
+       /* Get the top half of the MSR from the user context */
+       if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
+               return 1;
+       msr_hi <<= 32;
+       /* If TM bits are set to the reserved value, it's an invalid context */
+       if (MSR_TM_RESV(msr_hi))
+               return 1;
+       /* Pull in the MSR TM bits from the user context */
+       regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
        /* Now, recheckpoint.  This loads up all of the checkpointed (older)
         * registers, including FP and V[S]Rs.  After recheckpointing, the
         * transactional versions should be loaded.
@@ -884,11 +893,6 @@ static long restore_tm_user_regs(struct pt_regs *regs,
        current->thread.tm_texasr |= TEXASR_FS;
        /* This loads the checkpointed FP/VEC state, if used */
        tm_recheckpoint(&current->thread, msr);
-       /* Get the top half of the MSR */
-       if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
-               return 1;
-       /* Pull in MSR TM from user context */
-       regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK);
 
        /* This loads the speculative FP/VEC state, if used */
        if (msr & MSR_FP) {
index 20756df..c676ece 100644 (file)
@@ -438,6 +438,10 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
 
        /* get MSR separately, transfer the LE bit if doing signal return */
        err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
+       /* Don't allow reserved mode. */
+       if (MSR_TM_RESV(msr))
+               return -EINVAL;
+
        /* pull in MSR TM from user context */
        regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
 
index 373e323..6a75352 100644 (file)
@@ -1030,8 +1030,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
                                   src_id, 0);
 
        /* sending vcpu invalid */
-       if (src_id >= KVM_MAX_VCPUS ||
-           kvm_get_vcpu(vcpu->kvm, src_id) == NULL)
+       if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
                return -EINVAL;
 
        if (sclp.has_sigpif)
@@ -1110,6 +1109,10 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
                                   irq->u.emerg.code, 0);
 
+       /* sending vcpu invalid */
+       if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
+               return -EINVAL;
+
        set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
        set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
        atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
index 8fe2f1c..8465892 100644 (file)
@@ -342,12 +342,16 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
                r = 0;
                break;
        case KVM_CAP_S390_VECTOR_REGISTERS:
-               if (MACHINE_HAS_VX) {
+               mutex_lock(&kvm->lock);
+               if (atomic_read(&kvm->online_vcpus)) {
+                       r = -EBUSY;
+               } else if (MACHINE_HAS_VX) {
                        set_kvm_facility(kvm->arch.model.fac->mask, 129);
                        set_kvm_facility(kvm->arch.model.fac->list, 129);
                        r = 0;
                } else
                        r = -EINVAL;
+               mutex_unlock(&kvm->lock);
                VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
                         r ? "(not available)" : "(success)");
                break;
index 77191b8..d76b51c 100644 (file)
@@ -660,7 +660,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
 
        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
 
-       if (!MACHINE_HAS_PFMF)
+       if (!test_kvm_facility(vcpu->kvm, 8))
                return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
 
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
index da690b6..77c22d6 100644 (file)
@@ -291,12 +291,8 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
                           u16 cpu_addr, u32 parameter, u64 *status_reg)
 {
        int rc;
-       struct kvm_vcpu *dst_vcpu;
+       struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
 
-       if (cpu_addr >= KVM_MAX_VCPUS)
-               return SIGP_CC_NOT_OPERATIONAL;
-
-       dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
        if (!dst_vcpu)
                return SIGP_CC_NOT_OPERATIONAL;
 
@@ -478,7 +474,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
        trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);
 
        if (order_code == SIGP_EXTERNAL_CALL) {
-               dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+               dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
                BUG_ON(dest_vcpu == NULL);
 
                kvm_s390_vcpu_wakeup(dest_vcpu);
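
The s390 hunks above replace open-coded "bounds-check the id, then index the vcpu array" sequences with a single lookup helper that returns NULL for anything it cannot resolve, so each caller only tests the result. A rough sketch of that shape, with hypothetical types and a made-up limit (not the KVM API):

#include <stddef.h>

#define MAX_VCPUS 64                    /* hypothetical limit */

struct vcpu { int id; };

struct vm {
        int nr_vcpus;
        struct vcpu *vcpus[MAX_VCPUS];
};

/* One place that knows how ids map to vcpus; callers only test for NULL. */
static struct vcpu *vm_get_vcpu_by_id(struct vm *vm, int id)
{
        int i;

        if (id < 0 || id >= MAX_VCPUS)
                return NULL;
        for (i = 0; i < vm->nr_vcpus; i++)
                if (vm->vcpus[i] && vm->vcpus[i]->id == id)
                        return vm->vcpus[i];
        return NULL;
}

int main(void)
{
        struct vcpu v0 = { .id = 0 };
        struct vm vm = { .nr_vcpus = 1, .vcpus = { &v0 } };

        return vm_get_vcpu_by_id(&vm, 5) ? 1 : 0;       /* unknown id -> NULL */
}
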
index 9f39056..690b402 100644 (file)
@@ -35,7 +35,7 @@
 #define MSR_IA32_PERFCTR0              0x000000c1
 #define MSR_IA32_PERFCTR1              0x000000c2
 #define MSR_FSB_FREQ                   0x000000cd
-#define MSR_NHM_PLATFORM_INFO          0x000000ce
+#define MSR_PLATFORM_INFO              0x000000ce
 
 #define MSR_NHM_SNB_PKG_CST_CFG_CTL    0x000000e2
 #define NHM_C3_AUTO_DEMOTE             (1UL << 25)
@@ -44,7 +44,6 @@
 #define SNB_C1_AUTO_UNDEMOTE           (1UL << 27)
 #define SNB_C3_AUTO_UNDEMOTE           (1UL << 28)
 
-#define MSR_PLATFORM_INFO              0x000000ce
 #define MSR_MTRRcap                    0x000000fe
 #define MSR_IA32_BBL_CR_CTL            0x00000119
 #define MSR_IA32_BBL_CR_CTL3           0x0000011e
index 4ddd780..c2b7522 100644 (file)
@@ -273,10 +273,9 @@ __setup("nosmap", setup_disable_smap);
 
 static __always_inline void setup_smap(struct cpuinfo_x86 *c)
 {
-       unsigned long eflags;
+       unsigned long eflags = native_save_fl();
 
        /* This should have been cleared long ago */
-       raw_local_save_flags(eflags);
        BUG_ON(eflags & X86_EFLAGS_AC);
 
        if (cpu_has(c, X86_FEATURE_SMAP)) {
index ef29b74..31c6a60 100644 (file)
@@ -385,20 +385,19 @@ fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
  */
 void fpu__init_prepare_fx_sw_frame(void)
 {
-       int fsave_header_size = sizeof(struct fregs_state);
        int size = xstate_size + FP_XSTATE_MAGIC2_SIZE;
 
-       if (config_enabled(CONFIG_X86_32))
-               size += fsave_header_size;
-
        fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
        fx_sw_reserved.extended_size = size;
        fx_sw_reserved.xfeatures = xfeatures_mask;
        fx_sw_reserved.xstate_size = xstate_size;
 
-       if (config_enabled(CONFIG_IA32_EMULATION)) {
+       if (config_enabled(CONFIG_IA32_EMULATION) ||
+           config_enabled(CONFIG_X86_32)) {
+               int fsave_header_size = sizeof(struct fregs_state);
+
                fx_sw_reserved_ia32 = fx_sw_reserved;
-               fx_sw_reserved_ia32.extended_size += fsave_header_size;
+               fx_sw_reserved_ia32.extended_size = size + fsave_header_size;
        }
 }
 
index 6454f27..70fc312 100644 (file)
@@ -694,7 +694,6 @@ void *get_xsave_addr(struct xregs_state *xsave, int xstate_feature)
        if (!boot_cpu_has(X86_FEATURE_XSAVE))
                return NULL;
 
-       xsave = &current->thread.fpu.state.xsave;
        /*
         * We should not ever be requesting features that we
         * have not enabled.  Remember that pcntxt_mask is
index 94ea120..87e1762 100644 (file)
@@ -278,6 +278,12 @@ trace:
        /* save_mcount_regs fills in first two parameters */
        save_mcount_regs
 
+       /*
+        * When DYNAMIC_FTRACE is not defined, ARCH_SUPPORTS_FTRACE_OPS is not
+        * set (see include/asm/ftrace.h and include/linux/ftrace.h).  Only the
+        * ip and parent ip are used and the list function is called when
+        * function tracing is enabled.
+        */
        call   *ftrace_trace_function
 
        restore_mcount_regs
index 87acc52..af823a3 100644 (file)
@@ -7394,11 +7394,6 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 
        switch (type) {
        case VMX_VPID_EXTENT_ALL_CONTEXT:
-               if (get_vmcs12(vcpu)->virtual_processor_id == 0) {
-                       nested_vmx_failValid(vcpu,
-                               VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
-                       return 1;
-               }
                __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
                nested_vmx_succeed(vcpu);
                break;
index 00462bd..eed3228 100644 (file)
@@ -2763,6 +2763,26 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
        return 0;
 }
 
+static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
+{
+       return (!lapic_in_kernel(vcpu) ||
+               kvm_apic_accept_pic_intr(vcpu));
+}
+
+/*
+ * If userspace requested an interrupt window, check that the
+ * interrupt window is open.
+ *
+ * No need to exit to userspace if we already have an interrupt queued.
+ */
+static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
+{
+       return kvm_arch_interrupt_allowed(vcpu) &&
+               !kvm_cpu_has_interrupt(vcpu) &&
+               !kvm_event_needs_reinjection(vcpu) &&
+               kvm_cpu_accept_dm_intr(vcpu);
+}
+
 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
                                    struct kvm_interrupt *irq)
 {
@@ -2786,6 +2806,7 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
                return -EEXIST;
 
        vcpu->arch.pending_external_vector = irq->irq;
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
        return 0;
 }
 
@@ -5910,23 +5931,10 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
        return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
 }
 
-/*
- * Check if userspace requested an interrupt window, and that the
- * interrupt window is open.
- *
- * No need to exit to userspace if we already have an interrupt queued.
- */
 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
 {
-       if (!vcpu->run->request_interrupt_window || pic_in_kernel(vcpu->kvm))
-               return false;
-
-       if (kvm_cpu_has_interrupt(vcpu))
-               return false;
-
-       return (irqchip_split(vcpu->kvm)
-               ? kvm_apic_accept_pic_intr(vcpu)
-               : kvm_arch_interrupt_allowed(vcpu));
+       return vcpu->run->request_interrupt_window &&
+               likely(!pic_in_kernel(vcpu->kvm));
 }
 
 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
@@ -5937,17 +5945,9 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
        kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0;
        kvm_run->cr8 = kvm_get_cr8(vcpu);
        kvm_run->apic_base = kvm_get_apic_base(vcpu);
-       if (!irqchip_in_kernel(vcpu->kvm))
-               kvm_run->ready_for_interrupt_injection =
-                       kvm_arch_interrupt_allowed(vcpu) &&
-                       !kvm_cpu_has_interrupt(vcpu) &&
-                       !kvm_event_needs_reinjection(vcpu);
-       else if (!pic_in_kernel(vcpu->kvm))
-               kvm_run->ready_for_interrupt_injection =
-                       kvm_apic_accept_pic_intr(vcpu) &&
-                       !kvm_cpu_has_interrupt(vcpu);
-       else
-               kvm_run->ready_for_interrupt_injection = 1;
+       kvm_run->ready_for_interrupt_injection =
+               pic_in_kernel(vcpu->kvm) ||
+               kvm_vcpu_ready_for_interrupt_injection(vcpu);
 }
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
@@ -6360,8 +6360,10 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 {
        int r;
-       bool req_int_win = !lapic_in_kernel(vcpu) &&
-               vcpu->run->request_interrupt_window;
+       bool req_int_win =
+               dm_request_for_irq_injection(vcpu) &&
+               kvm_cpu_accept_dm_intr(vcpu);
+
        bool req_immediate_exit = false;
 
        if (vcpu->requests) {
@@ -6663,7 +6665,8 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
                if (kvm_cpu_has_pending_timer(vcpu))
                        kvm_inject_pending_timer_irqs(vcpu);
 
-               if (dm_request_for_irq_injection(vcpu)) {
+               if (dm_request_for_irq_injection(vcpu) &&
+                       kvm_vcpu_ready_for_interrupt_injection(vcpu)) {
                        r = 0;
                        vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
                        ++vcpu->stat.request_irq_exits;
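
The x86 rework above factors one large conditional into two small predicates, whether userspace asked for an interrupt window and whether the vcpu is actually ready to take an injection, and then reuses them in post_kvm_run_save() and vcpu_run(). A condensed sketch of composing such predicates over a flattened, hypothetical vcpu state (illustrative only, not the KVM structures):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, flattened vcpu state for illustration only. */
struct vcpu_state {
        bool request_interrupt_window;  /* set by userspace         */
        bool pic_in_kernel;             /* kernel emulates the PIC  */
        bool interrupts_allowed;
        bool interrupt_queued;
        bool needs_reinjection;
};

static bool dm_requests_irq_window(const struct vcpu_state *v)
{
        return v->request_interrupt_window && !v->pic_in_kernel;
}

static bool ready_for_injection(const struct vcpu_state *v)
{
        return v->interrupts_allowed && !v->interrupt_queued &&
               !v->needs_reinjection;
}

/* Exit to userspace only when both predicates hold. */
static bool should_exit_for_irq_window(const struct vcpu_state *v)
{
        return dm_requests_irq_window(v) && ready_for_injection(v);
}

int main(void)
{
        struct vcpu_state v = {
                .request_interrupt_window = true,
                .interrupts_allowed = true,
        };

        printf("%d\n", should_exit_for_irq_window(&v)); /* 1 */
        return 0;
}
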
index b0ae85f..1202d5c 100644 (file)
@@ -585,6 +585,29 @@ static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
        return bt_addr;
 }
 
+/*
+ * We only want to do a 4-byte get_user() on 32-bit.  Otherwise,
+ * we might run off the end of the bounds table if we are on
+ * a 64-bit kernel and try to get 8 bytes.
+ */
+int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
+               long __user *bd_entry_ptr)
+{
+       u32 bd_entry_32;
+       int ret;
+
+       if (is_64bit_mm(mm))
+               return get_user(*bd_entry_ret, bd_entry_ptr);
+
+       /*
+        * Note that get_user() uses the type of the *pointer* to
+        * establish the size of the get, not the destination.
+        */
+       ret = get_user(bd_entry_32, (u32 __user *)bd_entry_ptr);
+       *bd_entry_ret = bd_entry_32;
+       return ret;
+}
+
 /*
  * Get the base of bounds tables pointed by specific bounds
  * directory entry.
@@ -605,7 +628,7 @@ static int get_bt_addr(struct mm_struct *mm,
                int need_write = 0;
 
                pagefault_disable();
-               ret = get_user(bd_entry, bd_entry_ptr);
+               ret = get_user_bd_entry(mm, &bd_entry, bd_entry_ptr);
                pagefault_enable();
                if (!ret)
                        break;
@@ -700,11 +723,23 @@ static unsigned long mpx_get_bt_entry_offset_bytes(struct mm_struct *mm,
  */
 static inline unsigned long bd_entry_virt_space(struct mm_struct *mm)
 {
-       unsigned long long virt_space = (1ULL << boot_cpu_data.x86_virt_bits);
-       if (is_64bit_mm(mm))
-               return virt_space / MPX_BD_NR_ENTRIES_64;
-       else
-               return virt_space / MPX_BD_NR_ENTRIES_32;
+       unsigned long long virt_space;
+       unsigned long long GB = (1ULL << 30);
+
+       /*
+        * This covers 32-bit emulation as well as 32-bit kernels
+        * running on 64-bit hardware.
+        */
+       if (!is_64bit_mm(mm))
+               return (4ULL * GB) / MPX_BD_NR_ENTRIES_32;
+
+       /*
+        * 'x86_virt_bits' returns what the hardware is capable
+        * of, and returns the full >32-bit address space when
+        * running 32-bit kernels on 64-bit hardware.
+        */
+       virt_space = (1ULL << boot_cpu_data.x86_virt_bits);
+       return virt_space / MPX_BD_NR_ENTRIES_64;
 }
 
 /*
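
The MPX change sizes the directory-entry read by the width of the target mm rather than the width of the kernel, because get_user() derives its access size from the pointer type. A small standalone sketch of that width dispatch; copy_in() is a hypothetical stand-in for the uaccess primitive, not a real kernel call:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for a user-copy primitive; always succeeds here. */
static int copy_in(void *dst, const void *src, size_t len)
{
        memcpy(dst, src, len);
        return 0;
}

/* Read a bounds-directory entry as 8 bytes for a 64-bit task and as
 * 4 bytes for a 32-bit task, so a 32-bit table is never over-read. */
static int read_bd_entry(bool task_is_64bit, uint64_t *out, const void *src)
{
        uint32_t entry32;
        int ret;

        if (task_is_64bit)
                return copy_in(out, src, sizeof(*out));

        ret = copy_in(&entry32, src, sizeof(entry32));
        *out = entry32;
        return ret;
}

int main(void)
{
        uint64_t entry64 = 0x1122334455667788ULL;
        uint64_t out = 0;

        read_bd_entry(false, &out, &entry64);   /* low 4 bytes on little-endian */
        printf("%llx\n", (unsigned long long)out);
        return 0;
}
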
index de5716d..41a55ba 100644 (file)
@@ -76,6 +76,9 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
        struct bio_vec bv, bvprv, *bvprvp = NULL;
        struct bvec_iter iter;
        unsigned seg_size = 0, nsegs = 0, sectors = 0;
+       unsigned front_seg_size = bio->bi_seg_front_size;
+       bool do_split = true;
+       struct bio *new = NULL;
 
        bio_for_each_segment(bv, bio, iter) {
                if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q))
@@ -98,7 +101,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 
                        seg_size += bv.bv_len;
                        bvprv = bv;
-                       bvprvp = &bv;
+                       bvprvp = &bvprv;
                        sectors += bv.bv_len >> 9;
                        continue;
                }
@@ -108,16 +111,29 @@ new_segment:
 
                nsegs++;
                bvprv = bv;
-               bvprvp = &bv;
+               bvprvp = &bvprv;
                seg_size = bv.bv_len;
                sectors += bv.bv_len >> 9;
+
+               if (nsegs == 1 && seg_size > front_seg_size)
+                       front_seg_size = seg_size;
        }
 
-       *segs = nsegs;
-       return NULL;
+       do_split = false;
 split:
        *segs = nsegs;
-       return bio_split(bio, sectors, GFP_NOIO, bs);
+
+       if (do_split) {
+               new = bio_split(bio, sectors, GFP_NOIO, bs);
+               if (new)
+                       bio = new;
+       }
+
+       bio->bi_seg_front_size = front_seg_size;
+       if (seg_size > bio->bi_seg_back_size)
+               bio->bi_seg_back_size = seg_size;
+
+       return do_split ? new : NULL;
 }
 
 void blk_queue_split(struct request_queue *q, struct bio **bio,
@@ -412,6 +428,12 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
        if (sg)
                sg_mark_end(sg);
 
+       /*
+        * Something must have gone wrong if the computed number of
+        * segments is bigger than the request's physical segment count
+        */
+       WARN_ON(nsegs > rq->nr_phys_segments);
+
        return nsegs;
 }
 EXPORT_SYMBOL(blk_rq_map_sg);
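
The bvprvp fix above addresses a general bug class: keeping a pointer to the loop's iteration variable, which is overwritten on the next pass, instead of pointing at an explicitly saved copy. A minimal sketch of the correct form, where the buggy variant would take &cur instead of &prev (invented names, purely illustrative):

#include <stdio.h>

struct seg { int len; };

/* Walk segments while remembering the previous one.  The pointer must
 * track the saved copy ('prev'), never the loop variable ('cur'),
 * because 'cur' is overwritten at the top of every iteration. */
static int total_prev_lengths(const struct seg *segs, int n)
{
        struct seg cur, prev;
        const struct seg *prevp = NULL;
        int total = 0;
        int i;

        for (i = 0; i < n; i++) {
                cur = segs[i];
                if (prevp)
                        total += prevp->len;    /* last iteration's value */
                prev = cur;
                prevp = &prev;                  /* NOT &cur */
        }
        if (prevp)
                total += prevp->len;
        return total;
}

int main(void)
{
        struct seg s[] = { { 1 }, { 2 }, { 3 } };

        printf("%d\n", total_prev_lengths(s, 3));       /* 6 */
        return 0;
}
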
index 3ae09de..6d6f8fe 100644 (file)
@@ -1291,15 +1291,16 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                blk_mq_bio_to_request(rq, bio);
 
                /*
-                * we do limited pluging. If bio can be merged, do merge.
+                * We do limited plugging. If the bio can be merged, do that.
                 * Otherwise the existing request in the plug list will be
                 * issued. So the plug list will have one request at most
                 */
                if (plug) {
                        /*
                         * The plug list might get flushed before this. If that
-                        * happens, same_queue_rq is invalid and plug list is empty
-                        **/
+                        * happens, same_queue_rq is invalid and plug list is
+                        * empty
+                        */
                        if (same_queue_rq && !list_empty(&plug->mq_list)) {
                                old_rq = same_queue_rq;
                                list_del_init(&old_rq->queuelist);
@@ -1380,12 +1381,15 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
                blk_mq_bio_to_request(rq, bio);
                if (!request_count)
                        trace_block_plug(q);
-               else if (request_count >= BLK_MAX_REQUEST_COUNT) {
+
+               blk_mq_put_ctx(data.ctx);
+
+               if (request_count >= BLK_MAX_REQUEST_COUNT) {
                        blk_flush_plug_list(plug, false);
                        trace_block_plug(q);
                }
+
                list_add_tail(&rq->queuelist, &plug->mq_list);
-               blk_mq_put_ctx(data.ctx);
                return cookie;
        }
 
index 246dfb1..aa40aa9 100644 (file)
@@ -158,11 +158,13 @@ void blk_abort_request(struct request *req)
 {
        if (blk_mark_rq_complete(req))
                return;
-       blk_delete_timer(req);
-       if (req->q->mq_ops)
+
+       if (req->q->mq_ops) {
                blk_mq_rq_timed_out(req, false);
-       else
+       } else {
+               blk_delete_timer(req);
                blk_rq_timed_out(req);
+       }
 }
 EXPORT_SYMBOL_GPL(blk_abort_request);
 
index da722eb..c43926d 100644 (file)
@@ -72,8 +72,6 @@ void blk_dequeue_request(struct request *rq);
 void __blk_queue_free_tags(struct request_queue *q);
 bool __blk_end_bidi_request(struct request *rq, int error,
                            unsigned int nr_bytes, unsigned int bidi_bytes);
-int blk_queue_enter(struct request_queue *q, gfp_t gfp);
-void blk_queue_exit(struct request_queue *q);
 void blk_freeze_queue(struct request_queue *q);
 
 static inline void blk_queue_enter_live(struct request_queue *q)
index 3de89d4..a163c48 100644 (file)
@@ -21,10 +21,10 @@ static void noop_merged_requests(struct request_queue *q, struct request *rq,
 static int noop_dispatch(struct request_queue *q, int force)
 {
        struct noop_data *nd = q->elevator->elevator_data;
+       struct request *rq;
 
-       if (!list_empty(&nd->queue)) {
-               struct request *rq;
-               rq = list_entry(nd->queue.next, struct request, queuelist);
+       rq = list_first_entry_or_null(&nd->queue, struct request, queuelist);
+       if (rq) {
                list_del_init(&rq->queuelist);
                elv_dispatch_sort(q, rq);
                return 1;
@@ -46,7 +46,7 @@ noop_former_request(struct request_queue *q, struct request *rq)
 
        if (rq->queuelist.prev == &nd->queue)
                return NULL;
-       return list_entry(rq->queuelist.prev, struct request, queuelist);
+       return list_prev_entry(rq, queuelist);
 }
 
 static struct request *
@@ -56,7 +56,7 @@ noop_latter_request(struct request_queue *q, struct request *rq)
 
        if (rq->queuelist.next == &nd->queue)
                return NULL;
-       return list_entry(rq->queuelist.next, struct request, queuelist);
+       return list_next_entry(rq, queuelist);
 }
 
 static int noop_init_queue(struct request_queue *q, struct elevator_type *e)
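
The noop-iosched cleanup swaps open-coded list_entry() arithmetic for list helpers that make the empty-list case explicit. A tiny userspace sketch with minimal stand-ins for the <linux/list.h> macros used above (these are simplified re-creations, not the kernel implementation):

#include <stddef.h>
#include <stdio.h>

/* Minimal stand-ins for the <linux/list.h> helpers used above. */
struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)
#define list_first_entry_or_null(head, type, member) \
        ((head)->next != (head) ? list_entry((head)->next, type, member) : NULL)

struct request { int id; struct list_head queuelist; };

int main(void)
{
        struct list_head queue = { &queue, &queue };    /* empty list */
        struct request *rq;

        rq = list_first_entry_or_null(&queue, struct request, queuelist);
        printf("%s\n", rq ? "dispatch" : "queue empty"); /* "queue empty" */
        return 0;
}
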
index c2c48ec..621317a 100644 (file)
@@ -32,7 +32,7 @@ int mac_partition(struct parsed_partitions *state)
        Sector sect;
        unsigned char *data;
        int slot, blocks_in_map;
-       unsigned secsize;
+       unsigned secsize, datasize, partoffset;
 #ifdef CONFIG_PPC_PMAC
        int found_root = 0;
        int found_root_goodness = 0;
@@ -50,10 +50,14 @@ int mac_partition(struct parsed_partitions *state)
        }
        secsize = be16_to_cpu(md->block_size);
        put_dev_sector(sect);
-       data = read_part_sector(state, secsize/512, &sect);
+       datasize = round_down(secsize, 512);
+       data = read_part_sector(state, datasize / 512, &sect);
        if (!data)
                return -1;
-       part = (struct mac_partition *) (data + secsize%512);
+       partoffset = secsize % 512;
+       if (partoffset + sizeof(*part) > datasize)
+               return -1;
+       part = (struct mac_partition *) (data + partoffset);
        if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) {
                put_dev_sector(sect);
                return 0;               /* not a MacOS disk */
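
The mac.c fix is a validate-before-cast hardening: round the claimed block size down to what was actually read, then refuse any offset whose structure would run past the buffer. Roughly, with hypothetical sizes and a trimmed-down header struct:

#include <stdint.h>
#include <stddef.h>

#define SECTOR_SIZE 512

struct mac_partition_hdr { uint16_t signature; /* ... */ };

/* Return a pointer into 'sector' only when a whole header fits, else NULL. */
static const struct mac_partition_hdr *
partition_hdr(const unsigned char *sector, size_t datasize, unsigned secsize)
{
        unsigned partoffset = secsize % SECTOR_SIZE;

        if (partoffset + sizeof(struct mac_partition_hdr) > datasize)
                return NULL;
        return (const struct mac_partition_hdr *)(sector + partoffset);
}

int main(void)
{
        unsigned char sector[SECTOR_SIZE] = { 0 };

        /* A 2048-byte block size maps to offset 0 here and is accepted. */
        return partition_hdr(sector, sizeof(sector), 2048) ? 0 : 1;
}
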
index 73d0391..795d0ca 100644 (file)
@@ -63,6 +63,7 @@ obj-$(CONFIG_FB_I810)           += video/fbdev/i810/
 obj-$(CONFIG_FB_INTEL)          += video/fbdev/intelfb/
 
 obj-$(CONFIG_PARPORT)          += parport/
+obj-$(CONFIG_NVM)              += lightnvm/
 obj-y                          += base/ block/ misc/ mfd/ nfc/
 obj-$(CONFIG_LIBNVDIMM)                += nvdimm/
 obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/
@@ -70,7 +71,6 @@ obj-$(CONFIG_NUBUS)           += nubus/
 obj-y                          += macintosh/
 obj-$(CONFIG_IDE)              += ide/
 obj-$(CONFIG_SCSI)             += scsi/
-obj-$(CONFIG_NVM)              += lightnvm/
 obj-y                          += nvme/
 obj-$(CONFIG_ATA)              += ata/
 obj-$(CONFIG_TARGET_CORE)      += target/
index 3c083d2..6730f96 100644 (file)
@@ -304,7 +304,7 @@ EXPORT_SYMBOL_GPL(acpi_get_psd_map);
 
 static int register_pcc_channel(int pcc_subspace_idx)
 {
-       struct acpi_pcct_subspace *cppc_ss;
+       struct acpi_pcct_hw_reduced *cppc_ss;
        unsigned int len;
 
        if (pcc_subspace_idx >= 0) {
index f61a7c8..b420fb4 100644 (file)
@@ -1103,7 +1103,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
        }
 
 err_exit:
-       if (result && q)
+       if (result)
                acpi_ec_delete_query(q);
        if (data)
                *data = value;
index bf034f8..2fa8304 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
-#include <linux/dmi.h>
 #include "sbshc.h"
 
 #define PREFIX "ACPI: "
@@ -30,6 +29,7 @@ struct acpi_smb_hc {
        u8 query_bit;
        smbus_alarm_callback callback;
        void *context;
+       bool done;
 };
 
 static int acpi_smbus_hc_add(struct acpi_device *device);
@@ -88,8 +88,6 @@ enum acpi_smb_offset {
        ACPI_SMB_ALARM_DATA = 0x26,     /* 2 bytes alarm data */
 };
 
-static bool macbook;
-
 static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data)
 {
        return ec_read(hc->offset + address, data);
@@ -100,27 +98,11 @@ static inline int smb_hc_write(struct acpi_smb_hc *hc, u8 address, u8 data)
        return ec_write(hc->offset + address, data);
 }
 
-static inline int smb_check_done(struct acpi_smb_hc *hc)
-{
-       union acpi_smb_status status = {.raw = 0};
-       smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw);
-       return status.fields.done && (status.fields.status == SMBUS_OK);
-}
-
 static int wait_transaction_complete(struct acpi_smb_hc *hc, int timeout)
 {
-       if (wait_event_timeout(hc->wait, smb_check_done(hc),
-                              msecs_to_jiffies(timeout)))
+       if (wait_event_timeout(hc->wait, hc->done, msecs_to_jiffies(timeout)))
                return 0;
-       /*
-        * After the timeout happens, OS will try to check the status of SMbus.
-        * If the status is what OS expected, it will be regarded as the bogus
-        * timeout.
-        */
-       if (smb_check_done(hc))
-               return 0;
-       else
-               return -ETIME;
+       return -ETIME;
 }
 
 static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
@@ -135,8 +117,7 @@ static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
        }
 
        mutex_lock(&hc->lock);
-       if (macbook)
-               udelay(5);
+       hc->done = false;
        if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp))
                goto end;
        if (temp) {
@@ -235,8 +216,10 @@ static int smbus_alarm(void *context)
        if (smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw))
                return 0;
        /* Check if it is only a completion notify */
-       if (status.fields.done)
+       if (status.fields.done && status.fields.status == SMBUS_OK) {
+               hc->done = true;
                wake_up(&hc->wait);
+       }
        if (!status.fields.alarm)
                return 0;
        mutex_lock(&hc->lock);
@@ -262,29 +245,12 @@ extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
                              acpi_handle handle, acpi_ec_query_func func,
                              void *data);
 
-static int macbook_dmi_match(const struct dmi_system_id *d)
-{
-       pr_debug("Detected MacBook, enabling workaround\n");
-       macbook = true;
-       return 0;
-}
-
-static struct dmi_system_id acpi_smbus_dmi_table[] = {
-       { macbook_dmi_match, "Apple MacBook", {
-         DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
-         DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") },
-       },
-       { },
-};
-
 static int acpi_smbus_hc_add(struct acpi_device *device)
 {
        int status;
        unsigned long long val;
        struct acpi_smb_hc *hc;
 
-       dmi_check_system(acpi_smbus_dmi_table);
-
        if (!device)
                return -EINVAL;
 
index eb6e674..0d77cd6 100644 (file)
@@ -68,6 +68,9 @@ int dev_pm_set_wake_irq(struct device *dev, int irq)
        struct wake_irq *wirq;
        int err;
 
+       if (irq < 0)
+               return -EINVAL;
+
        wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
        if (!wirq)
                return -ENOMEM;
@@ -167,6 +170,9 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
        struct wake_irq *wirq;
        int err;
 
+       if (irq < 0)
+               return -EINVAL;
+
        wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
        if (!wirq)
                return -ENOMEM;
index a28a562..3457ac8 100644 (file)
@@ -3810,7 +3810,6 @@ static int mtip_block_initialize(struct driver_data *dd)
        sector_t capacity;
        unsigned int index = 0;
        struct kobject *kobj;
-       unsigned char thd_name[16];
 
        if (dd->disk)
                goto skip_create_disk; /* hw init done, before rebuild */
@@ -3958,10 +3957,9 @@ skip_create_disk:
        }
 
 start_service_thread:
-       sprintf(thd_name, "mtip_svc_thd_%02d", index);
        dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread,
-                                               dd, dd->numa_node, "%s",
-                                               thd_name);
+                                               dd, dd->numa_node,
+                                               "mtip_svc_thd_%02d", index);
 
        if (IS_ERR(dd->mtip_svc_handler)) {
                dev_err(&dd->pdev->dev, "service thread failed to start\n");
index 6255d1c..5c8ba54 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/slab.h>
 #include <linux/blk-mq.h>
 #include <linux/hrtimer.h>
+#include <linux/lightnvm.h>
 
 struct nullb_cmd {
        struct list_head list;
@@ -39,12 +40,14 @@ struct nullb {
 
        struct nullb_queue *queues;
        unsigned int nr_queues;
+       char disk_name[DISK_NAME_LEN];
 };
 
 static LIST_HEAD(nullb_list);
 static struct mutex lock;
 static int null_major;
 static int nullb_indexes;
+static struct kmem_cache *ppa_cache;
 
 struct completion_queue {
        struct llist_head list;
@@ -119,6 +122,10 @@ static int nr_devices = 2;
 module_param(nr_devices, int, S_IRUGO);
 MODULE_PARM_DESC(nr_devices, "Number of devices to register");
 
+static bool use_lightnvm;
+module_param(use_lightnvm, bool, S_IRUGO);
+MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");
+
 static int irqmode = NULL_IRQ_SOFTIRQ;
 
 static int null_set_irqmode(const char *str, const struct kernel_param *kp)
@@ -427,15 +434,156 @@ static void null_del_dev(struct nullb *nullb)
 {
        list_del_init(&nullb->list);
 
-       del_gendisk(nullb->disk);
+       if (use_lightnvm)
+               nvm_unregister(nullb->disk_name);
+       else
+               del_gendisk(nullb->disk);
        blk_cleanup_queue(nullb->q);
        if (queue_mode == NULL_Q_MQ)
                blk_mq_free_tag_set(&nullb->tag_set);
-       put_disk(nullb->disk);
+       if (!use_lightnvm)
+               put_disk(nullb->disk);
        cleanup_queues(nullb);
        kfree(nullb);
 }
 
+#ifdef CONFIG_NVM
+
+static void null_lnvm_end_io(struct request *rq, int error)
+{
+       struct nvm_rq *rqd = rq->end_io_data;
+       struct nvm_dev *dev = rqd->dev;
+
+       dev->mt->end_io(rqd, error);
+
+       blk_put_request(rq);
+}
+
+static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
+{
+       struct request *rq;
+       struct bio *bio = rqd->bio;
+
+       rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0);
+       if (IS_ERR(rq))
+               return -ENOMEM;
+
+       rq->cmd_type = REQ_TYPE_DRV_PRIV;
+       rq->__sector = bio->bi_iter.bi_sector;
+       rq->ioprio = bio_prio(bio);
+
+       if (bio_has_data(bio))
+               rq->nr_phys_segments = bio_phys_segments(q, bio);
+
+       rq->__data_len = bio->bi_iter.bi_size;
+       rq->bio = rq->biotail = bio;
+
+       rq->end_io_data = rqd;
+
+       blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);
+
+       return 0;
+}
+
+static int null_lnvm_id(struct request_queue *q, struct nvm_id *id)
+{
+       sector_t size = gb * 1024 * 1024 * 1024ULL;
+       sector_t blksize;
+       struct nvm_id_group *grp;
+
+       id->ver_id = 0x1;
+       id->vmnt = 0;
+       id->cgrps = 1;
+       id->cap = 0x3;
+       id->dom = 0x1;
+
+       id->ppaf.blk_offset = 0;
+       id->ppaf.blk_len = 16;
+       id->ppaf.pg_offset = 16;
+       id->ppaf.pg_len = 16;
+       id->ppaf.sect_offset = 32;
+       id->ppaf.sect_len = 8;
+       id->ppaf.pln_offset = 40;
+       id->ppaf.pln_len = 8;
+       id->ppaf.lun_offset = 48;
+       id->ppaf.lun_len = 8;
+       id->ppaf.ch_offset = 56;
+       id->ppaf.ch_len = 8;
+
+       do_div(size, bs); /* convert size to pages */
+       do_div(size, 256); /* convert size to pgs per blk */
+       grp = &id->groups[0];
+       grp->mtype = 0;
+       grp->fmtype = 0;
+       grp->num_ch = 1;
+       grp->num_pg = 256;
+       blksize = size;
+       do_div(size, (1 << 16));
+       grp->num_lun = size + 1;
+       do_div(blksize, grp->num_lun);
+       grp->num_blk = blksize;
+       grp->num_pln = 1;
+
+       grp->fpg_sz = bs;
+       grp->csecs = bs;
+       grp->trdt = 25000;
+       grp->trdm = 25000;
+       grp->tprt = 500000;
+       grp->tprm = 500000;
+       grp->tbet = 1500000;
+       grp->tbem = 1500000;
+       grp->mpos = 0x010101; /* single plane rwe */
+       grp->cpar = hw_queue_depth;
+
+       return 0;
+}
+
+static void *null_lnvm_create_dma_pool(struct request_queue *q, char *name)
+{
+       mempool_t *virtmem_pool;
+
+       virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
+       if (!virtmem_pool) {
+               pr_err("null_blk: Unable to create virtual memory pool\n");
+               return NULL;
+       }
+
+       return virtmem_pool;
+}
+
+static void null_lnvm_destroy_dma_pool(void *pool)
+{
+       mempool_destroy(pool);
+}
+
+static void *null_lnvm_dev_dma_alloc(struct request_queue *q, void *pool,
+                               gfp_t mem_flags, dma_addr_t *dma_handler)
+{
+       return mempool_alloc(pool, mem_flags);
+}
+
+static void null_lnvm_dev_dma_free(void *pool, void *entry,
+                                                       dma_addr_t dma_handler)
+{
+       mempool_free(entry, pool);
+}
+
+static struct nvm_dev_ops null_lnvm_dev_ops = {
+       .identity               = null_lnvm_id,
+       .submit_io              = null_lnvm_submit_io,
+
+       .create_dma_pool        = null_lnvm_create_dma_pool,
+       .destroy_dma_pool       = null_lnvm_destroy_dma_pool,
+       .dev_dma_alloc          = null_lnvm_dev_dma_alloc,
+       .dev_dma_free           = null_lnvm_dev_dma_free,
+
+       /* Simulate nvme protocol restriction */
+       .max_phys_sect          = 64,
+};
+#else
+static struct nvm_dev_ops null_lnvm_dev_ops;
+#endif /* CONFIG_NVM */
+
 static int null_open(struct block_device *bdev, fmode_t mode)
 {
        return 0;
@@ -575,11 +723,6 @@ static int null_add_dev(void)
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);
 
-       disk = nullb->disk = alloc_disk_node(1, home_node);
-       if (!disk) {
-               rv = -ENOMEM;
-               goto out_cleanup_blk_queue;
-       }
 
        mutex_lock(&lock);
        list_add_tail(&nullb->list, &nullb_list);
@@ -589,6 +732,21 @@ static int null_add_dev(void)
        blk_queue_logical_block_size(nullb->q, bs);
        blk_queue_physical_block_size(nullb->q, bs);
 
+       sprintf(nullb->disk_name, "nullb%d", nullb->index);
+
+       if (use_lightnvm) {
+               rv = nvm_register(nullb->q, nullb->disk_name,
+                                                       &null_lnvm_dev_ops);
+               if (rv)
+                       goto out_cleanup_blk_queue;
+               goto done;
+       }
+
+       disk = nullb->disk = alloc_disk_node(1, home_node);
+       if (!disk) {
+               rv = -ENOMEM;
+               goto out_cleanup_lightnvm;
+       }
        size = gb * 1024 * 1024 * 1024ULL;
        set_capacity(disk, size >> 9);
 
@@ -598,10 +756,15 @@ static int null_add_dev(void)
        disk->fops              = &null_fops;
        disk->private_data      = nullb;
        disk->queue             = nullb->q;
-       sprintf(disk->disk_name, "nullb%d", nullb->index);
+       strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
+
        add_disk(disk);
+done:
        return 0;
 
+out_cleanup_lightnvm:
+       if (use_lightnvm)
+               nvm_unregister(nullb->disk_name);
 out_cleanup_blk_queue:
        blk_cleanup_queue(nullb->q);
 out_cleanup_tags:
@@ -625,6 +788,18 @@ static int __init null_init(void)
                bs = PAGE_SIZE;
        }
 
+       if (use_lightnvm && bs != 4096) {
+               pr_warn("null_blk: LightNVM only supports 4k block size\n");
+               pr_warn("null_blk: defaulting block size to 4k\n");
+               bs = 4096;
+       }
+
+       if (use_lightnvm && queue_mode != NULL_Q_MQ) {
+               pr_warn("null_blk: LightNVM only supported for blk-mq\n");
+               pr_warn("null_blk: defaulting queue mode to blk-mq\n");
+               queue_mode = NULL_Q_MQ;
+       }
+
        if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
                if (submit_queues < nr_online_nodes) {
                        pr_warn("null_blk: submit_queues param is set to %u.",
@@ -655,15 +830,27 @@ static int __init null_init(void)
        if (null_major < 0)
                return null_major;
 
+       if (use_lightnvm) {
+               ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
+                                                               0, 0, NULL);
+               if (!ppa_cache) {
+                       pr_err("null_blk: unable to create ppa cache\n");
+                       return -ENOMEM;
+               }
+       }
+
        for (i = 0; i < nr_devices; i++) {
                if (null_add_dev()) {
                        unregister_blkdev(null_major, "nullb");
-                       return -EINVAL;
+                       goto err_ppa;
                }
        }
 
        pr_info("null: module loaded\n");
        return 0;
+err_ppa:
+       kmem_cache_destroy(ppa_cache);
+       return -EINVAL;
 }
 
 static void __exit null_exit(void)
@@ -678,6 +865,8 @@ static void __exit null_exit(void)
                null_del_dev(nullb);
        }
        mutex_unlock(&lock);
+
+       kmem_cache_destroy(ppa_cache);
 }
 
 module_init(null_init);
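
null_init() now sanitizes incompatible module parameters instead of failing when use_lightnvm is set. A hedged sketch of that kind of parameter sanitizer, with invented names and types rather than the driver's actual options:

#include <stdbool.h>
#include <stdio.h>

enum queue_mode { Q_BIO, Q_RQ, Q_MQ };

/* When the LightNVM path is requested, force the block size and queue
 * mode it depends on instead of failing module load. */
static void sanitize_params(bool use_lightnvm, int *bs, enum queue_mode *qm)
{
        if (!use_lightnvm)
                return;
        if (*bs != 4096) {
                fprintf(stderr, "only 4k blocks supported, overriding\n");
                *bs = 4096;
        }
        if (*qm != Q_MQ) {
                fprintf(stderr, "only blk-mq supported, overriding\n");
                *qm = Q_MQ;
        }
}

int main(void)
{
        int bs = 512;
        enum queue_mode qm = Q_BIO;

        sanitize_params(true, &bs, &qm);
        printf("bs=%d qm=%d\n", bs, (int)qm);   /* bs=4096 qm=2 */
        return 0;
}
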
index 9f18569..bf500e0 100644 (file)
@@ -117,7 +117,7 @@ static struct platform_driver omap_ocp2scp_driver = {
 
 module_platform_driver(omap_ocp2scp_driver);
 
-MODULE_ALIAS("platform: omap-ocp2scp");
+MODULE_ALIAS("platform:omap-ocp2scp");
 MODULE_AUTHOR("Texas Instruments Inc.");
 MODULE_DESCRIPTION("OMAP OCP2SCP driver");
 MODULE_LICENSE("GPL v2");
index 654f6f3..55fe902 100644 (file)
@@ -412,18 +412,42 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
        return rv;
 }
 
-static void start_check_enables(struct smi_info *smi_info)
+static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
+{
+       smi_info->last_timeout_jiffies = jiffies;
+       mod_timer(&smi_info->si_timer, new_val);
+       smi_info->timer_running = true;
+}
+
+/*
+ * Start a new message and (re)start the timer and thread.
+ */
+static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
+                         unsigned int size)
+{
+       smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
+       if (smi_info->thread)
+               wake_up_process(smi_info->thread);
+
+       smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
+}
+
+static void start_check_enables(struct smi_info *smi_info, bool start_timer)
 {
        unsigned char msg[2];
 
        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
        msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
 
-       smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+       if (start_timer)
+               start_new_msg(smi_info, msg, 2);
+       else
+               smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
        smi_info->si_state = SI_CHECKING_ENABLES;
 }
 
-static void start_clear_flags(struct smi_info *smi_info)
+static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
 {
        unsigned char msg[3];
 
@@ -432,7 +456,10 @@ static void start_clear_flags(struct smi_info *smi_info)
        msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
        msg[2] = WDT_PRE_TIMEOUT_INT;
 
-       smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+       if (start_timer)
+               start_new_msg(smi_info, msg, 3);
+       else
+               smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
        smi_info->si_state = SI_CLEARING_FLAGS;
 }
 
@@ -442,10 +469,8 @@ static void start_getting_msg_queue(struct smi_info *smi_info)
        smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
        smi_info->curr_msg->data_size = 2;
 
-       smi_info->handlers->start_transaction(
-               smi_info->si_sm,
-               smi_info->curr_msg->data,
-               smi_info->curr_msg->data_size);
+       start_new_msg(smi_info, smi_info->curr_msg->data,
+                     smi_info->curr_msg->data_size);
        smi_info->si_state = SI_GETTING_MESSAGES;
 }
 
@@ -455,20 +480,11 @@ static void start_getting_events(struct smi_info *smi_info)
        smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
        smi_info->curr_msg->data_size = 2;
 
-       smi_info->handlers->start_transaction(
-               smi_info->si_sm,
-               smi_info->curr_msg->data,
-               smi_info->curr_msg->data_size);
+       start_new_msg(smi_info, smi_info->curr_msg->data,
+                     smi_info->curr_msg->data_size);
        smi_info->si_state = SI_GETTING_EVENTS;
 }
 
-static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
-{
-       smi_info->last_timeout_jiffies = jiffies;
-       mod_timer(&smi_info->si_timer, new_val);
-       smi_info->timer_running = true;
-}
-
 /*
  * When we have a situation where we run out of memory and cannot
  * allocate messages, we just leave them in the BMC and run the system
@@ -478,11 +494,11 @@ static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
  * Note that we cannot just use disable_irq(), since the interrupt may
  * be shared.
  */
-static inline bool disable_si_irq(struct smi_info *smi_info)
+static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
 {
        if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
                smi_info->interrupt_disabled = true;
-               start_check_enables(smi_info);
+               start_check_enables(smi_info, start_timer);
                return true;
        }
        return false;
@@ -492,7 +508,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info)
 {
        if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
                smi_info->interrupt_disabled = false;
-               start_check_enables(smi_info);
+               start_check_enables(smi_info, true);
                return true;
        }
        return false;
@@ -510,7 +526,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
 
        msg = ipmi_alloc_smi_msg();
        if (!msg) {
-               if (!disable_si_irq(smi_info))
+               if (!disable_si_irq(smi_info, true))
                        smi_info->si_state = SI_NORMAL;
        } else if (enable_si_irq(smi_info)) {
                ipmi_free_smi_msg(msg);
@@ -526,7 +542,7 @@ static void handle_flags(struct smi_info *smi_info)
                /* Watchdog pre-timeout */
                smi_inc_stat(smi_info, watchdog_pretimeouts);
 
-               start_clear_flags(smi_info);
+               start_clear_flags(smi_info, true);
                smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
                if (smi_info->intf)
                        ipmi_smi_watchdog_pretimeout(smi_info->intf);
@@ -879,8 +895,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
                        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
                        msg[1] = IPMI_GET_MSG_FLAGS_CMD;
 
-                       smi_info->handlers->start_transaction(
-                               smi_info->si_sm, msg, 2);
+                       start_new_msg(smi_info, msg, 2);
                        smi_info->si_state = SI_GETTING_FLAGS;
                        goto restart;
                }
@@ -910,7 +925,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
                 * disable and messages disabled.
                 */
                if (smi_info->supports_event_msg_buff || smi_info->irq) {
-                       start_check_enables(smi_info);
+                       start_check_enables(smi_info, true);
                } else {
                        smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
                        if (!smi_info->curr_msg)
@@ -920,6 +935,13 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
                }
                goto restart;
        }
+
+       if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
+               /* It's OK if this fails; the timer will just go off. */
+               if (del_timer(&smi_info->si_timer))
+                       smi_info->timer_running = false;
+       }
+
  out:
        return si_sm_result;
 }
@@ -2560,6 +2582,7 @@ static const struct of_device_id of_ipmi_match[] = {
          .data = (void *)(unsigned long) SI_BT },
        {},
 };
+MODULE_DEVICE_TABLE(of, of_ipmi_match);
 
 static int of_ipmi_probe(struct platform_device *dev)
 {
@@ -2646,7 +2669,6 @@ static int of_ipmi_probe(struct platform_device *dev)
        }
        return 0;
 }
-MODULE_DEVICE_TABLE(of, of_ipmi_match);
 #else
 #define of_ipmi_match NULL
 static int of_ipmi_probe(struct platform_device *dev)
@@ -3613,7 +3635,7 @@ static int try_smi_init(struct smi_info *new_smi)
         * Start clearing the flags before we enable interrupts or the
         * timer to avoid racing with the timer.
         */
-       start_clear_flags(new_smi);
+       start_clear_flags(new_smi, false);
 
        /*
         * IRQ is defined to be set when non-zero.  req_events will
@@ -3908,7 +3930,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
                poll(to_clean);
                schedule_timeout_uninterruptible(1);
        }
-       disable_si_irq(to_clean);
+       disable_si_irq(to_clean, false);
        while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
                poll(to_clean);
                schedule_timeout_uninterruptible(1);
index 0ac3bd1..096f0ce 100644 (file)
@@ -153,6 +153,9 @@ static int timeout = 10;
 /* The pre-timeout is disabled by default. */
 static int pretimeout;
 
+/* Default timeout to set on panic */
+static int panic_wdt_timeout = 255;
+
 /* Default action is to reset the board on a timeout. */
 static unsigned char action_val = WDOG_TIMEOUT_RESET;
 
@@ -293,6 +296,9 @@ MODULE_PARM_DESC(timeout, "Timeout value in seconds.");
 module_param(pretimeout, timeout, 0644);
 MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds.");
 
+module_param(panic_wdt_timeout, timeout, 0644);
+MODULE_PARM_DESC(panic_wdt_timeout, "Timeout value on kernel panic in seconds.");
+
 module_param_cb(action, &param_ops_str, action_op, 0644);
 MODULE_PARM_DESC(action, "Timeout action. One of: "
                 "reset, none, power_cycle, power_off.");
@@ -1189,7 +1195,7 @@ static int wdog_panic_handler(struct notifier_block *this,
                /* Make sure we do this only once. */
                panic_event_handled = 1;
 
-               timeout = 255;
+               timeout = panic_wdt_timeout;
                pretimeout = 0;
                panic_halt_ipmi_set_timeout();
        }
index 71cfdf7..2eb5f0e 100644 (file)
@@ -1,4 +1,5 @@
 menu "Clock Source drivers"
+       depends on !ARCH_USES_GETTIMEOFFSET
 
 config CLKSRC_OF
        bool
index 10202f1..517e1c7 100644 (file)
@@ -203,7 +203,7 @@ static int __init ftm_clockevent_init(unsigned long freq, int irq)
        int err;
 
        ftm_writel(0x00, priv->clkevt_base + FTM_CNTIN);
-       ftm_writel(~0UL, priv->clkevt_base + FTM_MOD);
+       ftm_writel(~0u, priv->clkevt_base + FTM_MOD);
 
        ftm_reset_counter(priv->clkevt_base);
 
@@ -230,7 +230,7 @@ static int __init ftm_clocksource_init(unsigned long freq)
        int err;
 
        ftm_writel(0x00, priv->clksrc_base + FTM_CNTIN);
-       ftm_writel(~0UL, priv->clksrc_base + FTM_MOD);
+       ftm_writel(~0u, priv->clksrc_base + FTM_MOD);
 
        ftm_reset_counter(priv->clksrc_base);
 
index 1582c1c..235a1ba 100644 (file)
@@ -84,6 +84,7 @@ config ARM_KIRKWOOD_CPUFREQ
 config ARM_MT8173_CPUFREQ
        bool "Mediatek MT8173 CPUFreq support"
        depends on ARCH_MEDIATEK && REGULATOR
+       depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST)
        depends on !CPU_THERMAL || THERMAL=y
        select PM_OPP
        help
@@ -201,7 +202,7 @@ config ARM_SA1110_CPUFREQ
 
 config ARM_SCPI_CPUFREQ
         tristate "SCPI based CPUfreq driver"
-       depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL
+       depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
         help
          This adds the CPUfreq driver support for ARM big.LITTLE platforms
          using SCPI protocol for CPU power management.
index adbd1de..c59bdcb 100644 (file)
@@ -5,7 +5,6 @@
 config X86_INTEL_PSTATE
        bool "Intel P state control"
        depends on X86
-       select ACPI_PROCESSOR if ACPI
        help
           This driver provides a P state for Intel core processors.
          The driver implements an internal governor and will become
index e8cb334..7c0bdfb 100644 (file)
@@ -98,10 +98,11 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
        policy->max = cpu->perf_caps.highest_perf;
        policy->cpuinfo.min_freq = policy->min;
        policy->cpuinfo.max_freq = policy->max;
+       policy->shared_type = cpu->shared_type;
 
        if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
                cpumask_copy(policy->cpus, cpu->shared_cpu_map);
-       else {
+       else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
                /* Support only SW_ANY for now. */
                pr_debug("Unsupported CPU co-ord type\n");
                return -EFAULT;
index 7c48e73..a83c995 100644 (file)
@@ -1401,13 +1401,10 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
        }
 
        cpumask_clear_cpu(cpu, policy->real_cpus);
+       remove_cpu_dev_symlink(policy, cpu);
 
-       if (cpumask_empty(policy->real_cpus)) {
+       if (cpumask_empty(policy->real_cpus))
                cpufreq_policy_free(policy, true);
-               return;
-       }
-
-       remove_cpu_dev_symlink(policy, cpu);
 }
 
 static void handle_update(struct work_struct *work)
index 2e31d09..4d07cbd 100644 (file)
 #include <asm/cpu_device_id.h>
 #include <asm/cpufeature.h>
 
-#if IS_ENABLED(CONFIG_ACPI)
-#include <acpi/processor.h>
-#endif
-
-#define BYT_RATIOS             0x66a
-#define BYT_VIDS               0x66b
-#define BYT_TURBO_RATIOS       0x66c
-#define BYT_TURBO_VIDS         0x66d
+#define ATOM_RATIOS            0x66a
+#define ATOM_VIDS              0x66b
+#define ATOM_TURBO_RATIOS      0x66c
+#define ATOM_TURBO_VIDS                0x66d
 
 #define FRAC_BITS 8
 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
@@ -117,9 +113,6 @@ struct cpudata {
        u64     prev_mperf;
        u64     prev_tsc;
        struct sample sample;
-#if IS_ENABLED(CONFIG_ACPI)
-       struct acpi_processor_performance acpi_perf_data;
-#endif
 };
 
 static struct cpudata **all_cpu_data;
@@ -150,7 +143,6 @@ struct cpu_defaults {
 static struct pstate_adjust_policy pid_params;
 static struct pstate_funcs pstate_funcs;
 static int hwp_active;
-static int no_acpi_perf;
 
 struct perf_limits {
        int no_turbo;
@@ -163,8 +155,6 @@ struct perf_limits {
        int max_sysfs_pct;
        int min_policy_pct;
        int min_sysfs_pct;
-       int max_perf_ctl;
-       int min_perf_ctl;
 };
 
 static struct perf_limits performance_limits = {
@@ -191,8 +181,6 @@ static struct perf_limits powersave_limits = {
        .max_sysfs_pct = 100,
        .min_policy_pct = 0,
        .min_sysfs_pct = 0,
-       .max_perf_ctl = 0,
-       .min_perf_ctl = 0,
 };
 
 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
@@ -201,153 +189,6 @@ static struct perf_limits *limits = &performance_limits;
 static struct perf_limits *limits = &powersave_limits;
 #endif
 
-#if IS_ENABLED(CONFIG_ACPI)
-/*
- * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and
- * in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and
- * max_turbo_pstate fields. The PERF_CTL MSR contains 16 bit value for P state
- * ratio, out of it only high 8 bits are used. For example 0x1700 is setting
- * target ratio 0x17. The _PSS control value stores in a format which can be
- * directly written to PERF_CTL MSR. But in intel_pstate driver this shift
- * occurs during write to PERF_CTL (E.g. for cores core_set_pstate()).
- * This function converts the _PSS control value to intel pstate driver format
- * for comparison and assignment.
- */
-static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
-{
-       return cpu->acpi_perf_data.states[index].control >> 8;
-}
-
-static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
-{
-       struct cpudata *cpu;
-       int ret;
-       bool turbo_absent = false;
-       int max_pstate_index;
-       int min_pss_ctl, max_pss_ctl, turbo_pss_ctl;
-       int i;
-
-       cpu = all_cpu_data[policy->cpu];
-
-       pr_debug("intel_pstate: default limits 0x%x 0x%x 0x%x\n",
-                cpu->pstate.min_pstate, cpu->pstate.max_pstate,
-                cpu->pstate.turbo_pstate);
-
-       if (!cpu->acpi_perf_data.shared_cpu_map &&
-           zalloc_cpumask_var_node(&cpu->acpi_perf_data.shared_cpu_map,
-                                   GFP_KERNEL, cpu_to_node(policy->cpu))) {
-               return -ENOMEM;
-       }
-
-       ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
-                                                 policy->cpu);
-       if (ret)
-               return ret;
-
-       /*
-        * Check if the control value in _PSS is for PERF_CTL MSR, which should
-        * guarantee that the states returned by it map to the states in our
-        * list directly.
-        */
-       if (cpu->acpi_perf_data.control_register.space_id !=
-                                               ACPI_ADR_SPACE_FIXED_HARDWARE)
-               return -EIO;
-
-       pr_debug("intel_pstate: CPU%u - ACPI _PSS perf data\n", policy->cpu);
-       for (i = 0; i < cpu->acpi_perf_data.state_count; i++)
-               pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
-                        (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
-                        (u32) cpu->acpi_perf_data.states[i].core_frequency,
-                        (u32) cpu->acpi_perf_data.states[i].power,
-                        (u32) cpu->acpi_perf_data.states[i].control);
-
-       /*
-        * If there is only one entry _PSS, simply ignore _PSS and continue as
-        * usual without taking _PSS into account
-        */
-       if (cpu->acpi_perf_data.state_count < 2)
-               return 0;
-
-       turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0);
-       min_pss_ctl = convert_to_native_pstate_format(cpu,
-                                       cpu->acpi_perf_data.state_count - 1);
-       /* Check if there is a turbo freq in _PSS */
-       if (turbo_pss_ctl <= cpu->pstate.max_pstate &&
-           turbo_pss_ctl > cpu->pstate.min_pstate) {
-               pr_debug("intel_pstate: no turbo range exists in _PSS\n");
-               limits->no_turbo = limits->turbo_disabled = 1;
-               cpu->pstate.turbo_pstate = cpu->pstate.max_pstate;
-               turbo_absent = true;
-       }
-
-       /* Check if the max non turbo p state < Intel P state max */
-       max_pstate_index = turbo_absent ? 0 : 1;
-       max_pss_ctl = convert_to_native_pstate_format(cpu, max_pstate_index);
-       if (max_pss_ctl < cpu->pstate.max_pstate &&
-           max_pss_ctl > cpu->pstate.min_pstate)
-               cpu->pstate.max_pstate = max_pss_ctl;
-
-       /* check If min perf > Intel P State min */
-       if (min_pss_ctl > cpu->pstate.min_pstate &&
-           min_pss_ctl < cpu->pstate.max_pstate) {
-               cpu->pstate.min_pstate = min_pss_ctl;
-               policy->cpuinfo.min_freq = min_pss_ctl * cpu->pstate.scaling;
-       }
-
-       if (turbo_absent)
-               policy->cpuinfo.max_freq = cpu->pstate.max_pstate *
-                                               cpu->pstate.scaling;
-       else {
-               policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate *
-                                               cpu->pstate.scaling;
-               /*
-                * The _PSS table doesn't contain whole turbo frequency range.
-                * This just contains +1 MHZ above the max non turbo frequency,
-                * with control value corresponding to max turbo ratio. But
-                * when cpufreq set policy is called, it will call with this
-                * max frequency, which will cause a reduced performance as
-                * this driver uses real max turbo frequency as the max
-                * frequeny. So correct this frequency in _PSS table to
-                * correct max turbo frequency based on the turbo ratio.
-                * Also need to convert to MHz as _PSS freq is in MHz.
-                */
-               cpu->acpi_perf_data.states[0].core_frequency =
-                                               turbo_pss_ctl * 100;
-       }
-
-       pr_debug("intel_pstate: Updated limits using _PSS 0x%x 0x%x 0x%x\n",
-                cpu->pstate.min_pstate, cpu->pstate.max_pstate,
-                cpu->pstate.turbo_pstate);
-       pr_debug("intel_pstate: policy max_freq=%d Khz min_freq = %d KHz\n",
-                policy->cpuinfo.max_freq, policy->cpuinfo.min_freq);
-
-       return 0;
-}
-
-static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
-{
-       struct cpudata *cpu;
-
-       if (!no_acpi_perf)
-               return 0;
-
-       cpu = all_cpu_data[policy->cpu];
-       acpi_processor_unregister_performance(policy->cpu);
-       return 0;
-}
-
-#else
-static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
-{
-       return 0;
-}
-
-static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
-{
-       return 0;
-}
-#endif
-
 static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
                             int deadband, int integral) {
        pid->setpoint = setpoint;
@@ -687,31 +528,31 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
        wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
 }
 
-static int byt_get_min_pstate(void)
+static int atom_get_min_pstate(void)
 {
        u64 value;
 
-       rdmsrl(BYT_RATIOS, value);
+       rdmsrl(ATOM_RATIOS, value);
        return (value >> 8) & 0x7F;
 }
 
-static int byt_get_max_pstate(void)
+static int atom_get_max_pstate(void)
 {
        u64 value;
 
-       rdmsrl(BYT_RATIOS, value);
+       rdmsrl(ATOM_RATIOS, value);
        return (value >> 16) & 0x7F;
 }
 
-static int byt_get_turbo_pstate(void)
+static int atom_get_turbo_pstate(void)
 {
        u64 value;
 
-       rdmsrl(BYT_TURBO_RATIOS, value);
+       rdmsrl(ATOM_TURBO_RATIOS, value);
        return value & 0x7F;
 }
 
-static void byt_set_pstate(struct cpudata *cpudata, int pstate)
+static void atom_set_pstate(struct cpudata *cpudata, int pstate)
 {
        u64 val;
        int32_t vid_fp;
@@ -736,27 +577,42 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
        wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
 }
 
-#define BYT_BCLK_FREQS 5
-static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};
-
-static int byt_get_scaling(void)
+static int silvermont_get_scaling(void)
 {
        u64 value;
        int i;
+       /* Defined in Table 35-6 from SDM (Sept 2015) */
+       static int silvermont_freq_table[] = {
+               83300, 100000, 133300, 116700, 80000};
 
        rdmsrl(MSR_FSB_FREQ, value);
-       i = value & 0x3;
+       i = value & 0x7;
+       WARN_ON(i > 4);
 
-       BUG_ON(i > BYT_BCLK_FREQS);
+       return silvermont_freq_table[i];
+}
 
-       return byt_freq_table[i] * 100;
+static int airmont_get_scaling(void)
+{
+       u64 value;
+       int i;
+       /* Defined in Table 35-10 from SDM (Sept 2015) */
+       static int airmont_freq_table[] = {
+               83300, 100000, 133300, 116700, 80000,
+               93300, 90000, 88900, 87500};
+
+       rdmsrl(MSR_FSB_FREQ, value);
+       i = value & 0xF;
+       WARN_ON(i > 8);
+
+       return airmont_freq_table[i];
 }
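
The Silvermont and Airmont helpers above replace the old shared 5-entry table: each microarchitecture gets its own bus-clock table, the MSR_FSB_FREQ field is masked to its documented width, and the table values are already in kHz. A minimal userspace sketch of the same lookup, assuming a hard-coded sample register value in place of a real rdmsr:

#include <stdio.h>
#include <stdint.h>

/* Scaling values in kHz, mirroring the Airmont table above (SDM Table 35-10). */
static const int airmont_freq_table[] = {
	83300, 100000, 133300, 116700, 80000,
	93300, 90000, 88900, 87500,
};

static int airmont_scaling_khz(uint64_t msr_fsb_freq)
{
	unsigned int i = msr_fsb_freq & 0xF;	/* 4-bit bus-frequency field */

	if (i > 8)				/* outside the documented table */
		return -1;
	return airmont_freq_table[i];
}

int main(void)
{
	uint64_t sample = 0x2;	/* hypothetical MSR value: index 2 selects 133.3 MHz */

	printf("scaling = %d kHz\n", airmont_scaling_khz(sample));
	return 0;
}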
 
-static void byt_get_vid(struct cpudata *cpudata)
+static void atom_get_vid(struct cpudata *cpudata)
 {
        u64 value;
 
-       rdmsrl(BYT_VIDS, value);
+       rdmsrl(ATOM_VIDS, value);
        cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
        cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
        cpudata->vid.ratio = div_fp(
@@ -764,7 +620,7 @@ static void byt_get_vid(struct cpudata *cpudata)
                int_tofp(cpudata->pstate.max_pstate -
                        cpudata->pstate.min_pstate));
 
-       rdmsrl(BYT_TURBO_VIDS, value);
+       rdmsrl(ATOM_TURBO_VIDS, value);
        cpudata->vid.turbo = value & 0x7f;
 }
 
@@ -885,7 +741,7 @@ static struct cpu_defaults core_params = {
        },
 };
 
-static struct cpu_defaults byt_params = {
+static struct cpu_defaults silvermont_params = {
        .pid_policy = {
                .sample_rate_ms = 10,
                .deadband = 0,
@@ -895,13 +751,33 @@ static struct cpu_defaults byt_params = {
                .i_gain_pct = 4,
        },
        .funcs = {
-               .get_max = byt_get_max_pstate,
-               .get_max_physical = byt_get_max_pstate,
-               .get_min = byt_get_min_pstate,
-               .get_turbo = byt_get_turbo_pstate,
-               .set = byt_set_pstate,
-               .get_scaling = byt_get_scaling,
-               .get_vid = byt_get_vid,
+               .get_max = atom_get_max_pstate,
+               .get_max_physical = atom_get_max_pstate,
+               .get_min = atom_get_min_pstate,
+               .get_turbo = atom_get_turbo_pstate,
+               .set = atom_set_pstate,
+               .get_scaling = silvermont_get_scaling,
+               .get_vid = atom_get_vid,
+       },
+};
+
+static struct cpu_defaults airmont_params = {
+       .pid_policy = {
+               .sample_rate_ms = 10,
+               .deadband = 0,
+               .setpoint = 60,
+               .p_gain_pct = 14,
+               .d_gain_pct = 0,
+               .i_gain_pct = 4,
+       },
+       .funcs = {
+               .get_max = atom_get_max_pstate,
+               .get_max_physical = atom_get_max_pstate,
+               .get_min = atom_get_min_pstate,
+               .get_turbo = atom_get_turbo_pstate,
+               .set = atom_set_pstate,
+               .get_scaling = airmont_get_scaling,
+               .get_vid = atom_get_vid,
        },
 };
 
@@ -938,23 +814,12 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
         * policy, or by cpu specific default values determined through
         * experimentation.
         */
-       if (limits->max_perf_ctl && limits->max_sysfs_pct >=
-                                               limits->max_policy_pct) {
-               *max = limits->max_perf_ctl;
-       } else {
-               max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf),
-                                       limits->max_perf));
-               *max = clamp_t(int, max_perf_adj, cpu->pstate.min_pstate,
-                              cpu->pstate.turbo_pstate);
-       }
+       max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits->max_perf));
+       *max = clamp_t(int, max_perf_adj,
+                       cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
 
-       if (limits->min_perf_ctl) {
-               *min = limits->min_perf_ctl;
-       } else {
-               min_perf = fp_toint(mul_fp(int_tofp(max_perf),
-                                   limits->min_perf));
-               *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
-       }
+       min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits->min_perf));
+       *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
 }
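
With the _PSS-derived perf_ctl overrides removed, intel_pstate_get_min_max() computes both bounds the same way: scale the highest available P-state by the fixed-point limit fraction and clamp the result to the hardware range. A self-contained sketch of that arithmetic, assuming the driver's 8-bit fixed-point format and made-up P-state numbers:

#include <stdio.h>

#define FRAC_BITS 8
#define int_tofp(X) ((long long)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static long long mul_fp(long long x, long long y)
{
	return (x * y) >> FRAC_BITS;
}

static int clamp_int(int val, int lo, int hi)
{
	return val < lo ? lo : (val > hi ? hi : val);
}

int main(void)
{
	/* Hypothetical platform: P-states 8..35, turbo 39, max_perf_pct = 75. */
	int min_pstate = 8, max_pstate = 35, turbo_pstate = 39;
	long long max_perf_frac = int_tofp(75) / 100;	/* like limits->max_perf */

	int max_perf_adj = fp_toint(mul_fp(int_tofp(max_pstate), max_perf_frac));
	int max = clamp_int(max_perf_adj, min_pstate, turbo_pstate);

	printf("requested 75%% of P%d -> P%d (clamped to [%d, %d])\n",
	       max_pstate, max, min_pstate, turbo_pstate);
	return 0;
}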
 
 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
@@ -1153,7 +1018,7 @@ static void intel_pstate_timer_func(unsigned long __data)
 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
        ICPU(0x2a, core_params),
        ICPU(0x2d, core_params),
-       ICPU(0x37, byt_params),
+       ICPU(0x37, silvermont_params),
        ICPU(0x3a, core_params),
        ICPU(0x3c, core_params),
        ICPU(0x3d, core_params),
@@ -1162,7 +1027,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
        ICPU(0x45, core_params),
        ICPU(0x46, core_params),
        ICPU(0x47, core_params),
-       ICPU(0x4c, byt_params),
+       ICPU(0x4c, airmont_params),
        ICPU(0x4e, core_params),
        ICPU(0x4f, core_params),
        ICPU(0x5e, core_params),
@@ -1229,12 +1094,6 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
 
 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 {
-#if IS_ENABLED(CONFIG_ACPI)
-       struct cpudata *cpu;
-       int i;
-#endif
-       pr_debug("intel_pstate: %s max %u policy->max %u\n", __func__,
-                policy->cpuinfo.max_freq, policy->max);
        if (!policy->cpuinfo.max_freq)
                return -ENODEV;
 
@@ -1242,6 +1101,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
            policy->max >= policy->cpuinfo.max_freq) {
                pr_debug("intel_pstate: set performance\n");
                limits = &performance_limits;
+               if (hwp_active)
+                       intel_pstate_hwp_set();
                return 0;
        }
 
@@ -1249,7 +1110,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
        limits = &powersave_limits;
        limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
        limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100);
-       limits->max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
+       limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
+                                             policy->cpuinfo.max_freq);
        limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0 , 100);
 
        /* Normalize user input to [min_policy_pct, max_policy_pct] */
@@ -1261,6 +1123,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
                                   limits->max_sysfs_pct);
        limits->max_perf_pct = max(limits->min_policy_pct,
                                   limits->max_perf_pct);
+       limits->max_perf = round_up(limits->max_perf, 8);
 
        /* Make sure min_perf_pct <= max_perf_pct */
        limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
@@ -1270,23 +1133,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
        limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
                                  int_tofp(100));
 
-#if IS_ENABLED(CONFIG_ACPI)
-       cpu = all_cpu_data[policy->cpu];
-       for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
-               int control;
-
-               control = convert_to_native_pstate_format(cpu, i);
-               if (control * cpu->pstate.scaling == policy->max)
-                       limits->max_perf_ctl = control;
-               if (control * cpu->pstate.scaling == policy->min)
-                       limits->min_perf_ctl = control;
-       }
-
-       pr_debug("intel_pstate: max %u policy_max %u perf_ctl [0x%x-0x%x]\n",
-                policy->cpuinfo.max_freq, policy->max, limits->min_perf_ctl,
-                limits->max_perf_ctl);
-#endif
-
        if (hwp_active)
                intel_pstate_hwp_set();
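
Switching max_policy_pct to DIV_ROUND_UP matters whenever policy->max is not an exact percentage of cpuinfo.max_freq: plain integer division truncates, so the effective cap could end up one percent below what the user asked for. A small demonstration with invented frequencies:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int max_freq = 3200000;	/* cpuinfo.max_freq in kHz (hypothetical) */
	unsigned int policy_max = 2500000;	/* user-requested cap in kHz */

	unsigned int truncated  = policy_max * 100 / max_freq;
	unsigned int rounded_up = DIV_ROUND_UP(policy_max * 100, max_freq);

	printf("plain division: %u%%, DIV_ROUND_UP: %u%%\n", truncated, rounded_up);
	return 0;
}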
 
@@ -1341,30 +1187,18 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
        policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
        policy->cpuinfo.max_freq =
                cpu->pstate.turbo_pstate * cpu->pstate.scaling;
-       if (!no_acpi_perf)
-               intel_pstate_init_perf_limits(policy);
-       /*
-        * If there is no acpi perf data or error, we ignore and use Intel P
-        * state calculated limits, So this is not fatal error.
-        */
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
        cpumask_set_cpu(policy->cpu, policy->cpus);
 
        return 0;
 }
 
-static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
-{
-       return intel_pstate_exit_perf_limits(policy);
-}
-
 static struct cpufreq_driver intel_pstate_driver = {
        .flags          = CPUFREQ_CONST_LOOPS,
        .verify         = intel_pstate_verify_policy,
        .setpolicy      = intel_pstate_set_policy,
        .get            = intel_pstate_get,
        .init           = intel_pstate_cpu_init,
-       .exit           = intel_pstate_cpu_exit,
        .stop_cpu       = intel_pstate_stop_cpu,
        .name           = "intel_pstate",
 };
@@ -1406,6 +1240,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
 }
 
 #if IS_ENABLED(CONFIG_ACPI)
+#include <acpi/processor.h>
 
 static bool intel_pstate_no_acpi_pss(void)
 {
@@ -1601,9 +1436,6 @@ static int __init intel_pstate_setup(char *str)
                force_load = 1;
        if (!strcmp(str, "hwp_only"))
                hwp_only = 1;
-       if (!strcmp(str, "no_acpi"))
-               no_acpi_perf = 1;
-
        return 0;
 }
 early_param("intel_pstate", intel_pstate_setup);
index 4e55239..53d22eb 100644 (file)
@@ -729,8 +729,8 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
                return NULL;
 
        dev_info(chan2dev(chan),
-                "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n",
-               __func__, xt->src_start, xt->dst_start, xt->numf,
+                "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
+               __func__, &xt->src_start, &xt->dst_start, xt->numf,
                xt->frame_size, flags);
 
        /*
@@ -824,8 +824,8 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
        u32                     ctrla;
        u32                     ctrlb;
 
-       dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
-                       dest, src, len, flags);
+       dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n",
+                       &dest, &src, len, flags);
 
        if (unlikely(!len)) {
                dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
@@ -938,8 +938,8 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
        void __iomem            *vaddr;
        dma_addr_t              paddr;
 
-       dev_vdbg(chan2dev(chan), "%s: d0x%x v0x%x l0x%zx f0x%lx\n", __func__,
-               dest, value, len, flags);
+       dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
+               &dest, value, len, flags);
 
        if (unlikely(!len)) {
                dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
@@ -1022,8 +1022,8 @@ atc_prep_dma_memset_sg(struct dma_chan *chan,
                dma_addr_t dest = sg_dma_address(sg);
                size_t len = sg_dma_len(sg);
 
-               dev_vdbg(chan2dev(chan), "%s: d0x%08x, l0x%zx\n",
-                        __func__, dest, len);
+               dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n",
+                        __func__, &dest, len);
 
                if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
                        dev_err(chan2dev(chan), "%s: buffer is not aligned\n",
@@ -1439,9 +1439,9 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
        unsigned int            periods = buf_len / period_len;
        unsigned int            i;
 
-       dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
+       dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%d/%d)\n",
                        direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
-                       buf_addr,
+                       &buf_addr,
                        periods, buf_len, period_len);
 
        if (unlikely(!atslave || !buf_len || !period_len)) {
index d1cfc8c..7f58f06 100644 (file)
@@ -385,9 +385,9 @@ static void vdbg_dump_regs(struct at_dma_chan *atchan) {}
 static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
 {
        dev_crit(chan2dev(&atchan->chan_common),
-                "  desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
-                lli->saddr, lli->daddr,
-                lli->ctrla, lli->ctrlb, lli->dscr);
+                "  desc: s%pad d%pad ctrl0x%x:0x%x l0x%pad\n",
+                &lli->saddr, &lli->daddr,
+                lli->ctrla, lli->ctrlb, &lli->dscr);
 }
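
The printk conversions in these DMA drivers swap 0x%08x for %pad and pass each dma_addr_t by reference, so the format string no longer assumes a 32-bit DMA address. %pad is a kernel-only extension; the underlying width problem can be shown in plain userspace C with a hypothetical 64-bit dma_addr_t:

#include <stdio.h>
#include <inttypes.h>

typedef uint64_t dma_addr_t;	/* stand-in for a CONFIG_ARCH_DMA_ADDR_T_64BIT build */

int main(void)
{
	dma_addr_t dest = 0x1234567890ULL;	/* made-up bus address */

	/* Truncating format: only the low 32 bits survive the cast. */
	printf("wrong: d0x%08x\n", (unsigned int)dest);

	/* Width-correct format, the spirit of the kernel's %pad. */
	printf("right: d0x%" PRIx64 "\n", (uint64_t)dest);
	return 0;
}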
 
 
index b5e132d..7f039de 100644 (file)
@@ -920,8 +920,8 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
        desc->lld.mbr_cfg = chan_cc;
 
        dev_dbg(chan2dev(chan),
-               "%s: lld: mbr_sa=0x%08x, mbr_da=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
-               __func__, desc->lld.mbr_sa, desc->lld.mbr_da,
+               "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
+               __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da,
                desc->lld.mbr_ubc, desc->lld.mbr_cfg);
 
        /* Chain lld. */
@@ -953,8 +953,8 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
        if ((xt->numf > 1) && (xt->frame_size > 1))
                return NULL;
 
-       dev_dbg(chan2dev(chan), "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n",
-               __func__, xt->src_start, xt->dst_start, xt->numf,
+       dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
+               __func__, &xt->src_start, &xt->dst_start, xt->numf,
                xt->frame_size, flags);
 
        src_addr = xt->src_start;
@@ -1179,8 +1179,8 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
        desc->lld.mbr_cfg = chan_cc;
 
        dev_dbg(chan2dev(chan),
-               "%s: lld: mbr_da=0x%08x, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
-               __func__, desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc,
+               "%s: lld: mbr_da=%pad, mbr_ds=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
+               __func__, &desc->lld.mbr_da, &desc->lld.mbr_ds, desc->lld.mbr_ubc,
                desc->lld.mbr_cfg);
 
        return desc;
@@ -1193,8 +1193,8 @@ at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
        struct at_xdmac_desc    *desc;
 
-       dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n",
-               __func__, dest, len, value, flags);
+       dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
+               __func__, &dest, len, value, flags);
 
        if (unlikely(!len))
                return NULL;
@@ -1229,8 +1229,8 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
        /* Prepare descriptors. */
        for_each_sg(sgl, sg, sg_len, i) {
-               dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n",
-                       __func__, sg_dma_address(sg), sg_dma_len(sg),
+               dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
+                       __func__, &sg_dma_address(sg), sg_dma_len(sg),
                        value, flags);
                desc = at_xdmac_memset_create_desc(chan, atchan,
                                                   sg_dma_address(sg),
index 6b03e4e..0675e26 100644 (file)
 
 /* CCCFG register */
 #define GET_NUM_DMACH(x)       (x & 0x7) /* bits 0-2 */
-#define GET_NUM_QDMACH(x)      (x & 0x70 >> 4) /* bits 4-6 */
+#define GET_NUM_QDMACH(x)      ((x & 0x70) >> 4) /* bits 4-6 */
 #define GET_NUM_PAENTRY(x)     ((x & 0x7000) >> 12) /* bits 12-14 */
 #define GET_NUM_EVQUE(x)       ((x & 0x70000) >> 16) /* bits 16-18 */
 #define GET_NUM_REGN(x)                ((x & 0x300000) >> 20) /* bits 20-21 */
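
The GET_NUM_QDMACH change above is a pure operator-precedence fix: in C, >> binds tighter than &, so x & 0x70 >> 4 parses as x & (0x70 >> 4), which is x & 0x7 and reads bits 0-2 instead of bits 4-6. A quick self-contained check with an arbitrary CCCFG value:

#include <stdio.h>

#define GET_NUM_QDMACH_BUGGY(x)	(x & 0x70 >> 4)		/* x & (0x70 >> 4) == x & 0x7 */
#define GET_NUM_QDMACH_FIXED(x)	((x & 0x70) >> 4)	/* bits 4-6, as intended */

int main(void)
{
	unsigned int cccfg = 0x00021225;	/* hypothetical register value */

	printf("buggy: %u, fixed: %u\n",
	       GET_NUM_QDMACH_BUGGY(cccfg), GET_NUM_QDMACH_FIXED(cccfg));
	return 0;
}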
@@ -1565,7 +1565,7 @@ static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
        struct platform_device *tc_pdev;
        int ret;
 
-       if (!tc)
+       if (!IS_ENABLED(CONFIG_OF) || !tc)
                return;
 
        tc_pdev = of_find_device_by_node(tc->node);
index 7058d58..0f6fd42 100644 (file)
@@ -1462,7 +1462,7 @@ err_firmware:
 
 #define EVENT_REMAP_CELLS 3
 
-static int __init sdma_event_remap(struct sdma_engine *sdma)
+static int sdma_event_remap(struct sdma_engine *sdma)
 {
        struct device_node *np = sdma->dev->of_node;
        struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
index ebd8a5f..f1bcc2a 100644 (file)
@@ -679,8 +679,11 @@ static int usb_dmac_runtime_suspend(struct device *dev)
        struct usb_dmac *dmac = dev_get_drvdata(dev);
        int i;
 
-       for (i = 0; i < dmac->n_channels; ++i)
+       for (i = 0; i < dmac->n_channels; ++i) {
+               if (!dmac->channels[i].iomem)
+                       break;
                usb_dmac_chan_halt(&dmac->channels[i]);
+       }
 
        return 0;
 }
@@ -799,11 +802,10 @@ static int usb_dmac_probe(struct platform_device *pdev)
        ret = pm_runtime_get_sync(&pdev->dev);
        if (ret < 0) {
                dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
-               return ret;
+               goto error_pm;
        }
 
        ret = usb_dmac_init(dmac);
-       pm_runtime_put(&pdev->dev);
 
        if (ret) {
                dev_err(&pdev->dev, "failed to reset device\n");
@@ -851,10 +853,13 @@ static int usb_dmac_probe(struct platform_device *pdev)
        if (ret < 0)
                goto error;
 
+       pm_runtime_put(&pdev->dev);
        return 0;
 
 error:
        of_dma_controller_free(pdev->dev.of_node);
+       pm_runtime_put(&pdev->dev);
+error_pm:
        pm_runtime_disable(&pdev->dev);
        return ret;
 }
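
The usb-dmac probe rework keeps the runtime-PM reference held across channel setup and drops it on every exit path, adding an error_pm label so pm_runtime_disable() still runs when pm_runtime_get_sync() itself fails. The shape of that goto unwind, reduced to a generic runnable sketch with dummy helpers standing in for the PM calls:

#include <stdio.h>

/* Stand-ins for pm_runtime_get_sync()/put()/disable(). */
static int  runtime_get(void)     { puts("get");     return 0; }
static void runtime_put(void)     { puts("put");     }
static void runtime_disable(void) { puts("disable"); }
static int  init_hw(void)         { puts("init");    return -1; /* force a failure */ }

static int probe(void)
{
	int ret = runtime_get();
	if (ret < 0)
		goto error_pm;

	ret = init_hw();
	if (ret)
		goto error;

	runtime_put();		/* success: drop the reference we took */
	return 0;

error:
	runtime_put();		/* failure after get: still balance the reference */
error_pm:
	runtime_disable();
	return ret;
}

int main(void)
{
	printf("probe() = %d\n", probe());
	return 0;
}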
index 6ed7c0f..6b18682 100644 (file)
@@ -113,13 +113,16 @@ static int mmio_74xx_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 
 static int mmio_74xx_gpio_probe(struct platform_device *pdev)
 {
-       const struct of_device_id *of_id =
-               of_match_device(mmio_74xx_gpio_ids, &pdev->dev);
+       const struct of_device_id *of_id;
        struct mmio_74xx_gpio_priv *priv;
        struct resource *res;
        void __iomem *dat;
        int err;
 
+       of_id = of_match_device(mmio_74xx_gpio_ids, &pdev->dev);
+       if (!of_id)
+               return -ENODEV;
+
        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
index 56d2d02..f7fbb46 100644 (file)
@@ -1122,8 +1122,6 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
        /* MPUIO is a bit different, reading IRQ status clears it */
        if (bank->is_mpuio) {
                irqc->irq_ack = dummy_irq_chip.irq_ack;
-               irqc->irq_mask = irq_gc_mask_set_bit;
-               irqc->irq_unmask = irq_gc_mask_clr_bit;
                if (!bank->regs->wkup_en)
                        irqc->irq_set_wake = NULL;
        }
index 171a638..52b447c 100644 (file)
@@ -167,6 +167,8 @@ static int palmas_gpio_probe(struct platform_device *pdev)
        const struct palmas_device_data *dev_data;
 
        match = of_match_device(of_palmas_gpio_match, &pdev->dev);
+       if (!match)
+               return -ENODEV;
        dev_data = match->data;
        if (!dev_data)
                dev_data = &palmas_dev_data;
index 045a952..7b25fdf 100644 (file)
@@ -187,11 +187,15 @@ MODULE_DEVICE_TABLE(of, syscon_gpio_ids);
 static int syscon_gpio_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       const struct of_device_id *of_id = of_match_device(syscon_gpio_ids, dev);
+       const struct of_device_id *of_id;
        struct syscon_gpio_priv *priv;
        struct device_node *np = dev->of_node;
        int ret;
 
+       of_id = of_match_device(syscon_gpio_ids, dev);
+       if (!of_id)
+               return -ENODEV;
+
        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
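
The three GPIO probe fixes above share one pattern: of_match_device() may return NULL, for instance when the device was not instantiated from the matched table, so the result has to be checked before ->data is dereferenced. A generic sketch of the same defensive lookup, with hypothetical compatible strings:

#include <stdio.h>
#include <string.h>
#include <errno.h>

struct of_device_id {
	const char *compatible;
	const void *data;
};

static const struct of_device_id ids[] = {
	{ .compatible = "vendor,gpio-a", .data = "config A" },
	{ .compatible = "vendor,gpio-b", .data = "config B" },
	{ /* sentinel */ }
};

static const struct of_device_id *match_device(const char *compatible)
{
	for (const struct of_device_id *id = ids; id->compatible; id++)
		if (!strcmp(id->compatible, compatible))
			return id;
	return NULL;			/* no match: caller must handle this */
}

static int probe(const char *compatible)
{
	const struct of_device_id *of_id = match_device(compatible);

	if (!of_id)
		return -ENODEV;		/* bail out before touching of_id->data */

	printf("probing with %s\n", (const char *)of_id->data);
	return 0;
}

int main(void)
{
	printf("%d\n", probe("vendor,gpio-a"));
	printf("%d\n", probe("vendor,gpio-unknown"));
	return 0;
}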
index 027e5f4..896bf29 100644 (file)
@@ -375,6 +375,60 @@ static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
 }
 #endif
 
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static int dbg_gpio_show(struct seq_file *s, void *unused)
+{
+       int i;
+       int j;
+
+       for (i = 0; i < tegra_gpio_bank_count; i++) {
+               for (j = 0; j < 4; j++) {
+                       int gpio = tegra_gpio_compose(i, j, 0);
+                       seq_printf(s,
+                               "%d:%d %02x %02x %02x %02x %02x %02x %06x\n",
+                               i, j,
+                               tegra_gpio_readl(GPIO_CNF(gpio)),
+                               tegra_gpio_readl(GPIO_OE(gpio)),
+                               tegra_gpio_readl(GPIO_OUT(gpio)),
+                               tegra_gpio_readl(GPIO_IN(gpio)),
+                               tegra_gpio_readl(GPIO_INT_STA(gpio)),
+                               tegra_gpio_readl(GPIO_INT_ENB(gpio)),
+                               tegra_gpio_readl(GPIO_INT_LVL(gpio)));
+               }
+       }
+       return 0;
+}
+
+static int dbg_gpio_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, dbg_gpio_show, &inode->i_private);
+}
+
+static const struct file_operations debug_fops = {
+       .open           = dbg_gpio_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static void tegra_gpio_debuginit(void)
+{
+       (void) debugfs_create_file("tegra_gpio", S_IRUGO,
+                                       NULL, NULL, &debug_fops);
+}
+
+#else
+
+static inline void tegra_gpio_debuginit(void)
+{
+}
+
+#endif
+
 static struct irq_chip tegra_gpio_irq_chip = {
        .name           = "GPIO",
        .irq_ack        = tegra_gpio_irq_ack,
@@ -519,6 +573,8 @@ static int tegra_gpio_probe(struct platform_device *pdev)
                        spin_lock_init(&bank->lvl_lock[j]);
        }
 
+       tegra_gpio_debuginit();
+
        return 0;
 }
 
@@ -536,52 +592,3 @@ static int __init tegra_gpio_init(void)
        return platform_driver_register(&tegra_gpio_driver);
 }
 postcore_initcall(tegra_gpio_init);
-
-#ifdef CONFIG_DEBUG_FS
-
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
-static int dbg_gpio_show(struct seq_file *s, void *unused)
-{
-       int i;
-       int j;
-
-       for (i = 0; i < tegra_gpio_bank_count; i++) {
-               for (j = 0; j < 4; j++) {
-                       int gpio = tegra_gpio_compose(i, j, 0);
-                       seq_printf(s,
-                               "%d:%d %02x %02x %02x %02x %02x %02x %06x\n",
-                               i, j,
-                               tegra_gpio_readl(GPIO_CNF(gpio)),
-                               tegra_gpio_readl(GPIO_OE(gpio)),
-                               tegra_gpio_readl(GPIO_OUT(gpio)),
-                               tegra_gpio_readl(GPIO_IN(gpio)),
-                               tegra_gpio_readl(GPIO_INT_STA(gpio)),
-                               tegra_gpio_readl(GPIO_INT_ENB(gpio)),
-                               tegra_gpio_readl(GPIO_INT_LVL(gpio)));
-               }
-       }
-       return 0;
-}
-
-static int dbg_gpio_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, dbg_gpio_show, &inode->i_private);
-}
-
-static const struct file_operations debug_fops = {
-       .open           = dbg_gpio_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
-static int __init tegra_gpio_debuginit(void)
-{
-       (void) debugfs_create_file("tegra_gpio", S_IRUGO,
-                                       NULL, NULL, &debug_fops);
-       return 0;
-}
-late_initcall(tegra_gpio_debuginit);
-#endif
index a18f00f..2a91f32 100644 (file)
@@ -233,7 +233,7 @@ static struct gpio_desc *gpio_name_to_desc(const char * const name)
                for (i = 0; i != chip->ngpio; ++i) {
                        struct gpio_desc *gpio = &chip->desc[i];
 
-                       if (!gpio->name)
+                       if (!gpio->name || !name)
                                continue;
 
                        if (!strcmp(gpio->name, name)) {
index 615ce6d..251b147 100644 (file)
@@ -389,7 +389,6 @@ struct amdgpu_clock {
  * Fences.
  */
 struct amdgpu_fence_driver {
-       struct amdgpu_ring              *ring;
        uint64_t                        gpu_addr;
        volatile uint32_t               *cpu_addr;
        /* sync_seq is protected by ring emission lock */
@@ -398,7 +397,7 @@ struct amdgpu_fence_driver {
        bool                            initialized;
        struct amdgpu_irq_src           *irq_src;
        unsigned                        irq_type;
-       struct delayed_work             lockup_work;
+       struct timer_list               fallback_timer;
        wait_queue_head_t               fence_queue;
 };
 
@@ -497,6 +496,7 @@ struct amdgpu_bo_va_mapping {
 
 /* bo virtual addresses in a specific vm */
 struct amdgpu_bo_va {
+       struct mutex                    mutex;
        /* protected by bo being reserved */
        struct list_head                bo_list;
        struct fence                    *last_pt_update;
@@ -917,8 +917,8 @@ struct amdgpu_ring {
 #define AMDGPU_VM_FAULT_STOP_ALWAYS    2
 
 struct amdgpu_vm_pt {
-       struct amdgpu_bo                *bo;
-       uint64_t                        addr;
+       struct amdgpu_bo        *bo;
+       uint64_t                addr;
 };
 
 struct amdgpu_vm_id {
@@ -926,13 +926,9 @@ struct amdgpu_vm_id {
        uint64_t                pd_gpu_addr;
        /* last flushed PD/PT update */
        struct fence            *flushed_updates;
-       /* last use of vmid */
-       struct fence            *last_id_use;
 };
 
 struct amdgpu_vm {
-       struct mutex            mutex;
-
        struct rb_root          va;
 
        /* protecting invalidated */
@@ -957,24 +953,70 @@ struct amdgpu_vm {
 
        /* for id and flush management per ring */
        struct amdgpu_vm_id     ids[AMDGPU_MAX_RINGS];
+       /* for interval tree */
+       spinlock_t              it_lock;
 };
 
 struct amdgpu_vm_manager {
-       struct fence                    *active[AMDGPU_NUM_VM];
-       uint32_t                        max_pfn;
+       struct {
+               struct fence    *active;
+               atomic_long_t   owner;
+       } ids[AMDGPU_NUM_VM];
+
+       uint32_t                                max_pfn;
        /* number of VMIDs */
-       unsigned                        nvm;
+       unsigned                                nvm;
        /* vram base address for page table entry  */
-       u64                             vram_base_offset;
+       u64                                     vram_base_offset;
        /* is vm enabled? */
-       bool                            enabled;
-       /* for hw to save the PD addr on suspend/resume */
-       uint32_t                        saved_table_addr[AMDGPU_NUM_VM];
+       bool                                    enabled;
        /* vm pte handling */
        const struct amdgpu_vm_pte_funcs        *vm_pte_funcs;
        struct amdgpu_ring                      *vm_pte_funcs_ring;
 };
 
+void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
+                                              struct amdgpu_vm *vm,
+                                              struct list_head *head);
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+                     struct amdgpu_sync *sync);
+void amdgpu_vm_flush(struct amdgpu_ring *ring,
+                    struct amdgpu_vm *vm,
+                    struct fence *updates);
+void amdgpu_vm_fence(struct amdgpu_device *adev,
+                    struct amdgpu_vm *vm,
+                    struct fence *fence);
+uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+                                   struct amdgpu_vm *vm);
+int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
+                         struct amdgpu_vm *vm);
+int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+                            struct amdgpu_sync *sync);
+int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+                       struct amdgpu_bo_va *bo_va,
+                       struct ttm_mem_reg *mem);
+void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
+                            struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
+                                      struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
+                                     struct amdgpu_vm *vm,
+                                     struct amdgpu_bo *bo);
+int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+                    struct amdgpu_bo_va *bo_va,
+                    uint64_t addr, uint64_t offset,
+                    uint64_t size, uint32_t flags);
+int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
+                      struct amdgpu_bo_va *bo_va,
+                      uint64_t addr);
+void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+                     struct amdgpu_bo_va *bo_va);
+int amdgpu_vm_free_job(struct amdgpu_job *job);
+
 /*
  * context related structures
  */
@@ -1211,6 +1253,7 @@ struct amdgpu_cs_parser {
        /* relocations */
        struct amdgpu_bo_list_entry     *vm_bos;
        struct list_head        validated;
+       struct fence            *fence;
 
        struct amdgpu_ib        *ibs;
        uint32_t                num_ibs;
@@ -1226,7 +1269,7 @@ struct amdgpu_job {
        struct amdgpu_device    *adev;
        struct amdgpu_ib        *ibs;
        uint32_t                num_ibs;
-       struct mutex            job_lock;
+       void                    *owner;
        struct amdgpu_user_fence uf;
        int (*free_job)(struct amdgpu_job *job);
 };
@@ -2257,11 +2300,6 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev);
 bool amdgpu_card_posted(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
 bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
-struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
-                                                struct drm_file *filp,
-                                                struct amdgpu_ctx *ctx,
-                                                struct amdgpu_ib *ibs,
-                                                uint32_t num_ibs);
 
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
@@ -2318,49 +2356,6 @@ int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
 long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
                             unsigned long arg);
 
-/*
- * vm
- */
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
-                                         struct amdgpu_vm *vm,
-                                         struct list_head *head);
-int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-                     struct amdgpu_sync *sync);
-void amdgpu_vm_flush(struct amdgpu_ring *ring,
-                    struct amdgpu_vm *vm,
-                    struct fence *updates);
-void amdgpu_vm_fence(struct amdgpu_device *adev,
-                    struct amdgpu_vm *vm,
-                    struct amdgpu_fence *fence);
-uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
-                                   struct amdgpu_vm *vm);
-int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
-                               struct amdgpu_vm *vm);
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
-                               struct amdgpu_vm *vm, struct amdgpu_sync *sync);
-int amdgpu_vm_bo_update(struct amdgpu_device *adev,
-                       struct amdgpu_bo_va *bo_va,
-                       struct ttm_mem_reg *mem);
-void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
-                            struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
-                                      struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
-                                     struct amdgpu_vm *vm,
-                                     struct amdgpu_bo *bo);
-int amdgpu_vm_bo_map(struct amdgpu_device *adev,
-                    struct amdgpu_bo_va *bo_va,
-                    uint64_t addr, uint64_t offset,
-                    uint64_t size, uint32_t flags);
-int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
-                      struct amdgpu_bo_va *bo_va,
-                      uint64_t addr);
-void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
-                     struct amdgpu_bo_va *bo_va);
-int amdgpu_vm_free_job(struct amdgpu_job *job);
 /*
  * functions used by amdgpu_encoder.c
  */
index dfc4d02..1d44d50 100644 (file)
@@ -127,30 +127,6 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
        return 0;
 }
 
-struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
-                                               struct drm_file *filp,
-                                               struct amdgpu_ctx *ctx,
-                                               struct amdgpu_ib *ibs,
-                                               uint32_t num_ibs)
-{
-       struct amdgpu_cs_parser *parser;
-       int i;
-
-       parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL);
-       if (!parser)
-               return NULL;
-
-       parser->adev = adev;
-       parser->filp = filp;
-       parser->ctx = ctx;
-       parser->ibs = ibs;
-       parser->num_ibs = num_ibs;
-       for (i = 0; i < num_ibs; i++)
-               ibs[i].ctx = ctx;
-
-       return parser;
-}
-
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 {
        union drm_amdgpu_cs *cs = data;
@@ -463,8 +439,18 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a,
        return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
 }
 
-static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff)
+/**
+ * cs_parser_fini() - clean parser states
+ * @parser:    parser structure holding parsing context.
+ * @error:     error number
+ *
+ * If error is set, unvalidate the buffers; otherwise just free the memory
+ * used by the parsing context.
+ **/
+static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
 {
+       unsigned i;
+
        if (!error) {
                /* Sort the buffer list from the smallest to largest buffer,
                 * which affects the order of buffers in the LRU list.
@@ -479,17 +465,14 @@ static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int err
                list_sort(NULL, &parser->validated, cmp_size_smaller_first);
 
                ttm_eu_fence_buffer_objects(&parser->ticket,
-                               &parser->validated,
-                               &parser->ibs[parser->num_ibs-1].fence->base);
+                                           &parser->validated,
+                                           parser->fence);
        } else if (backoff) {
                ttm_eu_backoff_reservation(&parser->ticket,
                                           &parser->validated);
        }
-}
+       fence_put(parser->fence);
 
-static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
-{
-       unsigned i;
        if (parser->ctx)
                amdgpu_ctx_put(parser->ctx);
        if (parser->bo_list)
@@ -499,31 +482,12 @@ static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
        for (i = 0; i < parser->nchunks; i++)
                drm_free_large(parser->chunks[i].kdata);
        kfree(parser->chunks);
-       if (!amdgpu_enable_scheduler)
-       {
-               if (parser->ibs)
-                       for (i = 0; i < parser->num_ibs; i++)
-                               amdgpu_ib_free(parser->adev, &parser->ibs[i]);
-               kfree(parser->ibs);
-               if (parser->uf.bo)
-                       drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
-       }
-
-       kfree(parser);
-}
-
-/**
- * cs_parser_fini() - clean parser states
- * @parser:    parser structure holding parsing context.
- * @error:     error number
- *
- * If error is set than unvalidate buffer, otherwise just free memory
- * used by parsing context.
- **/
-static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
-{
-       amdgpu_cs_parser_fini_early(parser, error, backoff);
-       amdgpu_cs_parser_fini_late(parser);
+       if (parser->ibs)
+               for (i = 0; i < parser->num_ibs; i++)
+                       amdgpu_ib_free(parser->adev, &parser->ibs[i]);
+       kfree(parser->ibs);
+       if (parser->uf.bo)
+               drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
 }
 
 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
@@ -610,15 +574,9 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
        }
 
        r = amdgpu_bo_vm_update_pte(parser, vm);
-       if (r) {
-               goto out;
-       }
-       amdgpu_cs_sync_rings(parser);
-       if (!amdgpu_enable_scheduler)
-               r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
-                                      parser->filp);
+       if (!r)
+               amdgpu_cs_sync_rings(parser);
 
-out:
        return r;
 }
 
@@ -826,38 +784,35 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
        struct amdgpu_device *adev = dev->dev_private;
        union drm_amdgpu_cs *cs = data;
-       struct amdgpu_fpriv *fpriv = filp->driver_priv;
-       struct amdgpu_vm *vm = &fpriv->vm;
-       struct amdgpu_cs_parser *parser;
+       struct amdgpu_cs_parser parser = {};
        bool reserved_buffers = false;
        int i, r;
 
        if (!adev->accel_working)
                return -EBUSY;
 
-       parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0);
-       if (!parser)
-               return -ENOMEM;
-       r = amdgpu_cs_parser_init(parser, data);
+       parser.adev = adev;
+       parser.filp = filp;
+
+       r = amdgpu_cs_parser_init(&parser, data);
        if (r) {
                DRM_ERROR("Failed to initialize parser !\n");
-               amdgpu_cs_parser_fini(parser, r, false);
+               amdgpu_cs_parser_fini(&parser, r, false);
                r = amdgpu_cs_handle_lockup(adev, r);
                return r;
        }
-       mutex_lock(&vm->mutex);
-       r = amdgpu_cs_parser_relocs(parser);
+       r = amdgpu_cs_parser_relocs(&parser);
        if (r == -ENOMEM)
                DRM_ERROR("Not enough memory for command submission!\n");
        else if (r && r != -ERESTARTSYS)
                DRM_ERROR("Failed to process the buffer list %d!\n", r);
        else if (!r) {
                reserved_buffers = true;
-               r = amdgpu_cs_ib_fill(adev, parser);
+               r = amdgpu_cs_ib_fill(adev, &parser);
        }
 
        if (!r) {
-               r = amdgpu_cs_dependencies(adev, parser);
+               r = amdgpu_cs_dependencies(adev, &parser);
                if (r)
                        DRM_ERROR("Failed in the dependencies handling %d!\n", r);
        }
@@ -865,63 +820,71 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
        if (r)
                goto out;
 
-       for (i = 0; i < parser->num_ibs; i++)
-               trace_amdgpu_cs(parser, i);
+       for (i = 0; i < parser.num_ibs; i++)
+               trace_amdgpu_cs(&parser, i);
 
-       r = amdgpu_cs_ib_vm_chunk(adev, parser);
+       r = amdgpu_cs_ib_vm_chunk(adev, &parser);
        if (r)
                goto out;
 
-       if (amdgpu_enable_scheduler && parser->num_ibs) {
+       if (amdgpu_enable_scheduler && parser.num_ibs) {
+               struct amdgpu_ring * ring = parser.ibs->ring;
+               struct amd_sched_fence *fence;
                struct amdgpu_job *job;
-               struct amdgpu_ring * ring =  parser->ibs->ring;
+
                job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
                if (!job) {
                        r = -ENOMEM;
                        goto out;
                }
+
                job->base.sched = &ring->sched;
-               job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
-               job->adev = parser->adev;
-               job->ibs = parser->ibs;
-               job->num_ibs = parser->num_ibs;
-               job->base.owner = parser->filp;
-               mutex_init(&job->job_lock);
+               job->base.s_entity = &parser.ctx->rings[ring->idx].entity;
+               job->adev = parser.adev;
+               job->owner = parser.filp;
+               job->free_job = amdgpu_cs_free_job;
+
+               job->ibs = parser.ibs;
+               job->num_ibs = parser.num_ibs;
+               parser.ibs = NULL;
+               parser.num_ibs = 0;
+
                if (job->ibs[job->num_ibs - 1].user) {
-                       memcpy(&job->uf,  &parser->uf,
-                              sizeof(struct amdgpu_user_fence));
+                       job->uf = parser.uf;
                        job->ibs[job->num_ibs - 1].user = &job->uf;
+                       parser.uf.bo = NULL;
                }
 
-               job->free_job = amdgpu_cs_free_job;
-               mutex_lock(&job->job_lock);
-               r = amd_sched_entity_push_job(&job->base);
-               if (r) {
-                       mutex_unlock(&job->job_lock);
+               fence = amd_sched_fence_create(job->base.s_entity,
+                                              parser.filp);
+               if (!fence) {
+                       r = -ENOMEM;
                        amdgpu_cs_free_job(job);
                        kfree(job);
                        goto out;
                }
-               cs->out.handle =
-                       amdgpu_ctx_add_fence(parser->ctx, ring,
-                                            &job->base.s_fence->base);
-               parser->ibs[parser->num_ibs - 1].sequence = cs->out.handle;
+               job->base.s_fence = fence;
+               parser.fence = fence_get(&fence->base);
 
-               list_sort(NULL, &parser->validated, cmp_size_smaller_first);
-               ttm_eu_fence_buffer_objects(&parser->ticket,
-                               &parser->validated,
-                               &job->base.s_fence->base);
+               cs->out.handle = amdgpu_ctx_add_fence(parser.ctx, ring,
+                                                     &fence->base);
+               job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
 
-               mutex_unlock(&job->job_lock);
-               amdgpu_cs_parser_fini_late(parser);
-               mutex_unlock(&vm->mutex);
-               return 0;
+               trace_amdgpu_cs_ioctl(job);
+               amd_sched_entity_push_job(&job->base);
+
+       } else {
+               struct amdgpu_fence *fence;
+
+               r = amdgpu_ib_schedule(adev, parser.num_ibs, parser.ibs,
+                                      parser.filp);
+               fence = parser.ibs[parser.num_ibs - 1].fence;
+               parser.fence = fence_get(&fence->base);
+               cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence;
        }
 
-       cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
 out:
-       amdgpu_cs_parser_fini(parser, r, reserved_buffers);
-       mutex_unlock(&vm->mutex);
+       amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
        r = amdgpu_cs_handle_lockup(adev, r);
        return r;
 }
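
amdgpu_cs_ioctl() now builds the command-submission parser on the stack with a zero initializer instead of allocating it with kzalloc(), dropping an allocation, a NULL check and a free from the path; the per-VM mutex around the whole ioctl goes away too, in favour of the finer-grained bo_va mutex and interval-tree spinlock added earlier in this series. A tiny illustration of a zero-initialized aggregate versus heap allocation, using a made-up struct:

#include <stdio.h>
#include <stdlib.h>

struct parser {
	void *ibs;
	unsigned int num_ibs;
	int error;
};

int main(void)
{
	/* Heap path: allocate, zero, check, free. */
	struct parser *heap = calloc(1, sizeof(*heap));
	if (!heap)
		return 1;
	printf("heap:  num_ibs=%u error=%d\n", heap->num_ibs, heap->error);
	free(heap);

	/* Stack path: a zero initializer clears every member, no failure case to handle. */
	struct parser stack = {0};
	printf("stack: num_ibs=%u error=%d\n", stack.num_ibs, stack.error);
	return 0;
}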
index 257d722..3671f9f 100644 (file)
@@ -47,6 +47,9 @@
  * that the relevant GPU caches have been flushed.
  */
 
+static struct kmem_cache *amdgpu_fence_slab;
+static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);
+
 /**
  * amdgpu_fence_write - write a fence value
  *
@@ -84,24 +87,6 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
        return seq;
 }
 
-/**
- * amdgpu_fence_schedule_check - schedule lockup check
- *
- * @ring: pointer to struct amdgpu_ring
- *
- * Queues a delayed work item to check for lockups.
- */
-static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring)
-{
-       /*
-        * Do not reset the timer here with mod_delayed_work,
-        * this can livelock in an interaction with TTM delayed destroy.
-        */
-       queue_delayed_work(system_power_efficient_wq,
-               &ring->fence_drv.lockup_work,
-               AMDGPU_FENCE_JIFFIES_TIMEOUT);
-}
-
 /**
  * amdgpu_fence_emit - emit a fence on the requested ring
  *
@@ -118,7 +103,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
        struct amdgpu_device *adev = ring->adev;
 
        /* we are protected by the ring emission mutex */
-       *fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
+       *fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
        if ((*fence) == NULL) {
                return -ENOMEM;
        }
@@ -132,10 +117,22 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
                               (*fence)->seq,
                               AMDGPU_FENCE_FLAG_INT);
-       trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq);
        return 0;
 }
 
+/**
+ * amdgpu_fence_schedule_fallback - schedule fallback check
+ *
+ * @ring: pointer to struct amdgpu_ring
+ *
+ * Start a timer as fallback to our interrupts.
+ */
+static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
+{
+       mod_timer(&ring->fence_drv.fallback_timer,
+                 jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
+}
+
 /**
  * amdgpu_fence_activity - check for fence activity
  *
@@ -202,45 +199,38 @@ static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
        } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
 
        if (seq < last_emitted)
-               amdgpu_fence_schedule_check(ring);
+               amdgpu_fence_schedule_fallback(ring);
 
        return wake;
 }
 
 /**
- * amdgpu_fence_check_lockup - check for hardware lockup
+ * amdgpu_fence_process - process a fence
  *
- * @work: delayed work item
+ * @adev: amdgpu_device pointer
+ * @ring: ring index the fence is associated with
  *
- * Checks for fence activity and if there is none probe
- * the hardware if a lockup occured.
+ * Checks the current fence value and wakes the fence queue
+ * if the sequence number has increased (all asics).
  */
-static void amdgpu_fence_check_lockup(struct work_struct *work)
+void amdgpu_fence_process(struct amdgpu_ring *ring)
 {
-       struct amdgpu_fence_driver *fence_drv;
-       struct amdgpu_ring *ring;
-
-       fence_drv = container_of(work, struct amdgpu_fence_driver,
-                               lockup_work.work);
-       ring = fence_drv->ring;
-
        if (amdgpu_fence_activity(ring))
                wake_up_all(&ring->fence_drv.fence_queue);
 }
 
 /**
- * amdgpu_fence_process - process a fence
+ * amdgpu_fence_fallback - fallback for hardware interrupts
  *
- * @adev: amdgpu_device pointer
- * @ring: ring index the fence is associated with
+ * @work: delayed work item
  *
- * Checks the current fence value and wakes the fence queue
- * if the sequence number has increased (all asics).
+ * Checks for fence activity.
  */
-void amdgpu_fence_process(struct amdgpu_ring *ring)
+static void amdgpu_fence_fallback(unsigned long arg)
 {
-       if (amdgpu_fence_activity(ring))
-               wake_up_all(&ring->fence_drv.fence_queue);
+       struct amdgpu_ring *ring = (void *)arg;
+
+       amdgpu_fence_process(ring);
 }
 
 /**
@@ -290,7 +280,7 @@ static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
        if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
                return 0;
 
-       amdgpu_fence_schedule_check(ring);
+       amdgpu_fence_schedule_fallback(ring);
        wait_event(ring->fence_drv.fence_queue, (
                   (signaled = amdgpu_fence_seq_signaled(ring, seq))));
 
@@ -491,9 +481,8 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
        atomic64_set(&ring->fence_drv.last_seq, 0);
        ring->fence_drv.initialized = false;
 
-       INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
-                       amdgpu_fence_check_lockup);
-       ring->fence_drv.ring = ring;
+       setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
+                   (unsigned long)ring);
 
        init_waitqueue_head(&ring->fence_drv.fence_queue);
 
@@ -536,6 +525,13 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
  */
 int amdgpu_fence_driver_init(struct amdgpu_device *adev)
 {
+       if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) {
+               amdgpu_fence_slab = kmem_cache_create(
+                       "amdgpu_fence", sizeof(struct amdgpu_fence), 0,
+                       SLAB_HWCACHE_ALIGN, NULL);
+               if (!amdgpu_fence_slab)
+                       return -ENOMEM;
+       }
        if (amdgpu_debugfs_fence_init(adev))
                dev_err(adev->dev, "fence debugfs file creation failed\n");
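
amdgpu fences now come from a dedicated kmem_cache rather than kmalloc(), and since several devices can share the module, the cache is created by the first fence-driver init and destroyed by the last teardown, guarded by a global atomic reference count. A userspace analogue of that first-in/last-out pattern, using C11 atomics and a malloc-backed stand-in for the slab; as in the driver, it relies on init and fini being serialized by the caller:

#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>

static atomic_int pool_ref;	/* mirrors amdgpu_fence_slab_ref */
static void *fence_pool;	/* stand-in for the kmem_cache pointer */

static int fence_driver_init(void)
{
	if (atomic_fetch_add(&pool_ref, 1) == 0) {	/* first user creates the pool */
		fence_pool = malloc(4096);
		if (!fence_pool)
			return -1;
		puts("pool created");
	}
	return 0;
}

static void fence_driver_fini(void)
{
	if (atomic_fetch_sub(&pool_ref, 1) == 1) {	/* last user destroys it */
		free(fence_pool);
		fence_pool = NULL;
		puts("pool destroyed");
	}
}

int main(void)
{
	fence_driver_init();	/* device 0 */
	fence_driver_init();	/* device 1: pool already exists */
	fence_driver_fini();
	fence_driver_fini();	/* last put frees the pool */
	return 0;
}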
 
@@ -554,9 +550,12 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 {
        int i, r;
 
+       if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
+               kmem_cache_destroy(amdgpu_fence_slab);
        mutex_lock(&adev->ring_lock);
        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
+
                if (!ring || !ring->fence_drv.initialized)
                        continue;
                r = amdgpu_fence_wait_empty(ring);
@@ -568,6 +567,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
                amdgpu_irq_put(adev, ring->fence_drv.irq_src,
                               ring->fence_drv.irq_type);
                amd_sched_fini(&ring->sched);
+               del_timer_sync(&ring->fence_drv.fallback_timer);
                ring->fence_drv.initialized = false;
        }
        mutex_unlock(&adev->ring_lock);
@@ -751,18 +751,25 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
        fence->fence_wake.func = amdgpu_fence_check_signaled;
        __add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
        fence_get(f);
-       amdgpu_fence_schedule_check(ring);
+       if (!timer_pending(&ring->fence_drv.fallback_timer))
+               amdgpu_fence_schedule_fallback(ring);
        FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
        return true;
 }
 
+static void amdgpu_fence_release(struct fence *f)
+{
+       struct amdgpu_fence *fence = to_amdgpu_fence(f);
+       kmem_cache_free(amdgpu_fence_slab, fence);
+}
+
 const struct fence_ops amdgpu_fence_ops = {
        .get_driver_name = amdgpu_fence_get_driver_name,
        .get_timeline_name = amdgpu_fence_get_timeline_name,
        .enable_signaling = amdgpu_fence_enable_signaling,
        .signaled = amdgpu_fence_is_signaled,
        .wait = fence_default_wait,
-       .release = NULL,
+       .release = amdgpu_fence_release,
 };
 
 /*
index 0873328..fc32fc0 100644 (file)
@@ -115,12 +115,9 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va *bo_va;
        int r;
-       mutex_lock(&vm->mutex);
        r = amdgpu_bo_reserve(rbo, false);
-       if (r) {
-               mutex_unlock(&vm->mutex);
+       if (r)
                return r;
-       }
 
        bo_va = amdgpu_vm_bo_find(vm, rbo);
        if (!bo_va) {
@@ -129,7 +126,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
                ++bo_va->ref_count;
        }
        amdgpu_bo_unreserve(rbo);
-       mutex_unlock(&vm->mutex);
        return 0;
 }
 
@@ -142,10 +138,8 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va *bo_va;
        int r;
-       mutex_lock(&vm->mutex);
        r = amdgpu_bo_reserve(rbo, true);
        if (r) {
-               mutex_unlock(&vm->mutex);
                dev_err(adev->dev, "leaking bo va because "
                        "we fail to reserve bo (%d)\n", r);
                return;
@@ -157,7 +151,6 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
                }
        }
        amdgpu_bo_unreserve(rbo);
-       mutex_unlock(&vm->mutex);
 }
 
 static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
@@ -483,6 +476,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
                if (domain == AMDGPU_GEM_DOMAIN_CPU)
                        goto error_unreserve;
        }
+       r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
+       if (r)
+               goto error_unreserve;
 
        r = amdgpu_vm_clear_freed(adev, bo_va->vm);
        if (r)
@@ -512,6 +508,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
        struct amdgpu_bo *rbo;
        struct amdgpu_bo_va *bo_va;
+       struct ttm_validate_buffer tv, tv_pd;
+       struct ww_acquire_ctx ticket;
+       struct list_head list, duplicates;
        uint32_t invalid_flags, va_flags = 0;
        int r = 0;
 
@@ -547,19 +546,28 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
-       mutex_lock(&fpriv->vm.mutex);
        rbo = gem_to_amdgpu_bo(gobj);
-       r = amdgpu_bo_reserve(rbo, false);
+       INIT_LIST_HEAD(&list);
+       INIT_LIST_HEAD(&duplicates);
+       tv.bo = &rbo->tbo;
+       tv.shared = true;
+       list_add(&tv.head, &list);
+
+       if (args->operation == AMDGPU_VA_OP_MAP) {
+               tv_pd.bo = &fpriv->vm.page_directory->tbo;
+               tv_pd.shared = true;
+               list_add(&tv_pd.head, &list);
+       }
+       r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
        if (r) {
-               mutex_unlock(&fpriv->vm.mutex);
                drm_gem_object_unreference_unlocked(gobj);
                return r;
        }
 
        bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
        if (!bo_va) {
-               amdgpu_bo_unreserve(rbo);
-               mutex_unlock(&fpriv->vm.mutex);
+               ttm_eu_backoff_reservation(&ticket, &list);
+               drm_gem_object_unreference_unlocked(gobj);
                return -ENOENT;
        }
 
@@ -581,10 +589,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
        default:
                break;
        }
-
+       ttm_eu_backoff_reservation(&ticket, &list);
        if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
                amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
-       mutex_unlock(&fpriv->vm.mutex);
+
        drm_gem_object_unreference_unlocked(gobj);
        return r;
 }
index e659877..9e25eda 100644 (file)
@@ -62,7 +62,7 @@ int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
        int r;
 
        if (size) {
-               r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo,
+               r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
                                      &ib->sa_bo, size, 256);
                if (r) {
                        dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
@@ -216,7 +216,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
        }
 
        if (ib->vm)
-               amdgpu_vm_fence(adev, ib->vm, ib->fence);
+               amdgpu_vm_fence(adev, ib->vm, &ib->fence->base);
 
        amdgpu_ring_unlock_commit(ring);
        return 0;
index 3c2ff45..ea756e7 100644 (file)
@@ -189,10 +189,9 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
                                      struct amdgpu_sa_manager *sa_manager);
 int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
                                        struct amdgpu_sa_manager *sa_manager);
-int amdgpu_sa_bo_new(struct amdgpu_device *adev,
-                           struct amdgpu_sa_manager *sa_manager,
-                           struct amdgpu_sa_bo **sa_bo,
-                           unsigned size, unsigned align);
+int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
+                    struct amdgpu_sa_bo **sa_bo,
+                    unsigned size, unsigned align);
 void amdgpu_sa_bo_free(struct amdgpu_device *adev,
                              struct amdgpu_sa_bo **sa_bo,
                              struct fence *fence);
index 0212b31..8b88edb 100644 (file)
@@ -311,8 +311,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
        return false;
 }
 
-int amdgpu_sa_bo_new(struct amdgpu_device *adev,
-                    struct amdgpu_sa_manager *sa_manager,
+int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
                     struct amdgpu_sa_bo **sa_bo,
                     unsigned size, unsigned align)
 {
index dcf4a8a..438c052 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/sched.h>
 #include <drm/drmP.h>
 #include "amdgpu.h"
+#include "amdgpu_trace.h"
 
 static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
 {
@@ -44,11 +45,8 @@ static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
                return NULL;
        }
        job = to_amdgpu_job(sched_job);
-       mutex_lock(&job->job_lock);
-       r = amdgpu_ib_schedule(job->adev,
-                              job->num_ibs,
-                              job->ibs,
-                              job->base.owner);
+       trace_amdgpu_sched_run_job(job);
+       r = amdgpu_ib_schedule(job->adev, job->num_ibs, job->ibs, job->owner);
        if (r) {
                DRM_ERROR("Error scheduling IBs (%d)\n", r);
                goto err;
@@ -61,8 +59,6 @@ err:
        if (job->free_job)
                job->free_job(job);
 
-       mutex_unlock(&job->job_lock);
-       fence_put(&job->base.s_fence->base);
        kfree(job);
        return fence ? &fence->base : NULL;
 }
@@ -88,21 +84,19 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
                        return -ENOMEM;
                job->base.sched = &ring->sched;
                job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
+               job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
+               if (!job->base.s_fence) {
+                       kfree(job);
+                       return -ENOMEM;
+               }
+               *f = fence_get(&job->base.s_fence->base);
+
                job->adev = adev;
                job->ibs = ibs;
                job->num_ibs = num_ibs;
-               job->base.owner = owner;
-               mutex_init(&job->job_lock);
+               job->owner = owner;
                job->free_job = free_job;
-               mutex_lock(&job->job_lock);
-               r = amd_sched_entity_push_job(&job->base);
-               if (r) {
-                       mutex_unlock(&job->job_lock);
-                       kfree(job);
-                       return r;
-               }
-               *f = fence_get(&job->base.s_fence->base);
-               mutex_unlock(&job->job_lock);
+               amd_sched_entity_push_job(&job->base);
        } else {
                r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
                if (r)
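
The scheduler hunk above is what makes dropping job_lock safe: the scheduler fence is now created and referenced before the job is pushed, because after amd_sched_entity_push_job() the job may run and be freed at any moment. The resulting submit order, as in the diff, with the ordering spelled out in comments (error paths trimmed):

    /* create the fence first ... */
    job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
    if (!job->base.s_fence) {
        kfree(job);
        return -ENOMEM;
    }
    /* ... and give the caller its reference before the job can complete */
    *f = fence_get(&job->base.s_fence->base);

    job->adev = adev;
    job->ibs = ibs;
    job->num_ibs = num_ibs;
    job->owner = owner;
    job->free_job = free_job;

    /* from here on the scheduler owns (and eventually frees) the job */
    amd_sched_entity_push_job(&job->base);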
index ff3ca52..1caaf20 100644 (file)
@@ -40,7 +40,7 @@ int amdgpu_semaphore_create(struct amdgpu_device *adev,
        if (*semaphore == NULL) {
                return -ENOMEM;
        }
-       r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo,
+       r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
                             &(*semaphore)->sa_bo, 8, 8);
        if (r) {
                kfree(*semaphore);
index a6697fd..dd005c3 100644 (file)
@@ -302,8 +302,14 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
                        return -EINVAL;
                }
 
-               if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores ||
-                   (count >= AMDGPU_NUM_SYNCS)) {
+               if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) {
+                       r = fence_wait(&fence->base, true);
+                       if (r)
+                               return r;
+                       continue;
+               }
+
+               if (count >= AMDGPU_NUM_SYNCS) {
                        /* not enough room, wait manually */
                        r = fence_wait(&fence->base, false);
                        if (r)
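
With the scheduler enabled (or HW semaphores turned off) the sync code above no longer emits a semaphore at all; it simply blocks on the other ring's fence from the CPU, and only the later "not enough room" fallback keeps the original non-interruptible wait. The early-out in the hunk boils down to:

    /* scheduler path: no semaphore, just wait for the fence on the CPU */
    r = fence_wait(&fence->base, true);   /* true = interruptible */
    if (r)
        return r;
    continue;   /* move on to the next fence in the sync object */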
index 76ecbaf..8f9834a 100644 (file)
@@ -48,6 +48,57 @@ TRACE_EVENT(amdgpu_cs,
                      __entry->fences)
 );
 
+TRACE_EVENT(amdgpu_cs_ioctl,
+           TP_PROTO(struct amdgpu_job *job),
+           TP_ARGS(job),
+           TP_STRUCT__entry(
+                            __field(struct amdgpu_device *, adev)
+                            __field(struct amd_sched_job *, sched_job)
+                            __field(struct amdgpu_ib *, ib)
+                            __field(struct fence *, fence)
+                            __field(char *, ring_name)
+                            __field(u32, num_ibs)
+                            ),
+
+           TP_fast_assign(
+                          __entry->adev = job->adev;
+                          __entry->sched_job = &job->base;
+                          __entry->ib = job->ibs;
+                          __entry->fence = &job->base.s_fence->base;
+                          __entry->ring_name = job->ibs[0].ring->name;
+                          __entry->num_ibs = job->num_ibs;
+                          ),
+           TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
+                     __entry->adev, __entry->sched_job, __entry->ib,
+                     __entry->fence, __entry->ring_name, __entry->num_ibs)
+);
+
+TRACE_EVENT(amdgpu_sched_run_job,
+           TP_PROTO(struct amdgpu_job *job),
+           TP_ARGS(job),
+           TP_STRUCT__entry(
+                            __field(struct amdgpu_device *, adev)
+                            __field(struct amd_sched_job *, sched_job)
+                            __field(struct amdgpu_ib *, ib)
+                            __field(struct fence *, fence)
+                            __field(char *, ring_name)
+                            __field(u32, num_ibs)
+                            ),
+
+           TP_fast_assign(
+                          __entry->adev = job->adev;
+                          __entry->sched_job = &job->base;
+                          __entry->ib = job->ibs;
+                          __entry->fence = &job->base.s_fence->base;
+                          __entry->ring_name = job->ibs[0].ring->name;
+                          __entry->num_ibs = job->num_ibs;
+                          ),
+           TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
+                     __entry->adev, __entry->sched_job, __entry->ib,
+                     __entry->fence, __entry->ring_name, __entry->num_ibs)
+);
+
+
 TRACE_EVENT(amdgpu_vm_grab_id,
            TP_PROTO(unsigned vmid, int ring),
            TP_ARGS(vmid, ring),
@@ -196,49 +247,6 @@ TRACE_EVENT(amdgpu_bo_list_set,
            TP_printk("list=%p, bo=%p", __entry->list, __entry->bo)
 );
 
-DECLARE_EVENT_CLASS(amdgpu_fence_request,
-
-           TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
-
-           TP_ARGS(dev, ring, seqno),
-
-           TP_STRUCT__entry(
-                            __field(u32, dev)
-                            __field(int, ring)
-                            __field(u32, seqno)
-                            ),
-
-           TP_fast_assign(
-                          __entry->dev = dev->primary->index;
-                          __entry->ring = ring;
-                          __entry->seqno = seqno;
-                          ),
-
-           TP_printk("dev=%u, ring=%d, seqno=%u",
-                     __entry->dev, __entry->ring, __entry->seqno)
-);
-
-DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_emit,
-
-           TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
-
-           TP_ARGS(dev, ring, seqno)
-);
-
-DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_begin,
-
-           TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
-
-           TP_ARGS(dev, ring, seqno)
-);
-
-DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_end,
-
-           TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
-
-           TP_ARGS(dev, ring, seqno)
-);
-
 DECLARE_EVENT_CLASS(amdgpu_semaphore_request,
 
            TP_PROTO(int ring, struct amdgpu_semaphore *sem),
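
Each TRACE_EVENT() above expands into a trace_<name>() helper, so the two new events are emitted as plain function calls from the driver. The scheduler hunk earlier in this series already adds the run_job call; the CS ioctl side is not part of this excerpt, but would be emitted the same way. A minimal sketch, assuming a struct amdgpu_job *job is in scope:

    #include "amdgpu_trace.h"

    /* when the ioctl hands the job to the scheduler */
    trace_amdgpu_cs_ioctl(job);

    /* in the scheduler's run_job callback, as in the amdgpu_sched.c hunk */
    trace_amdgpu_sched_run_job(job);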
index 81bb8e9..d4bac5f 100644 (file)
@@ -1073,10 +1073,10 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
        ret = drm_mm_dump_table(m, mm);
        spin_unlock(&glob->lru_lock);
        if (ttm_pl == TTM_PL_VRAM)
-               seq_printf(m, "man size:%llu pages, ram usage:%luMB, vis usage:%luMB\n",
+               seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
                           adev->mman.bdev.man[ttm_pl].size,
-                          atomic64_read(&adev->vram_usage) >> 20,
-                          atomic64_read(&adev->vram_vis_usage) >> 20);
+                          (u64)atomic64_read(&adev->vram_usage) >> 20,
+                          (u64)atomic64_read(&adev->vram_vis_usage) >> 20);
        return ret;
 }
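
The seq_printf change above is only about format types: %llu expects an unsigned long long, while the value returned by atomic64_read() is not the same C type on every configuration, so the shifted usage counters get an explicit u64 cast to keep the format string correct (and warning-free) everywhere:

    seq_printf(m, "ram usage:%lluMB, vis usage:%lluMB\n",
               (u64)atomic64_read(&adev->vram_usage) >> 20,
               (u64)atomic64_read(&adev->vram_vis_usage) >> 20);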
 
index 03f0c3b..a745eee 100644 (file)
@@ -392,7 +392,10 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
        ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
        ib->ptr[ib->length_dw++] = handle;
 
-       ib->ptr[ib->length_dw++] = 0x00000030; /* len */
+       if ((ring->adev->vce.fw_version >> 24) >= 52)
+               ib->ptr[ib->length_dw++] = 0x00000040; /* len */
+       else
+               ib->ptr[ib->length_dw++] = 0x00000030; /* len */
        ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0x00000042;
@@ -404,6 +407,12 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
        ib->ptr[ib->length_dw++] = 0x00000100;
        ib->ptr[ib->length_dw++] = 0x0000000c;
        ib->ptr[ib->length_dw++] = 0x00000000;
+       if ((ring->adev->vce.fw_version >> 24) >= 52) {
+               ib->ptr[ib->length_dw++] = 0x00000000;
+               ib->ptr[ib->length_dw++] = 0x00000000;
+               ib->ptr[ib->length_dw++] = 0x00000000;
+               ib->ptr[ib->length_dw++] = 0x00000000;
+       }
 
        ib->ptr[ib->length_dw++] = 0x00000014; /* len */
        ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
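
The new VCE checks key off the firmware's major version, which the driver keeps in the top byte of fw_version (that is what the >> 24 above extracts); firmware 52 and newer expects a longer create message, hence the bigger length word and the four extra zero dwords. A tiny sketch of the check:

    u32 fw_version = ring->adev->vce.fw_version;
    unsigned major = fw_version >> 24;   /* major version lives in the top byte */

    ib->ptr[ib->length_dw++] = (major >= 52) ? 0x00000040   /* longer create msg */
                                             : 0x00000030;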
index 633a32a..ae037e5 100644 (file)
@@ -143,10 +143,15 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
        unsigned i;
 
        /* check if the id is still valid */
-       if (vm_id->id && vm_id->last_id_use &&
-           vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) {
-               trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
-               return 0;
+       if (vm_id->id) {
+               unsigned id = vm_id->id;
+               long owner;
+
+               owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
+               if (owner == (long)vm) {
+                       trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
+                       return 0;
+               }
        }
 
        /* we definitely need to flush */
@@ -154,7 +159,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 
        /* skip over VMID 0, since it is the system VM */
        for (i = 1; i < adev->vm_manager.nvm; ++i) {
-               struct fence *fence = adev->vm_manager.active[i];
+               struct fence *fence = adev->vm_manager.ids[i].active;
                struct amdgpu_ring *fring;
 
                if (fence == NULL) {
@@ -176,7 +181,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                if (choices[i]) {
                        struct fence *fence;
 
-                       fence  = adev->vm_manager.active[choices[i]];
+                       fence  = adev->vm_manager.ids[choices[i]].active;
                        vm_id->id = choices[i];
 
                        trace_amdgpu_vm_grab_id(choices[i], ring->idx);
@@ -207,24 +212,21 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
        uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
        struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
        struct fence *flushed_updates = vm_id->flushed_updates;
-       bool is_earlier = false;
-
-       if (flushed_updates && updates) {
-               BUG_ON(flushed_updates->context != updates->context);
-               is_earlier = (updates->seqno - flushed_updates->seqno <=
-                             INT_MAX) ? true : false;
-       }
+       bool is_later;
 
-       if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates ||
-           is_earlier) {
+       if (!flushed_updates)
+               is_later = true;
+       else if (!updates)
+               is_later = false;
+       else
+               is_later = fence_is_later(updates, flushed_updates);
 
+       if (pd_addr != vm_id->pd_gpu_addr || is_later) {
                trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
-               if (is_earlier) {
+               if (is_later) {
                        vm_id->flushed_updates = fence_get(updates);
                        fence_put(flushed_updates);
                }
-               if (!flushed_updates)
-                       vm_id->flushed_updates = fence_get(updates);
                vm_id->pd_gpu_addr = pd_addr;
                amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
        }
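
The flush logic above swaps the open-coded sequence-number test (the removed "seqno difference <= INT_MAX" dance) for fence_is_later(), which answers whether updates comes after flushed_updates within one fence context and takes care of sequence wrap-around itself. The decision the hunk implements, spelled out with the reasoning as comments:

    bool is_later;

    if (!flushed_updates)
        is_later = true;        /* nothing flushed yet: must flush */
    else if (!updates)
        is_later = false;       /* no new updates to wait for */
    else
        is_later = fence_is_later(updates, flushed_updates);

    if (pd_addr != vm_id->pd_gpu_addr || is_later) {
        /* emit the VM flush and remember the newest updates fence */
    }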
@@ -244,16 +246,14 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
  */
 void amdgpu_vm_fence(struct amdgpu_device *adev,
                     struct amdgpu_vm *vm,
-                    struct amdgpu_fence *fence)
+                    struct fence *fence)
 {
-       unsigned ridx = fence->ring->idx;
-       unsigned vm_id = vm->ids[ridx].id;
-
-       fence_put(adev->vm_manager.active[vm_id]);
-       adev->vm_manager.active[vm_id] = fence_get(&fence->base);
+       struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence);
+       unsigned vm_id = vm->ids[ring->idx].id;
 
-       fence_put(vm->ids[ridx].last_id_use);
-       vm->ids[ridx].last_id_use = fence_get(&fence->base);
+       fence_put(adev->vm_manager.ids[vm_id].active);
+       adev->vm_manager.ids[vm_id].active = fence_get(fence);
+       atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm);
 }
 
 /**
@@ -332,6 +332,8 @@ int amdgpu_vm_free_job(struct amdgpu_job *job)
  *
  * @adev: amdgpu_device pointer
  * @bo: bo to clear
+ *
+ * need to reserve bo first before calling it.
  */
 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
                              struct amdgpu_bo *bo)
@@ -343,24 +345,20 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
        uint64_t addr;
        int r;
 
-       r = amdgpu_bo_reserve(bo, false);
-       if (r)
-               return r;
-
        r = reservation_object_reserve_shared(bo->tbo.resv);
        if (r)
                return r;
 
        r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
        if (r)
-               goto error_unreserve;
+               goto error;
 
        addr = amdgpu_bo_gpu_offset(bo);
        entries = amdgpu_bo_size(bo) / 8;
 
        ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
        if (!ib)
-               goto error_unreserve;
+               goto error;
 
        r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
        if (r)
@@ -378,16 +376,14 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
        if (!r)
                amdgpu_bo_fence(bo, fence, true);
        fence_put(fence);
-       if (amdgpu_enable_scheduler) {
-               amdgpu_bo_unreserve(bo);
+       if (amdgpu_enable_scheduler)
                return 0;
-       }
+
 error_free:
        amdgpu_ib_free(adev, ib);
        kfree(ib);
 
-error_unreserve:
-       amdgpu_bo_unreserve(bo);
+error:
        return r;
 }
 
@@ -926,8 +922,9 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
                bo_va = list_first_entry(&vm->invalidated,
                        struct amdgpu_bo_va, vm_status);
                spin_unlock(&vm->status_lock);
-
+               mutex_lock(&bo_va->mutex);
                r = amdgpu_vm_bo_update(adev, bo_va, NULL);
+               mutex_unlock(&bo_va->mutex);
                if (r)
                        return r;
 
@@ -971,7 +968,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
        INIT_LIST_HEAD(&bo_va->valids);
        INIT_LIST_HEAD(&bo_va->invalids);
        INIT_LIST_HEAD(&bo_va->vm_status);
-
+       mutex_init(&bo_va->mutex);
        list_add_tail(&bo_va->bo_list, &bo->va);
 
        return bo_va;
@@ -989,7 +986,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
  * Add a mapping of the BO at the specified addr into the VM.
  * Returns 0 for success, error for failure.
  *
- * Object has to be reserved and gets unreserved by this function!
+ * Object has to be reserved and unreserved outside!
  */
 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                     struct amdgpu_bo_va *bo_va,
@@ -1005,30 +1002,27 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 
        /* validate the parameters */
        if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
-           size == 0 || size & AMDGPU_GPU_PAGE_MASK) {
-               amdgpu_bo_unreserve(bo_va->bo);
+           size == 0 || size & AMDGPU_GPU_PAGE_MASK)
                return -EINVAL;
-       }
 
        /* make sure object fit at this offset */
        eaddr = saddr + size;
-       if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) {
-               amdgpu_bo_unreserve(bo_va->bo);
+       if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
                return -EINVAL;
-       }
 
        last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
        if (last_pfn > adev->vm_manager.max_pfn) {
                dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
                        last_pfn, adev->vm_manager.max_pfn);
-               amdgpu_bo_unreserve(bo_va->bo);
                return -EINVAL;
        }
 
        saddr /= AMDGPU_GPU_PAGE_SIZE;
        eaddr /= AMDGPU_GPU_PAGE_SIZE;
 
+       spin_lock(&vm->it_lock);
        it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
+       spin_unlock(&vm->it_lock);
        if (it) {
                struct amdgpu_bo_va_mapping *tmp;
                tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
@@ -1036,14 +1030,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
                        "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
                        tmp->it.start, tmp->it.last + 1);
-               amdgpu_bo_unreserve(bo_va->bo);
                r = -EINVAL;
                goto error;
        }
 
        mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
        if (!mapping) {
-               amdgpu_bo_unreserve(bo_va->bo);
                r = -ENOMEM;
                goto error;
        }
@@ -1054,8 +1046,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
        mapping->offset = offset;
        mapping->flags = flags;
 
+       mutex_lock(&bo_va->mutex);
        list_add(&mapping->list, &bo_va->invalids);
+       mutex_unlock(&bo_va->mutex);
+       spin_lock(&vm->it_lock);
        interval_tree_insert(&mapping->it, &vm->va);
+       spin_unlock(&vm->it_lock);
        trace_amdgpu_vm_bo_map(bo_va, mapping);
 
        /* Make sure the page tables are allocated */
@@ -1067,8 +1063,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
        if (eaddr > vm->max_pde_used)
                vm->max_pde_used = eaddr;
 
-       amdgpu_bo_unreserve(bo_va->bo);
-
        /* walk over the address space and allocate the page tables */
        for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
                struct reservation_object *resv = vm->page_directory->tbo.resv;
@@ -1077,13 +1071,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                if (vm->page_tables[pt_idx].bo)
                        continue;
 
-               ww_mutex_lock(&resv->lock, NULL);
                r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
                                     AMDGPU_GPU_PAGE_SIZE, true,
                                     AMDGPU_GEM_DOMAIN_VRAM,
                                     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
                                     NULL, resv, &pt);
-               ww_mutex_unlock(&resv->lock);
                if (r)
                        goto error_free;
 
@@ -1101,7 +1093,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 
 error_free:
        list_del(&mapping->list);
+       spin_lock(&vm->it_lock);
        interval_tree_remove(&mapping->it, &vm->va);
+       spin_unlock(&vm->it_lock);
        trace_amdgpu_vm_bo_unmap(bo_va, mapping);
        kfree(mapping);
 
@@ -1119,7 +1113,7 @@ error:
  * Remove a mapping of the BO at the specified addr from the VM.
  * Returns 0 for success, error for failure.
  *
- * Object has to be reserved and gets unreserved by this function!
+ * Object has to be reserved and unreserved outside!
  */
 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
                       struct amdgpu_bo_va *bo_va,
@@ -1130,7 +1124,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
        bool valid = true;
 
        saddr /= AMDGPU_GPU_PAGE_SIZE;
-
+       mutex_lock(&bo_va->mutex);
        list_for_each_entry(mapping, &bo_va->valids, list) {
                if (mapping->it.start == saddr)
                        break;
@@ -1145,20 +1139,21 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
                }
 
                if (&mapping->list == &bo_va->invalids) {
-                       amdgpu_bo_unreserve(bo_va->bo);
+                       mutex_unlock(&bo_va->mutex);
                        return -ENOENT;
                }
        }
-
+       mutex_unlock(&bo_va->mutex);
        list_del(&mapping->list);
+       spin_lock(&vm->it_lock);
        interval_tree_remove(&mapping->it, &vm->va);
+       spin_unlock(&vm->it_lock);
        trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 
        if (valid)
                list_add(&mapping->list, &vm->freed);
        else
                kfree(mapping);
-       amdgpu_bo_unreserve(bo_va->bo);
 
        return 0;
 }
@@ -1187,17 +1182,21 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 
        list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
                list_del(&mapping->list);
+               spin_lock(&vm->it_lock);
                interval_tree_remove(&mapping->it, &vm->va);
+               spin_unlock(&vm->it_lock);
                trace_amdgpu_vm_bo_unmap(bo_va, mapping);
                list_add(&mapping->list, &vm->freed);
        }
        list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
                list_del(&mapping->list);
+               spin_lock(&vm->it_lock);
                interval_tree_remove(&mapping->it, &vm->va);
+               spin_unlock(&vm->it_lock);
                kfree(mapping);
        }
-
        fence_put(bo_va->last_pt_update);
+       mutex_destroy(&bo_va->mutex);
        kfree(bo_va);
 }
 
@@ -1241,15 +1240,13 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                vm->ids[i].id = 0;
                vm->ids[i].flushed_updates = NULL;
-               vm->ids[i].last_id_use = NULL;
        }
-       mutex_init(&vm->mutex);
        vm->va = RB_ROOT;
        spin_lock_init(&vm->status_lock);
        INIT_LIST_HEAD(&vm->invalidated);
        INIT_LIST_HEAD(&vm->cleared);
        INIT_LIST_HEAD(&vm->freed);
-
+       spin_lock_init(&vm->it_lock);
        pd_size = amdgpu_vm_directory_size(adev);
        pd_entries = amdgpu_vm_num_pdes(adev);
 
@@ -1269,8 +1266,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                             NULL, NULL, &vm->page_directory);
        if (r)
                return r;
-
+       r = amdgpu_bo_reserve(vm->page_directory, false);
+       if (r) {
+               amdgpu_bo_unref(&vm->page_directory);
+               vm->page_directory = NULL;
+               return r;
+       }
        r = amdgpu_vm_clear_bo(adev, vm->page_directory);
+       amdgpu_bo_unreserve(vm->page_directory);
        if (r) {
                amdgpu_bo_unref(&vm->page_directory);
                vm->page_directory = NULL;
@@ -1313,11 +1316,27 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 
        amdgpu_bo_unref(&vm->page_directory);
        fence_put(vm->page_directory_fence);
-
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+               unsigned id = vm->ids[i].id;
+
+               atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
+                                   (long)vm, 0);
                fence_put(vm->ids[i].flushed_updates);
-               fence_put(vm->ids[i].last_id_use);
        }
 
-       mutex_destroy(&vm->mutex);
+}
+
+/**
+ * amdgpu_vm_manager_fini - cleanup VM manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Cleanup the VM manager and free resources.
+ */
+void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
+{
+       unsigned i;
+
+       for (i = 0; i < AMDGPU_NUM_VM; ++i)
+               fence_put(adev->vm_manager.ids[i].active);
 }
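
Taken together, the amdgpu_vm.c hunks split the old single vm->mutex into two much narrower locks: a per-bo_va mutex around the valids/invalids mapping lists and a vm->it_lock spinlock around every interval-tree operation, while BO reservation (now taken by the callers, as the updated comments say) covers the rest. A condensed sketch of the map path under the new scheme, with variables as in amdgpu_vm_bo_map() above:

    /* conflict check on the per-VM interval tree */
    spin_lock(&vm->it_lock);
    it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
    spin_unlock(&vm->it_lock);
    if (it)
        return -EINVAL;                 /* range already mapped */

    /* per-bo_va list is protected by its own mutex */
    mutex_lock(&bo_va->mutex);
    list_add(&mapping->list, &bo_va->invalids);
    mutex_unlock(&bo_va->mutex);

    /* tree insertion again under the interval-tree spinlock */
    spin_lock(&vm->it_lock);
    interval_tree_insert(&mapping->it, &vm->va);
    spin_unlock(&vm->it_lock);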
index a1a35a5..57a2e34 100644 (file)
@@ -6569,12 +6569,12 @@ static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
                switch (state) {
                case AMDGPU_IRQ_STATE_DISABLE:
                        cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
-                       cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
+                       cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
                        WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
                        break;
                case AMDGPU_IRQ_STATE_ENABLE:
                        cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
-                       cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
+                       cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
                        WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
                        break;
                default:
@@ -6586,12 +6586,12 @@ static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
                switch (state) {
                case AMDGPU_IRQ_STATE_DISABLE:
                        cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
-                       cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
+                       cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
                        WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
                        break;
                case AMDGPU_IRQ_STATE_ENABLE:
                        cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
-                       cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
+                       cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
                        WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
                        break;
                default:
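
The ci_dpm hunks above are a polarity fix: the THERM_INTH/INTL mask bits in CG_THERMAL_INT mask (that is, suppress) the high and low temperature interrupts, so enabling an interrupt means clearing its mask bit and disabling it means setting the bit; the previous code had the two cases inverted. Condensed, the corrected behaviour is:

    cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
    if (state == AMDGPU_IRQ_STATE_ENABLE)
        cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;  /* unmask */
    else
        cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;   /* mask */
    WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);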
index 6776cf7..e1dcab9 100644 (file)
@@ -268,7 +268,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
        mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
        mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
        mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
-       mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
        mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
        mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
        mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
@@ -296,10 +295,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
        mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
        mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
        mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
-       mmPCIE_INDEX, 0xffffffff, 0x0140001c,
-       mmPCIE_DATA, 0x000f0000, 0x00000000,
-       mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
-       mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
        mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
 };
 
@@ -1000,7 +995,7 @@ static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
                adev->gfx.config.max_cu_per_sh = 16;
                adev->gfx.config.max_sh_per_se = 1;
                adev->gfx.config.max_backends_per_se = 4;
-               adev->gfx.config.max_texture_channel_caches = 8;
+               adev->gfx.config.max_texture_channel_caches = 16;
                adev->gfx.config.max_gprs = 256;
                adev->gfx.config.max_gs_threads = 32;
                adev->gfx.config.max_hw_contexts = 8;
@@ -1613,6 +1608,296 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
                        WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
                }
        case CHIP_FIJI:
+               for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+                       switch (reg_offset) {
+                       case 0:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 1:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 2:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 3:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 4:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 5:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 6:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 7:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 8:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+                                               PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
+                               break;
+                       case 9:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                               break;
+                       case 10:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                               break;
+                       case 11:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+                               break;
+                       case 12:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+                               break;
+                       case 13:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                               break;
+                       case 14:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                               break;
+                       case 15:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                               break;
+                       case 16:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+                               break;
+                       case 17:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+                               break;
+                       case 18:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 19:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 20:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 21:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 22:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 23:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 24:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 25:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 26:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 27:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                               break;
+                       case 28:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                               break;
+                       case 29:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+                               break;
+                       case 30:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+                               break;
+                       default:
+                               gb_tile_moden = 0;
+                               break;
+                       }
+                       adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+                       WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
+               }
+               for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
+                       switch (reg_offset) {
+                       case 0:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 1:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 2:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 3:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 4:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 5:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 6:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 8:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 9:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 10:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 11:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 12:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 13:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 14:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                               NUM_BANKS(ADDR_SURF_4_BANK));
+                               break;
+                       case 7:
+                               /* unused idx */
+                               continue;
+                       default:
+                               gb_tile_moden = 0;
+                               break;
+                       }
+                       adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
+                       WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
+               }
+               break;
        case CHIP_TONGA:
                for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
                        switch (reg_offset) {
@@ -2971,10 +3256,13 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
        amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
        switch (adev->asic_type) {
        case CHIP_TONGA:
-       case CHIP_FIJI:
                amdgpu_ring_write(ring, 0x16000012);
                amdgpu_ring_write(ring, 0x0000002A);
                break;
+       case CHIP_FIJI:
+               amdgpu_ring_write(ring, 0x3a00161a);
+               amdgpu_ring_write(ring, 0x0000002e);
+               break;
        case CHIP_TOPAZ:
        case CHIP_CARRIZO:
                amdgpu_ring_write(ring, 0x00000002);
index 85bbcdc..7427d8c 100644 (file)
@@ -40,7 +40,7 @@
 static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
 static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
 
-MODULE_FIRMWARE("radeon/boniare_mc.bin");
+MODULE_FIRMWARE("radeon/bonaire_mc.bin");
 MODULE_FIRMWARE("radeon/hawaii_mc.bin");
 
 /**
@@ -501,6 +501,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
+       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
        WREG32(mmVM_L2_CNTL, tmp);
        tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
@@ -960,12 +961,10 @@ static int gmc_v7_0_sw_init(void *handle)
 
 static int gmc_v7_0_sw_fini(void *handle)
 {
-       int i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        if (adev->vm_manager.enabled) {
-               for (i = 0; i < AMDGPU_NUM_VM; ++i)
-                       fence_put(adev->vm_manager.active[i]);
+               amdgpu_vm_manager_fini(adev);
                gmc_v7_0_vm_fini(adev);
                adev->vm_manager.enabled = false;
        }
@@ -1010,12 +1009,10 @@ static int gmc_v7_0_hw_fini(void *handle)
 
 static int gmc_v7_0_suspend(void *handle)
 {
-       int i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        if (adev->vm_manager.enabled) {
-               for (i = 0; i < AMDGPU_NUM_VM; ++i)
-                       fence_put(adev->vm_manager.active[i]);
+               amdgpu_vm_manager_fini(adev);
                gmc_v7_0_vm_fini(adev);
                adev->vm_manager.enabled = false;
        }
index 1bcc4e7..cb0e50e 100644 (file)
@@ -629,6 +629,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
+       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
        WREG32(mmVM_L2_CNTL, tmp);
        tmp = RREG32(mmVM_L2_CNTL2);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
@@ -979,12 +980,10 @@ static int gmc_v8_0_sw_init(void *handle)
 
 static int gmc_v8_0_sw_fini(void *handle)
 {
-       int i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        if (adev->vm_manager.enabled) {
-               for (i = 0; i < AMDGPU_NUM_VM; ++i)
-                       fence_put(adev->vm_manager.active[i]);
+               amdgpu_vm_manager_fini(adev);
                gmc_v8_0_vm_fini(adev);
                adev->vm_manager.enabled = false;
        }
@@ -1031,12 +1030,10 @@ static int gmc_v8_0_hw_fini(void *handle)
 
 static int gmc_v8_0_suspend(void *handle)
 {
-       int i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        if (adev->vm_manager.enabled) {
-               for (i = 0; i < AMDGPU_NUM_VM; ++i)
-                       fence_put(adev->vm_manager.active[i]);
+               amdgpu_vm_manager_fini(adev);
                gmc_v8_0_vm_fini(adev);
                adev->vm_manager.enabled = false;
        }
index 6a52db6..370c6c9 100644 (file)
@@ -40,6 +40,9 @@
 
 #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT    0x04
 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK      0x10
+#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0        0x8616
+#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1        0x8617
+#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2        0x8618
 
 #define VCE_V3_0_FW_SIZE       (384 * 1024)
 #define VCE_V3_0_STACK_SIZE    (64 * 1024)
@@ -130,9 +133,11 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 
                /* set BUSY flag */
                WREG32_P(mmVCE_STATUS, 1, ~1);
-
-               WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
-                       ~VCE_VCPU_CNTL__CLK_EN_MASK);
+               if (adev->asic_type >= CHIP_STONEY)
+                       WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
+               else
+                       WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
+                               ~VCE_VCPU_CNTL__CLK_EN_MASK);
 
                WREG32_P(mmVCE_SOFT_RESET,
                         VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
@@ -391,8 +396,12 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
        WREG32(mmVCE_LMI_SWAP_CNTL, 0);
        WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
        WREG32(mmVCE_LMI_VM_CTRL, 0);
-
-       WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
+       if (adev->asic_type >= CHIP_STONEY) {
+               WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
+               WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
+               WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
+       } else
+               WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
        offset = AMDGPU_VCE_FIRMWARE_OFFSET;
        size = VCE_V3_0_FW_SIZE;
        WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
@@ -576,6 +585,11 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_iv_entry *entry)
 {
        DRM_DEBUG("IH: VCE\n");
+
+       WREG32_P(mmVCE_SYS_INT_STATUS,
+               VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK,
+               ~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK);
+
        switch (entry->src_data) {
        case 0:
                amdgpu_fence_process(&adev->vce.ring[0]);
index 144f50a..c89dc77 100644 (file)
@@ -16,6 +16,8 @@ TRACE_EVENT(amd_sched_job,
            TP_ARGS(sched_job),
            TP_STRUCT__entry(
                             __field(struct amd_sched_entity *, entity)
+                            __field(struct amd_sched_job *, sched_job)
+                            __field(struct fence *, fence)
                             __field(const char *, name)
                             __field(u32, job_count)
                             __field(int, hw_job_count)
@@ -23,16 +25,32 @@ TRACE_EVENT(amd_sched_job,
 
            TP_fast_assign(
                           __entry->entity = sched_job->s_entity;
+                          __entry->sched_job = sched_job;
+                          __entry->fence = &sched_job->s_fence->base;
                           __entry->name = sched_job->sched->name;
                           __entry->job_count = kfifo_len(
                                   &sched_job->s_entity->job_queue) / sizeof(sched_job);
                           __entry->hw_job_count = atomic_read(
                                   &sched_job->sched->hw_rq_count);
                           ),
-           TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d",
-                     __entry->entity, __entry->name, __entry->job_count,
-                     __entry->hw_job_count)
+           TP_printk("entity=%p, sched job=%p, fence=%p, ring=%s, job count:%u, hw job count:%d",
+                     __entry->entity, __entry->sched_job, __entry->fence, __entry->name,
+                     __entry->job_count, __entry->hw_job_count)
 );
+
+TRACE_EVENT(amd_sched_process_job,
+           TP_PROTO(struct amd_sched_fence *fence),
+           TP_ARGS(fence),
+           TP_STRUCT__entry(
+                   __field(struct fence *, fence)
+                   ),
+
+           TP_fast_assign(
+                   __entry->fence = &fence->base;
+                   ),
+           TP_printk("fence=%p signaled", __entry->fence)
+);
+
 #endif
 
 /* This part must be outside protection */
index 89619a5..651129f 100644 (file)
 #define CREATE_TRACE_POINTS
 #include "gpu_sched_trace.h"
 
-static struct amd_sched_job *
-amd_sched_entity_pop_job(struct amd_sched_entity *entity);
+static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
 
+struct kmem_cache *sched_fence_slab;
+atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
+
 /* Initialize a given run queue struct */
 static void amd_sched_rq_init(struct amd_sched_rq *rq)
 {
@@ -61,36 +63,36 @@ static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
 }
 
 /**
- * Select next job from a specified run queue with round robin policy.
- * Return NULL if nothing available.
+ * Select an entity which could provide a job to run
+ *
+ * @rq         The run queue to check.
+ *
+ * Try to find a ready entity, returns NULL if none found.
  */
-static struct amd_sched_job *
-amd_sched_rq_select_job(struct amd_sched_rq *rq)
+static struct amd_sched_entity *
+amd_sched_rq_select_entity(struct amd_sched_rq *rq)
 {
        struct amd_sched_entity *entity;
-       struct amd_sched_job *sched_job;
 
        spin_lock(&rq->lock);
 
        entity = rq->current_entity;
        if (entity) {
                list_for_each_entry_continue(entity, &rq->entities, list) {
-                       sched_job = amd_sched_entity_pop_job(entity);
-                       if (sched_job) {
+                       if (amd_sched_entity_is_ready(entity)) {
                                rq->current_entity = entity;
                                spin_unlock(&rq->lock);
-                               return sched_job;
+                               return entity;
                        }
                }
        }
 
        list_for_each_entry(entity, &rq->entities, list) {
 
-               sched_job = amd_sched_entity_pop_job(entity);
-               if (sched_job) {
+               if (amd_sched_entity_is_ready(entity)) {
                        rq->current_entity = entity;
                        spin_unlock(&rq->lock);
-                       return sched_job;
+                       return entity;
                }
 
                if (entity == rq->current_entity)
@@ -173,6 +175,24 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
        return false;
 }
 
+/**
+ * Check if entity is ready
+ *
+ * @entity     The pointer to a valid scheduler entity
+ *
+ * Return true if entity could provide a job.
+ */
+static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
+{
+       if (kfifo_is_empty(&entity->job_queue))
+               return false;
+
+       if (ACCESS_ONCE(entity->dependency))
+               return false;
+
+       return true;
+}
+
 /**
  * Destroy a context entity
  *
@@ -208,32 +228,53 @@ static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
        amd_sched_wakeup(entity->sched);
 }
 
+static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
+{
+       struct amd_gpu_scheduler *sched = entity->sched;
+       struct fence * fence = entity->dependency;
+       struct amd_sched_fence *s_fence;
+
+       if (fence->context == entity->fence_context) {
+               /* We can ignore fences from ourself */
+               fence_put(entity->dependency);
+               return false;
+       }
+
+       s_fence = to_amd_sched_fence(fence);
+       if (s_fence && s_fence->sched == sched) {
+               /* Fence is from the same scheduler */
+               if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
+                       /* Ignore it when it is already scheduled */
+                       fence_put(entity->dependency);
+                       return false;
+               }
+
+               /* Wait for fence to be scheduled */
+               entity->cb.func = amd_sched_entity_wakeup;
+               list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
+               return true;
+       }
+
+       if (!fence_add_callback(entity->dependency, &entity->cb,
+                               amd_sched_entity_wakeup))
+               return true;
+
+       fence_put(entity->dependency);
+       return false;
+}
+
 static struct amd_sched_job *
 amd_sched_entity_pop_job(struct amd_sched_entity *entity)
 {
        struct amd_gpu_scheduler *sched = entity->sched;
        struct amd_sched_job *sched_job;
 
-       if (ACCESS_ONCE(entity->dependency))
-               return NULL;
-
        if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
                return NULL;
 
-       while ((entity->dependency = sched->ops->dependency(sched_job))) {
-
-               if (entity->dependency->context == entity->fence_context) {
-                       /* We can ignore fences from ourself */
-                       fence_put(entity->dependency);
-                       continue;
-               }
-
-               if (fence_add_callback(entity->dependency, &entity->cb,
-                                      amd_sched_entity_wakeup))
-                       fence_put(entity->dependency);
-               else
+       while ((entity->dependency = sched->ops->dependency(sched_job)))
+               if (amd_sched_entity_add_dependency_cb(entity))
                        return NULL;
-       }
 
        return sched_job;
 }
@@ -273,22 +314,13 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
  *
  * Returns 0 for success, negative error code otherwise.
  */
-int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
+void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 {
        struct amd_sched_entity *entity = sched_job->s_entity;
-       struct amd_sched_fence *fence = amd_sched_fence_create(
-               entity, sched_job->owner);
-
-       if (!fence)
-               return -ENOMEM;
-
-       fence_get(&fence->base);
-       sched_job->s_fence = fence;
 
        wait_event(entity->sched->job_scheduled,
                   amd_sched_entity_in(sched_job));
        trace_amd_sched_job(sched_job);
-       return 0;
 }
 
 /**
@@ -310,22 +342,22 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
 }
 
 /**
- * Select next to run
+ * Select next entity to process
 */
-static struct amd_sched_job *
-amd_sched_select_job(struct amd_gpu_scheduler *sched)
+static struct amd_sched_entity *
+amd_sched_select_entity(struct amd_gpu_scheduler *sched)
 {
-       struct amd_sched_job *sched_job;
+       struct amd_sched_entity *entity;
 
        if (!amd_sched_ready(sched))
                return NULL;
 
        /* Kernel run queue has higher priority than normal run queue*/
-       sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
-       if (sched_job == NULL)
-               sched_job = amd_sched_rq_select_job(&sched->sched_rq);
+       entity = amd_sched_rq_select_entity(&sched->kernel_rq);
+       if (entity == NULL)
+               entity = amd_sched_rq_select_entity(&sched->sched_rq);
 
-       return sched_job;
+       return entity;
 }
 
 static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
@@ -343,6 +375,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
                list_del_init(&s_fence->list);
                spin_unlock_irqrestore(&sched->fence_list_lock, flags);
        }
+       trace_amd_sched_process_job(s_fence);
        fence_put(&s_fence->base);
        wake_up_interruptible(&sched->wake_up_worker);
 }
@@ -386,13 +419,16 @@ static int amd_sched_main(void *param)
                unsigned long flags;
 
                wait_event_interruptible(sched->wake_up_worker,
-                       kthread_should_stop() ||
-                       (sched_job = amd_sched_select_job(sched)));
+                       (entity = amd_sched_select_entity(sched)) ||
+                       kthread_should_stop());
 
+               if (!entity)
+                       continue;
+
+               sched_job = amd_sched_entity_pop_job(entity);
                if (!sched_job)
                        continue;
 
-               entity = sched_job->s_entity;
                s_fence = sched_job->s_fence;
 
                if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
@@ -405,6 +441,7 @@ static int amd_sched_main(void *param)
 
                atomic_inc(&sched->hw_rq_count);
                fence = sched->ops->run_job(sched_job);
+               amd_sched_fence_scheduled(s_fence);
                if (fence) {
                        r = fence_add_callback(fence, &s_fence->cb,
                                               amd_sched_process_job);
@@ -450,6 +487,13 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
        init_waitqueue_head(&sched->wake_up_worker);
        init_waitqueue_head(&sched->job_scheduled);
        atomic_set(&sched->hw_rq_count, 0);
+       if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
+               sched_fence_slab = kmem_cache_create(
+                       "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
+                       SLAB_HWCACHE_ALIGN, NULL);
+               if (!sched_fence_slab)
+                       return -ENOMEM;
+       }
 
        /* Each scheduler will run on a separate kernel thread */
        sched->thread = kthread_run(amd_sched_main, sched, sched->name);
@@ -470,4 +514,6 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched)
 {
        if (sched->thread)
                kthread_stop(sched->thread);
+       if (atomic_dec_and_test(&sched_fence_slab_ref))
+               kmem_cache_destroy(sched_fence_slab);
 }
index 929e9ac..a0f0ae5 100644 (file)
 #include <linux/kfifo.h>
 #include <linux/fence.h>
 
+#define AMD_SCHED_FENCE_SCHEDULED_BIT  FENCE_FLAG_USER_BITS
+
 struct amd_gpu_scheduler;
 struct amd_sched_rq;
 
+extern struct kmem_cache *sched_fence_slab;
+extern atomic_t sched_fence_slab_ref;
+
 /**
  * A scheduler entity is a wrapper around a job queue or a group
  * of other entities. Entities take turns emitting jobs from their 
@@ -65,6 +70,7 @@ struct amd_sched_rq {
 struct amd_sched_fence {
        struct fence                    base;
        struct fence_cb                 cb;
+       struct list_head                scheduled_cb;
        struct amd_gpu_scheduler        *sched;
        spinlock_t                      lock;
        void                            *owner;
@@ -76,7 +82,6 @@ struct amd_sched_job {
        struct amd_gpu_scheduler        *sched;
        struct amd_sched_entity         *s_entity;
        struct amd_sched_fence          *s_fence;
-       void                            *owner;
 };
 
 extern const struct fence_ops amd_sched_fence_ops;
@@ -128,11 +133,11 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                          uint32_t jobs);
 void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                           struct amd_sched_entity *entity);
-int amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
 
 struct amd_sched_fence *amd_sched_fence_create(
        struct amd_sched_entity *s_entity, void *owner);
+void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
 void amd_sched_fence_signal(struct amd_sched_fence *fence);
 
-
 #endif
index d802638..87c78ee 100644 (file)
@@ -32,9 +32,11 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
        struct amd_sched_fence *fence = NULL;
        unsigned seq;
 
-       fence = kzalloc(sizeof(struct amd_sched_fence), GFP_KERNEL);
+       fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
        if (fence == NULL)
                return NULL;
+
+       INIT_LIST_HEAD(&fence->scheduled_cb);
        fence->owner = owner;
        fence->sched = s_entity->sched;
        spin_lock_init(&fence->lock);
@@ -55,6 +57,17 @@ void amd_sched_fence_signal(struct amd_sched_fence *fence)
                FENCE_TRACE(&fence->base, "was already signaled\n");
 }
 
+void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence)
+{
+       struct fence_cb *cur, *tmp;
+
+       set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &s_fence->base.flags);
+       list_for_each_entry_safe(cur, tmp, &s_fence->scheduled_cb, node) {
+               list_del_init(&cur->node);
+               cur->func(&s_fence->base, cur);
+       }
+}
+
 static const char *amd_sched_fence_get_driver_name(struct fence *fence)
 {
        return "amd_sched";
@@ -71,11 +84,17 @@ static bool amd_sched_fence_enable_signaling(struct fence *f)
        return true;
 }
 
+static void amd_sched_fence_release(struct fence *f)
+{
+       struct amd_sched_fence *fence = to_amd_sched_fence(f);
+       kmem_cache_free(sched_fence_slab, fence);
+}
+
 const struct fence_ops amd_sched_fence_ops = {
        .get_driver_name = amd_sched_fence_get_driver_name,
        .get_timeline_name = amd_sched_fence_get_timeline_name,
        .enable_signaling = amd_sched_fence_enable_signaling,
        .signaled = NULL,
        .wait = fence_default_wait,
-       .release = NULL,
+       .release = amd_sched_fence_release,
 };
index 7bb3845..aeee083 100644 (file)
@@ -1432,6 +1432,45 @@ static int atomic_set_prop(struct drm_atomic_state *state,
        return ret;
 }
 
+/**
+ * drm_atomic_clean_old_fb -- Unset old_fb pointers and set plane->fb pointers.
+ *
+ * @dev: drm device to check.
+ * @plane_mask: plane mask for planes that were updated.
+ * @ret: return value, can be -EDEADLK for a retry.
+ *
+ * Before doing an update plane->old_fb is set to plane->fb,
+ * but before dropping the locks old_fb needs to be set to NULL
+ * and plane->fb updated. This is a common operation for each
+ * atomic update, so this call is split off as a helper.
+ */
+void drm_atomic_clean_old_fb(struct drm_device *dev,
+                            unsigned plane_mask,
+                            int ret)
+{
+       struct drm_plane *plane;
+
+       /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
+        * locks (ie. while it is still safe to deref plane->state).  We
+        * need to do this here because the driver entry points cannot
+        * distinguish between legacy and atomic ioctls.
+        */
+       drm_for_each_plane_mask(plane, dev, plane_mask) {
+               if (ret == 0) {
+                       struct drm_framebuffer *new_fb = plane->state->fb;
+                       if (new_fb)
+                               drm_framebuffer_reference(new_fb);
+                       plane->fb = new_fb;
+                       plane->crtc = plane->state->crtc;
+
+                       if (plane->old_fb)
+                               drm_framebuffer_unreference(plane->old_fb);
+               }
+               plane->old_fb = NULL;
+       }
+}
+EXPORT_SYMBOL(drm_atomic_clean_old_fb);
+
 int drm_mode_atomic_ioctl(struct drm_device *dev,
                          void *data, struct drm_file *file_priv)
 {
@@ -1446,7 +1485,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
        struct drm_plane *plane;
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
-       unsigned plane_mask = 0;
+       unsigned plane_mask;
        int ret = 0;
        unsigned int i, j;
 
@@ -1486,6 +1525,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
        state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
 
 retry:
+       plane_mask = 0;
        copied_objs = 0;
        copied_props = 0;
 
@@ -1576,24 +1616,7 @@ retry:
        }
 
 out:
-       /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
-        * locks (ie. while it is still safe to deref plane->state).  We
-        * need to do this here because the driver entry points cannot
-        * distinguish between legacy and atomic ioctls.
-        */
-       drm_for_each_plane_mask(plane, dev, plane_mask) {
-               if (ret == 0) {
-                       struct drm_framebuffer *new_fb = plane->state->fb;
-                       if (new_fb)
-                               drm_framebuffer_reference(new_fb);
-                       plane->fb = new_fb;
-                       plane->crtc = plane->state->crtc;
-
-                       if (plane->old_fb)
-                               drm_framebuffer_unreference(plane->old_fb);
-               }
-               plane->old_fb = NULL;
-       }
+       drm_atomic_clean_old_fb(dev, plane_mask, ret);
 
        if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
                /*
index 0c6f621..e5aec45 100644 (file)
@@ -210,6 +210,14 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
                return -EINVAL;
        }
 
+       if (!drm_encoder_crtc_ok(new_encoder, connector_state->crtc)) {
+               DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] incompatible with [CRTC:%d]\n",
+                                new_encoder->base.id,
+                                new_encoder->name,
+                                connector_state->crtc->base.id);
+               return -EINVAL;
+       }
+
        if (new_encoder == connector_state->best_encoder) {
                DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n",
                                 connector->base.id,
@@ -1553,6 +1561,9 @@ retry:
                goto fail;
        }
 
+       if (plane_state->crtc && (plane == plane->crtc->cursor))
+               plane_state->state->legacy_cursor_update = true;
+
        ret = __drm_atomic_helper_disable_plane(plane, plane_state);
        if (ret != 0)
                goto fail;
@@ -1605,9 +1616,6 @@ int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
        plane_state->src_h = 0;
        plane_state->src_w = 0;
 
-       if (plane->crtc && (plane == plane->crtc->cursor))
-               plane_state->state->legacy_cursor_update = true;
-
        return 0;
 }
 
@@ -1741,6 +1749,7 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
        struct drm_crtc_state *crtc_state;
        struct drm_plane_state *primary_state;
        struct drm_crtc *crtc = set->crtc;
+       int hdisplay, vdisplay;
        int ret;
 
        crtc_state = drm_atomic_get_crtc_state(state, crtc);
@@ -1783,19 +1792,21 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
        if (ret != 0)
                return ret;
 
+       drm_crtc_get_hv_timing(set->mode, &hdisplay, &vdisplay);
+
        drm_atomic_set_fb_for_plane(primary_state, set->fb);
        primary_state->crtc_x = 0;
        primary_state->crtc_y = 0;
-       primary_state->crtc_h = set->mode->vdisplay;
-       primary_state->crtc_w = set->mode->hdisplay;
+       primary_state->crtc_h = vdisplay;
+       primary_state->crtc_w = hdisplay;
        primary_state->src_x = set->x << 16;
        primary_state->src_y = set->y << 16;
        if (primary_state->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) {
-               primary_state->src_h = set->mode->hdisplay << 16;
-               primary_state->src_w = set->mode->vdisplay << 16;
+               primary_state->src_h = hdisplay << 16;
+               primary_state->src_w = vdisplay << 16;
        } else {
-               primary_state->src_h = set->mode->vdisplay << 16;
-               primary_state->src_w = set->mode->hdisplay << 16;
+               primary_state->src_h = vdisplay << 16;
+               primary_state->src_w = hdisplay << 16;
        }
 
 commit:
index e673c13..69cbab5 100644 (file)
@@ -342,6 +342,7 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper)
        struct drm_plane *plane;
        struct drm_atomic_state *state;
        int i, ret;
+       unsigned plane_mask;
 
        state = drm_atomic_state_alloc(dev);
        if (!state)
@@ -349,11 +350,10 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper)
 
        state->acquire_ctx = dev->mode_config.acquire_ctx;
 retry:
+       plane_mask = 0;
        drm_for_each_plane(plane, dev) {
                struct drm_plane_state *plane_state;
 
-               plane->old_fb = plane->fb;
-
                plane_state = drm_atomic_get_plane_state(state, plane);
                if (IS_ERR(plane_state)) {
                        ret = PTR_ERR(plane_state);
@@ -362,6 +362,9 @@ retry:
 
                plane_state->rotation = BIT(DRM_ROTATE_0);
 
+               plane->old_fb = plane->fb;
+               plane_mask |= 1 << drm_plane_index(plane);
+
                /* disable non-primary: */
                if (plane->type == DRM_PLANE_TYPE_PRIMARY)
                        continue;
@@ -382,19 +385,7 @@ retry:
        ret = drm_atomic_commit(state);
 
 fail:
-       drm_for_each_plane(plane, dev) {
-               if (ret == 0) {
-                       struct drm_framebuffer *new_fb = plane->state->fb;
-                       if (new_fb)
-                               drm_framebuffer_reference(new_fb);
-                       plane->fb = new_fb;
-                       plane->crtc = plane->state->crtc;
-
-                       if (plane->old_fb)
-                               drm_framebuffer_unreference(plane->old_fb);
-               }
-               plane->old_fb = NULL;
-       }
+       drm_atomic_clean_old_fb(dev, plane_mask, ret);
 
        if (ret == -EDEADLK)
                goto backoff;
@@ -1236,7 +1227,9 @@ static int pan_display_atomic(struct fb_var_screeninfo *var,
        struct drm_fb_helper *fb_helper = info->par;
        struct drm_device *dev = fb_helper->dev;
        struct drm_atomic_state *state;
+       struct drm_plane *plane;
        int i, ret;
+       unsigned plane_mask;
 
        state = drm_atomic_state_alloc(dev);
        if (!state)
@@ -1244,19 +1237,22 @@ static int pan_display_atomic(struct fb_var_screeninfo *var,
 
        state->acquire_ctx = dev->mode_config.acquire_ctx;
 retry:
+       plane_mask = 0;
        for(i = 0; i < fb_helper->crtc_count; i++) {
                struct drm_mode_set *mode_set;
 
                mode_set = &fb_helper->crtc_info[i].mode_set;
 
-               mode_set->crtc->primary->old_fb = mode_set->crtc->primary->fb;
-
                mode_set->x = var->xoffset;
                mode_set->y = var->yoffset;
 
                ret = __drm_atomic_helper_set_config(mode_set, state);
                if (ret != 0)
                        goto fail;
+
+               plane = mode_set->crtc->primary;
+               plane_mask |= 1 << drm_plane_index(plane);
+               plane->old_fb = plane->fb;
        }
 
        ret = drm_atomic_commit(state);
@@ -1268,26 +1264,7 @@ retry:
 
 
 fail:
-       for(i = 0; i < fb_helper->crtc_count; i++) {
-               struct drm_mode_set *mode_set;
-               struct drm_plane *plane;
-
-               mode_set = &fb_helper->crtc_info[i].mode_set;
-               plane = mode_set->crtc->primary;
-
-               if (ret == 0) {
-                       struct drm_framebuffer *new_fb = plane->state->fb;
-
-                       if (new_fb)
-                               drm_framebuffer_reference(new_fb);
-                       plane->fb = new_fb;
-                       plane->crtc = plane->state->crtc;
-
-                       if (plane->old_fb)
-                               drm_framebuffer_unreference(plane->old_fb);
-               }
-               plane->old_fb = NULL;
-       }
+       drm_atomic_clean_old_fb(dev, plane_mask, ret);
 
        if (ret == -EDEADLK)
                goto backoff;
index 8afda45..95bb27d 100644 (file)
@@ -351,6 +351,8 @@ enum intel_dpll_id {
        /* hsw/bdw */
        DPLL_ID_WRPLL1 = 0,
        DPLL_ID_WRPLL2 = 1,
+       DPLL_ID_SPLL = 2,
+
        /* skl */
        DPLL_ID_SKL_DPLL1 = 0,
        DPLL_ID_SKL_DPLL2 = 1,
@@ -367,6 +369,7 @@ struct intel_dpll_hw_state {
 
        /* hsw, bdw */
        uint32_t wrpll;
+       uint32_t spll;
 
        /* skl */
        /*
@@ -2648,6 +2651,7 @@ struct i915_params {
        int enable_cmd_parser;
        /* leave bools at the end to not create holes */
        bool enable_hangcheck;
+       bool fastboot;
        bool prefault_disable;
        bool load_detect_test;
        bool reset;
index 5cf4a19..91bb1fc 100644 (file)
@@ -3809,6 +3809,7 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_caching *args = data;
        struct drm_i915_gem_object *obj;
        enum i915_cache_level level;
@@ -3837,9 +3838,11 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
+       intel_runtime_pm_get(dev_priv);
+
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
-               return ret;
+               goto rpm_put;
 
        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {
@@ -3852,6 +3855,9 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
        drm_gem_object_unreference(&obj->base);
 unlock:
        mutex_unlock(&dev->struct_mutex);
+rpm_put:
+       intel_runtime_pm_put(dev_priv);
+
        return ret;
 }
 
index 96bb238..4be13a5 100644 (file)
@@ -40,6 +40,7 @@ struct i915_params i915 __read_mostly = {
        .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
        .disable_power_well = -1,
        .enable_ips = 1,
+       .fastboot = 0,
        .prefault_disable = 0,
        .load_detect_test = 0,
        .reset = true,
@@ -133,6 +134,10 @@ MODULE_PARM_DESC(disable_power_well,
 module_param_named_unsafe(enable_ips, i915.enable_ips, int, 0600);
 MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
 
+module_param_named(fastboot, i915.fastboot, bool, 0600);
+MODULE_PARM_DESC(fastboot,
+       "Try to skip unnecessary mode sets at boot time (default: false)");
+
 module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600);
 MODULE_PARM_DESC(prefault_disable,
        "Disable page prefaulting for pread/pwrite/reloc (default:false). "
index b84aaa0..6a2c76e 100644 (file)
@@ -138,18 +138,6 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
        pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
 }
 
-static void hsw_crt_pre_enable(struct intel_encoder *encoder)
-{
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL already enabled\n");
-       I915_WRITE(SPLL_CTL,
-                  SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC);
-       POSTING_READ(SPLL_CTL);
-       udelay(20);
-}
-
 /* Note: The caller is required to filter out dpms modes not supported by the
  * platform. */
 static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
@@ -216,19 +204,6 @@ static void pch_post_disable_crt(struct intel_encoder *encoder)
        intel_disable_crt(encoder);
 }
 
-static void hsw_crt_post_disable(struct intel_encoder *encoder)
-{
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t val;
-
-       DRM_DEBUG_KMS("Disabling SPLL\n");
-       val = I915_READ(SPLL_CTL);
-       WARN_ON(!(val & SPLL_PLL_ENABLE));
-       I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
-       POSTING_READ(SPLL_CTL);
-}
-
 static void intel_enable_crt(struct intel_encoder *encoder)
 {
        struct intel_crt *crt = intel_encoder_to_crt(encoder);
@@ -280,6 +255,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
        if (HAS_DDI(dev)) {
                pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL;
                pipe_config->port_clock = 135000 * 2;
+
+               pipe_config->dpll_hw_state.wrpll = 0;
+               pipe_config->dpll_hw_state.spll =
+                       SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
        }
 
        return true;
@@ -860,8 +839,6 @@ void intel_crt_init(struct drm_device *dev)
        if (HAS_DDI(dev)) {
                crt->base.get_config = hsw_crt_get_config;
                crt->base.get_hw_state = intel_ddi_get_hw_state;
-               crt->base.pre_enable = hsw_crt_pre_enable;
-               crt->base.post_disable = hsw_crt_post_disable;
        } else {
                crt->base.get_config = intel_crt_get_config;
                crt->base.get_hw_state = intel_crt_get_hw_state;
index b25e99a..a6752a6 100644 (file)
@@ -1286,6 +1286,18 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
                }
 
                crtc_state->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
+       } else if (crtc_state->ddi_pll_sel == PORT_CLK_SEL_SPLL) {
+               struct drm_atomic_state *state = crtc_state->base.state;
+               struct intel_shared_dpll_config *spll =
+                       &intel_atomic_get_shared_dpll_state(state)[DPLL_ID_SPLL];
+
+               if (spll->crtc_mask &&
+                   WARN_ON(spll->hw_state.spll != crtc_state->dpll_hw_state.spll))
+                       return false;
+
+               crtc_state->shared_dpll = DPLL_ID_SPLL;
+               spll->hw_state.spll = crtc_state->dpll_hw_state.spll;
+               spll->crtc_mask |= 1 << intel_crtc->pipe;
        }
 
        return true;
@@ -2437,7 +2449,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
        }
 }
 
-static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
+static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
                               struct intel_shared_dpll *pll)
 {
        I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll);
@@ -2445,8 +2457,16 @@ static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
        udelay(20);
 }
 
-static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv,
+static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
                                struct intel_shared_dpll *pll)
+{
+       I915_WRITE(SPLL_CTL, pll->config.hw_state.spll);
+       POSTING_READ(SPLL_CTL);
+       udelay(20);
+}
+
+static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
+                                 struct intel_shared_dpll *pll)
 {
        uint32_t val;
 
@@ -2455,9 +2475,19 @@ static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv,
        POSTING_READ(WRPLL_CTL(pll->id));
 }
 
-static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
-                                    struct intel_shared_dpll *pll,
-                                    struct intel_dpll_hw_state *hw_state)
+static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
+                                struct intel_shared_dpll *pll)
+{
+       uint32_t val;
+
+       val = I915_READ(SPLL_CTL);
+       I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+       POSTING_READ(SPLL_CTL);
+}
+
+static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
+                                      struct intel_shared_dpll *pll,
+                                      struct intel_dpll_hw_state *hw_state)
 {
        uint32_t val;
 
@@ -2470,25 +2500,50 @@ static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
        return val & WRPLL_PLL_ENABLE;
 }
 
+static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
+                                     struct intel_shared_dpll *pll,
+                                     struct intel_dpll_hw_state *hw_state)
+{
+       uint32_t val;
+
+       if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+               return false;
+
+       val = I915_READ(SPLL_CTL);
+       hw_state->spll = val;
+
+       return val & SPLL_PLL_ENABLE;
+}
+
+
 static const char * const hsw_ddi_pll_names[] = {
        "WRPLL 1",
        "WRPLL 2",
+       "SPLL"
 };
 
 static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv)
 {
        int i;
 
-       dev_priv->num_shared_dpll = 2;
+       dev_priv->num_shared_dpll = 3;
 
-       for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+       for (i = 0; i < 2; i++) {
                dev_priv->shared_dplls[i].id = i;
                dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
-               dev_priv->shared_dplls[i].disable = hsw_ddi_pll_disable;
-               dev_priv->shared_dplls[i].enable = hsw_ddi_pll_enable;
+               dev_priv->shared_dplls[i].disable = hsw_ddi_wrpll_disable;
+               dev_priv->shared_dplls[i].enable = hsw_ddi_wrpll_enable;
                dev_priv->shared_dplls[i].get_hw_state =
-                       hsw_ddi_pll_get_hw_state;
+                       hsw_ddi_wrpll_get_hw_state;
        }
+
+       /* SPLL is special, but needs to be initialized anyway.. */
+       dev_priv->shared_dplls[i].id = i;
+       dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
+       dev_priv->shared_dplls[i].disable = hsw_ddi_spll_disable;
+       dev_priv->shared_dplls[i].enable = hsw_ddi_spll_enable;
+       dev_priv->shared_dplls[i].get_hw_state = hsw_ddi_spll_get_hw_state;
+
 }
 
 static const char * const skl_ddi_pll_names[] = {
index f62ffc0..71860f8 100644 (file)
@@ -2646,11 +2646,13 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
        return;
 
 valid_fb:
-       plane_state->src_x = plane_state->src_y = 0;
+       plane_state->src_x = 0;
+       plane_state->src_y = 0;
        plane_state->src_w = fb->width << 16;
        plane_state->src_h = fb->height << 16;
 
-       plane_state->crtc_x = plane_state->src_y = 0;
+       plane_state->crtc_x = 0;
+       plane_state->crtc_y = 0;
        plane_state->crtc_w = fb->width;
        plane_state->crtc_h = fb->height;
 
@@ -4237,6 +4239,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
        struct intel_shared_dpll *pll;
        struct intel_shared_dpll_config *shared_dpll;
        enum intel_dpll_id i;
+       int max = dev_priv->num_shared_dpll;
 
        shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
 
@@ -4271,9 +4274,11 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
                WARN_ON(shared_dpll[i].crtc_mask);
 
                goto found;
-       }
+       } else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv))
+               /* Do not consider SPLL */
+               max = 2;
 
-       for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+       for (i = 0; i < max; i++) {
                pll = &dev_priv->shared_dplls[i];
 
                /* Only want to check enabled timings first */
@@ -9723,6 +9728,8 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
        case PORT_CLK_SEL_WRPLL2:
                pipe_config->shared_dpll = DPLL_ID_WRPLL2;
                break;
+       case PORT_CLK_SEL_SPLL:
+               pipe_config->shared_dpll = DPLL_ID_SPLL;
        }
 }
 
@@ -12003,9 +12010,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
                              pipe_config->dpll_hw_state.cfgcr1,
                              pipe_config->dpll_hw_state.cfgcr2);
        } else if (HAS_DDI(dev)) {
-               DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x\n",
+               DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
                              pipe_config->ddi_pll_sel,
-                             pipe_config->dpll_hw_state.wrpll);
+                             pipe_config->dpll_hw_state.wrpll,
+                             pipe_config->dpll_hw_state.spll);
        } else {
                DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
                              "fp0: 0x%x, fp1: 0x%x\n",
@@ -12528,6 +12536,7 @@ intel_pipe_config_compare(struct drm_device *dev,
        PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
        PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
        PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
+       PIPE_CONF_CHECK_X(dpll_hw_state.spll);
        PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
        PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
        PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
@@ -13032,6 +13041,9 @@ static int intel_atomic_check(struct drm_device *dev,
                struct intel_crtc_state *pipe_config =
                        to_intel_crtc_state(crtc_state);
 
+               memset(&to_intel_crtc(crtc)->atomic, 0,
+                      sizeof(struct intel_crtc_atomic_commit));
+
                /* Catch I915_MODE_FLAG_INHERITED */
                if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
                        crtc_state->mode_changed = true;
@@ -13056,7 +13068,8 @@ static int intel_atomic_check(struct drm_device *dev,
                if (ret)
                        return ret;
 
-               if (intel_pipe_config_compare(state->dev,
+               if (i915.fastboot &&
+                   intel_pipe_config_compare(state->dev,
                                        to_intel_crtc_state(crtc->state),
                                        pipe_config, true)) {
                        crtc_state->mode_changed = false;
@@ -14364,16 +14377,17 @@ static int intel_framebuffer_init(struct drm_device *dev,
 static struct drm_framebuffer *
 intel_user_framebuffer_create(struct drm_device *dev,
                              struct drm_file *filp,
-                             struct drm_mode_fb_cmd2 *mode_cmd)
+                             struct drm_mode_fb_cmd2 *user_mode_cmd)
 {
        struct drm_i915_gem_object *obj;
+       struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
 
        obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
-                                               mode_cmd->handles[0]));
+                                               mode_cmd.handles[0]));
        if (&obj->base == NULL)
                return ERR_PTR(-ENOENT);
 
-       return intel_framebuffer_create(dev, mode_cmd, obj);
+       return intel_framebuffer_create(dev, &mode_cmd, obj);
 }
 
 #ifndef CONFIG_DRM_FBDEV_EMULATION
@@ -14705,6 +14719,9 @@ static struct intel_quirk intel_quirks[] = {
        /* Apple Macbook 2,1 (Core 2 T7400) */
        { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
 
+       /* Apple Macbook 4,1 */
+       { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
+
        /* Toshiba CB35 Chromebook (Celeron 2955U) */
        { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
 
index d52a15d..071a76b 100644 (file)
@@ -4449,7 +4449,7 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
        POSTING_READ(GEN6_RPNSWREQ);
 
        dev_priv->rps.cur_freq = val;
-       trace_intel_gpu_freq_change(val * 50);
+       trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
 }
 
 static void valleyview_set_rps(struct drm_device *dev, u8 val)
@@ -7255,7 +7255,8 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
        if (IS_GEN9(dev_priv->dev))
-               return (val * GT_FREQUENCY_MULTIPLIER) / GEN9_FREQ_SCALER;
+               return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
+                                        GEN9_FREQ_SCALER);
        else if (IS_CHERRYVIEW(dev_priv->dev))
                return chv_gpu_freq(dev_priv, val);
        else if (IS_VALLEYVIEW(dev_priv->dev))
@@ -7267,13 +7268,14 @@ int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
 {
        if (IS_GEN9(dev_priv->dev))
-               return (val * GEN9_FREQ_SCALER) / GT_FREQUENCY_MULTIPLIER;
+               return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
+                                        GT_FREQUENCY_MULTIPLIER);
        else if (IS_CHERRYVIEW(dev_priv->dev))
                return chv_freq_opcode(dev_priv, val);
        else if (IS_VALLEYVIEW(dev_priv->dev))
                return byt_freq_opcode(dev_priv, val);
        else
-               return val / GT_FREQUENCY_MULTIPLIER;
+               return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
 }
 
 struct request_boost {
index 4f2068f..a7bf6a9 100644 (file)
@@ -70,6 +70,11 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
        BUG_ON(pixels_2 != pixels_current && pixels_2 != pixels_prev);
        BUG_ON(pixels_current == pixels_prev);
 
+       if (!handle || !file_priv) {
+               mga_hide_cursor(mdev);
+               return 0;
+       }
+
        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (!obj)
                return -ENOENT;
@@ -88,12 +93,6 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
                goto out_unreserve1;
        }
 
-       if (!handle) {
-               mga_hide_cursor(mdev);
-               ret = 0;
-               goto out1;
-       }
-
        /* Move cursor buffers into VRAM if they aren't already */
        if (!pixels_1->pin_count) {
                ret = mgag200_bo_pin(pixels_1, TTM_PL_FLAG_VRAM,
index 28bc202..40f845e 100644 (file)
@@ -7,6 +7,7 @@ struct nvkm_instmem {
        const struct nvkm_instmem_func *func;
        struct nvkm_subdev subdev;
 
+       spinlock_t lock;
        struct list_head list;
        u32 reserved;
 
index 8b8332e..d5e6938 100644 (file)
@@ -367,6 +367,7 @@ static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
                return -ENODEV;
        }
        obj = (union acpi_object *)buffer.pointer;
+       len = min(len, (int)obj->buffer.length);
        memcpy(bios+offset, obj->buffer.pointer, len);
        kfree(buffer.pointer);
        return len;
index 3050042..a02813e 100644 (file)
@@ -39,6 +39,7 @@
 
 #include <nvif/client.h>
 #include <nvif/device.h>
+#include <nvif/ioctl.h>
 
 #include <drmP.h>
 
@@ -65,9 +66,10 @@ struct nouveau_drm_tile {
 };
 
 enum nouveau_drm_object_route {
-       NVDRM_OBJECT_NVIF = 0,
+       NVDRM_OBJECT_NVIF = NVIF_IOCTL_V0_OWNER_NVIF,
        NVDRM_OBJECT_USIF,
        NVDRM_OBJECT_ABI16,
+       NVDRM_OBJECT_ANY = NVIF_IOCTL_V0_OWNER_ANY,
 };
 
 enum nouveau_drm_notify_route {
index 89dc4ce..6ae1b34 100644 (file)
@@ -313,7 +313,10 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
        if (nvif_unpack(argv->v0, 0, 0, true)) {
                /* block access to objects not created via this interface */
                owner = argv->v0.owner;
-               argv->v0.owner = NVDRM_OBJECT_USIF;
+               if (argv->v0.object == 0ULL)
+                       argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
+               else
+                       argv->v0.owner = NVDRM_OBJECT_USIF;
        } else
                goto done;
 
index e3c783d..caf22b5 100644 (file)
@@ -278,6 +278,12 @@ nvkm_device_pci_10de_0fe3[] = {
        {}
 };
 
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0fe4[] = {
+       { 0x144d, 0xc740, NULL, { .War00C800_0 = true } },
+       {}
+};
+
 static const struct nvkm_device_pci_vendor
 nvkm_device_pci_10de_104b[] = {
        { 0x1043, 0x844c, "GeForce GT 625" },
@@ -688,6 +694,12 @@ nvkm_device_pci_10de_1199[] = {
        {}
 };
 
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_11e0[] = {
+       { 0x1558, 0x5106, NULL, { .War00C800_0 = true } },
+       {}
+};
+
 static const struct nvkm_device_pci_vendor
 nvkm_device_pci_10de_11e3[] = {
        { 0x17aa, 0x3683, "GeForce GTX 760A" },
@@ -1370,7 +1382,7 @@ nvkm_device_pci_10de[] = {
        { 0x0fe1, "GeForce GT 730M" },
        { 0x0fe2, "GeForce GT 745M" },
        { 0x0fe3, "GeForce GT 745M", nvkm_device_pci_10de_0fe3 },
-       { 0x0fe4, "GeForce GT 750M" },
+       { 0x0fe4, "GeForce GT 750M", nvkm_device_pci_10de_0fe4 },
        { 0x0fe9, "GeForce GT 750M" },
        { 0x0fea, "GeForce GT 755M" },
        { 0x0fec, "GeForce 710A" },
@@ -1485,7 +1497,7 @@ nvkm_device_pci_10de[] = {
        { 0x11c6, "GeForce GTX 650 Ti" },
        { 0x11c8, "GeForce GTX 650" },
        { 0x11cb, "GeForce GT 740" },
-       { 0x11e0, "GeForce GTX 770M" },
+       { 0x11e0, "GeForce GTX 770M", nvkm_device_pci_10de_11e0 },
        { 0x11e1, "GeForce GTX 765M" },
        { 0x11e2, "GeForce GTX 765M" },
        { 0x11e3, "GeForce GTX 760M", nvkm_device_pci_10de_11e3 },
index b5b8759..74de7a9 100644 (file)
@@ -207,6 +207,8 @@ gf117_grctx_generate_attrib(struct gf100_grctx *info)
                        const u32 b =  beta * gr->ppc_tpc_nr[gpc][ppc];
                        const u32 t = timeslice_mode;
                        const u32 o = PPC_UNIT(gpc, ppc, 0);
+                       if (!(gr->ppc_mask[gpc] & (1 << ppc)))
+                               continue;
                        mmio_skip(info, o + 0xc0, (t << 28) | (b << 16) | ++bo);
                        mmio_wr32(info, o + 0xc0, (t << 28) | (b << 16) | --bo);
                        bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
index 194afe9..7dacb3c 100644 (file)
@@ -52,10 +52,12 @@ mmio_list_base:
 #endif
 
 #ifdef INCLUDE_CODE
+#define gpc_addr(reg,addr)                                                    /*
+*/     imm32(reg,addr)                                                       /*
+*/     or reg NV_PGRAPH_GPCX_GPCCS_MMIO_CTRL_BASE_ENABLE
 #define gpc_wr32(addr,reg)                                                    /*
+*/     gpc_addr($r14,addr)                                                   /*
 */     mov b32 $r15 reg                                                      /*
-*/     imm32($r14, addr)                                                     /*
-*/     or $r14 NV_PGRAPH_GPCX_GPCCS_MMIO_CTRL_BASE_ENABLE                    /*
 */     call(nv_wr32)
 
 // reports an exception to the host
@@ -161,7 +163,7 @@ init:
 
 #if NV_PGRAPH_GPCX_UNK__SIZE > 0
        // figure out which, and how many, UNKs are actually present
-       imm32($r14, 0x500c30)
+       gpc_addr($r14, 0x500c30)
        clear b32 $r2
        clear b32 $r3
        clear b32 $r4
index 64d07df..bb820ff 100644 (file)
@@ -314,7 +314,7 @@ uint32_t gf117_grgpc_code[] = {
        0x03f01200,
        0x0002d000,
        0x17f104bd,
-       0x10fe0542,
+       0x10fe0545,
        0x0007f100,
        0x0003f007,
        0xbd0000d0,
@@ -338,184 +338,184 @@ uint32_t gf117_grgpc_code[] = {
        0x02d00103,
        0xf104bd00,
        0xf00c30e7,
-       0x24bd50e3,
-       0x44bd34bd,
-/* 0x0430: init_unk_loop */
-       0xb06821f4,
-       0x0bf400f6,
-       0x01f7f00f,
-       0xfd04f2bb,
-       0x30b6054f,
-/* 0x0445: init_unk_next */
-       0x0120b601,
-       0xb004e0b6,
-       0x1bf40126,
-/* 0x0451: init_unk_done */
-       0x070380e2,
-       0xf1080480,
-       0xf0010027,
-       0x22cf0223,
-       0x9534bd00,
-       0x07f10825,
-       0x03f0c000,
-       0x0005d001,
-       0x07f104bd,
-       0x03f0c100,
-       0x0005d001,
-       0x0e9804bd,
-       0x010f9800,
-       0x015021f5,
-       0xbb002fbb,
-       0x0e98003f,
-       0x020f9801,
-       0x015021f5,
-       0xfd050e98,
-       0x2ebb00ef,
-       0x003ebb00,
-       0x98020e98,
-       0x21f5030f,
-       0x0e980150,
-       0x00effd07,
-       0xbb002ebb,
-       0x35b6003e,
-       0x0007f102,
-       0x0103f0d3,
-       0xbd0003d0,
-       0x0825b604,
-       0xb60635b6,
-       0x30b60120,
-       0x0824b601,
-       0xb90834b6,
-       0x21f5022f,
-       0x2fbb02d3,
-       0x003fbb00,
-       0x010007f1,
-       0xd00203f0,
+       0xe5f050e3,
+       0xbd24bd01,
+/* 0x0433: init_unk_loop */
+       0xf444bd34,
+       0xf6b06821,
+       0x0f0bf400,
+       0xbb01f7f0,
+       0x4ffd04f2,
+       0x0130b605,
+/* 0x0448: init_unk_next */
+       0xb60120b6,
+       0x26b004e0,
+       0xe21bf401,
+/* 0x0454: init_unk_done */
+       0x80070380,
+       0x27f10804,
+       0x23f00100,
+       0x0022cf02,
+       0x259534bd,
+       0x0007f108,
+       0x0103f0c0,
+       0xbd0005d0,
+       0x0007f104,
+       0x0103f0c1,
+       0xbd0005d0,
+       0x000e9804,
+       0xf5010f98,
+       0xbb015021,
+       0x3fbb002f,
+       0x010e9800,
+       0xf5020f98,
+       0x98015021,
+       0xeffd050e,
+       0x002ebb00,
+       0x98003ebb,
+       0x0f98020e,
+       0x5021f503,
+       0x070e9801,
+       0xbb00effd,
+       0x3ebb002e,
+       0x0235b600,
+       0xd30007f1,
+       0xd00103f0,
        0x04bd0003,
-       0x29f024bd,
-       0x0007f11f,
-       0x0203f008,
-       0xbd0002d0,
-/* 0x0505: main */
-       0x0031f404,
-       0xf00028f4,
-       0x21f424d7,
-       0xf401f439,
-       0xf404e4b0,
-       0x81fe1e18,
-       0x0627f001,
-       0x12fd20bd,
-       0x01e4b604,
-       0xfe051efd,
-       0x21f50018,
-       0x0ef405fa,
-/* 0x0535: main_not_ctx_xfer */
-       0x10ef94d3,
-       0xf501f5f0,
-       0xf4037e21,
-/* 0x0542: ih */
-       0x80f9c60e,
-       0xf90188fe,
-       0xf990f980,
-       0xf9b0f9a0,
-       0xf9e0f9d0,
-       0xf104bdf0,
-       0xf00200a7,
-       0xaacf00a3,
-       0x04abc400,
-       0xf02c0bf4,
-       0xe7f124d7,
-       0xe3f01a00,
-       0x00eecf00,
-       0x1900f7f1,
-       0xcf00f3f0,
-       0x21f400ff,
-       0x01e7f004,
-       0x1d0007f1,
-       0xd00003f0,
-       0x04bd000e,
-/* 0x0590: ih_no_fifo */
-       0x010007f1,
-       0xd00003f0,
-       0x04bd000a,
-       0xe0fcf0fc,
-       0xb0fcd0fc,
-       0x90fca0fc,
-       0x88fe80fc,
-       0xf480fc00,
-       0x01f80032,
-/* 0x05b4: hub_barrier_done */
-       0x9801f7f0,
-       0xfebb040e,
-       0x02ffb904,
-       0x9418e7f1,
-       0xf440e3f0,
-       0x00f89d21,
-/* 0x05cc: ctx_redswitch */
-       0xf120f7f0,
+       0xb60825b6,
+       0x20b60635,
+       0x0130b601,
+       0xb60824b6,
+       0x2fb90834,
+       0xd321f502,
+       0x002fbb02,
+       0xf1003fbb,
+       0xf0010007,
+       0x03d00203,
+       0xbd04bd00,
+       0x1f29f024,
+       0x080007f1,
+       0xd00203f0,
+       0x04bd0002,
+/* 0x0508: main */
+       0xf40031f4,
+       0xd7f00028,
+       0x3921f424,
+       0xb0f401f4,
+       0x18f404e4,
+       0x0181fe1e,
+       0xbd0627f0,
+       0x0412fd20,
+       0xfd01e4b6,
+       0x18fe051e,
+       0xfd21f500,
+       0xd30ef405,
+/* 0x0538: main_not_ctx_xfer */
+       0xf010ef94,
+       0x21f501f5,
+       0x0ef4037e,
+/* 0x0545: ih */
+       0xfe80f9c6,
+       0x80f90188,
+       0xa0f990f9,
+       0xd0f9b0f9,
+       0xf0f9e0f9,
+       0xa7f104bd,
+       0xa3f00200,
+       0x00aacf00,
+       0xf404abc4,
+       0xd7f02c0b,
+       0x00e7f124,
+       0x00e3f01a,
+       0xf100eecf,
+       0xf01900f7,
+       0xffcf00f3,
+       0x0421f400,
+       0xf101e7f0,
+       0xf01d0007,
+       0x0ed00003,
+/* 0x0593: ih_no_fifo */
+       0xf104bd00,
+       0xf0010007,
+       0x0ad00003,
+       0xfc04bd00,
+       0xfce0fcf0,
+       0xfcb0fcd0,
+       0xfc90fca0,
+       0x0088fe80,
+       0x32f480fc,
+/* 0x05b7: hub_barrier_done */
+       0xf001f800,
+       0x0e9801f7,
+       0x04febb04,
+       0xf102ffb9,
+       0xf09418e7,
+       0x21f440e3,
+/* 0x05cf: ctx_redswitch */
+       0xf000f89d,
+       0x07f120f7,
+       0x03f08500,
+       0x000fd001,
+       0xe7f004bd,
+/* 0x05e1: ctx_redswitch_delay */
+       0x01e2b608,
+       0xf1fd1bf4,
+       0xf10800f5,
+       0xf10200f5,
        0xf0850007,
        0x0fd00103,
-       0xf004bd00,
-/* 0x05de: ctx_redswitch_delay */
-       0xe2b608e7,
-       0xfd1bf401,
-       0x0800f5f1,
-       0x0200f5f1,
-       0x850007f1,
-       0xd00103f0,
-       0x04bd000f,
-/* 0x05fa: ctx_xfer */
-       0x07f100f8,
-       0x03f08100,
-       0x000fd002,
-       0x11f404bd,
-       0xcc21f507,
-/* 0x060d: ctx_xfer_not_load */
-       0x6a21f505,
-       0xf124bd02,
-       0xf047fc07,
-       0x02d00203,
-       0xf004bd00,
-       0x20b6012c,
-       0xfc07f103,
-       0x0203f04a,
-       0xbd0002d0,
-       0x01acf004,
-       0xf102a5f0,
-       0xf00000b7,
-       0x0c9850b3,
-       0x0fc4b604,
-       0x9800bcbb,
-       0x0d98000c,
-       0x00e7f001,
-       0x016f21f5,
-       0xf101acf0,
-       0xf04000b7,
-       0x0c9850b3,
-       0x0fc4b604,
-       0x9800bcbb,
-       0x0d98010c,
-       0x060f9802,
-       0x0800e7f1,
-       0x016f21f5,
+       0xf804bd00,
+/* 0x05fd: ctx_xfer */
+       0x0007f100,
+       0x0203f081,
+       0xbd000fd0,
+       0x0711f404,
+       0x05cf21f5,
+/* 0x0610: ctx_xfer_not_load */
+       0x026a21f5,
+       0x07f124bd,
+       0x03f047fc,
+       0x0002d002,
+       0x2cf004bd,
+       0x0320b601,
+       0x4afc07f1,
+       0xd00203f0,
+       0x04bd0002,
        0xf001acf0,
-       0xb7f104a5,
-       0xb3f03000,
+       0xb7f102a5,
+       0xb3f00000,
        0x040c9850,
        0xbb0fc4b6,
        0x0c9800bc,
-       0x030d9802,
-       0xf1080f98,
-       0xf50200e7,
-       0xf5016f21,
-       0xf4025e21,
-       0x12f40601,
-/* 0x06a9: ctx_xfer_post */
-       0x7f21f507,
-/* 0x06ad: ctx_xfer_done */
-       0xb421f502,
-       0x0000f805,
-       0x00000000,
+       0x010d9800,
+       0xf500e7f0,
+       0xf0016f21,
+       0xb7f101ac,
+       0xb3f04000,
+       0x040c9850,
+       0xbb0fc4b6,
+       0x0c9800bc,
+       0x020d9801,
+       0xf1060f98,
+       0xf50800e7,
+       0xf0016f21,
+       0xa5f001ac,
+       0x00b7f104,
+       0x50b3f030,
+       0xb6040c98,
+       0xbcbb0fc4,
+       0x020c9800,
+       0x98030d98,
+       0xe7f1080f,
+       0x21f50200,
+       0x21f5016f,
+       0x01f4025e,
+       0x0712f406,
+/* 0x06ac: ctx_xfer_post */
+       0x027f21f5,
+/* 0x06b0: ctx_xfer_done */
+       0x05b721f5,
+       0x000000f8,
        0x00000000,
        0x00000000,
        0x00000000,
index 2f59643..911976d 100644 (file)
@@ -314,7 +314,7 @@ uint32_t gk104_grgpc_code[] = {
        0x03f01200,
        0x0002d000,
        0x17f104bd,
-       0x10fe0542,
+       0x10fe0545,
        0x0007f100,
        0x0003f007,
        0xbd0000d0,
@@ -338,184 +338,184 @@ uint32_t gk104_grgpc_code[] = {
        0x02d00103,
        0xf104bd00,
        0xf00c30e7,
-       0x24bd50e3,
-       0x44bd34bd,
-/* 0x0430: init_unk_loop */
-       0xb06821f4,
-       0x0bf400f6,
-       0x01f7f00f,
-       0xfd04f2bb,
-       0x30b6054f,
-/* 0x0445: init_unk_next */
-       0x0120b601,
-       0xb004e0b6,
-       0x1bf40126,
-/* 0x0451: init_unk_done */
-       0x070380e2,
-       0xf1080480,
-       0xf0010027,
-       0x22cf0223,
-       0x9534bd00,
-       0x07f10825,
-       0x03f0c000,
-       0x0005d001,
-       0x07f104bd,
-       0x03f0c100,
-       0x0005d001,
-       0x0e9804bd,
-       0x010f9800,
-       0x015021f5,
-       0xbb002fbb,
-       0x0e98003f,
-       0x020f9801,
-       0x015021f5,
-       0xfd050e98,
-       0x2ebb00ef,
-       0x003ebb00,
-       0x98020e98,
-       0x21f5030f,
-       0x0e980150,
-       0x00effd07,
-       0xbb002ebb,
-       0x35b6003e,
-       0x0007f102,
-       0x0103f0d3,
-       0xbd0003d0,
-       0x0825b604,
-       0xb60635b6,
-       0x30b60120,
-       0x0824b601,
-       0xb90834b6,
-       0x21f5022f,
-       0x2fbb02d3,
-       0x003fbb00,
-       0x010007f1,
-       0xd00203f0,
+       0xe5f050e3,
+       0xbd24bd01,
+/* 0x0433: init_unk_loop */
+       0xf444bd34,
+       0xf6b06821,
+       0x0f0bf400,
+       0xbb01f7f0,
+       0x4ffd04f2,
+       0x0130b605,
+/* 0x0448: init_unk_next */
+       0xb60120b6,
+       0x26b004e0,
+       0xe21bf401,
+/* 0x0454: init_unk_done */
+       0x80070380,
+       0x27f10804,
+       0x23f00100,
+       0x0022cf02,
+       0x259534bd,
+       0x0007f108,
+       0x0103f0c0,
+       0xbd0005d0,
+       0x0007f104,
+       0x0103f0c1,
+       0xbd0005d0,
+       0x000e9804,
+       0xf5010f98,
+       0xbb015021,
+       0x3fbb002f,
+       0x010e9800,
+       0xf5020f98,
+       0x98015021,
+       0xeffd050e,
+       0x002ebb00,
+       0x98003ebb,
+       0x0f98020e,
+       0x5021f503,
+       0x070e9801,
+       0xbb00effd,
+       0x3ebb002e,
+       0x0235b600,
+       0xd30007f1,
+       0xd00103f0,
        0x04bd0003,
-       0x29f024bd,
-       0x0007f11f,
-       0x0203f008,
-       0xbd0002d0,
-/* 0x0505: main */
-       0x0031f404,
-       0xf00028f4,
-       0x21f424d7,
-       0xf401f439,
-       0xf404e4b0,
-       0x81fe1e18,
-       0x0627f001,
-       0x12fd20bd,
-       0x01e4b604,
-       0xfe051efd,
-       0x21f50018,
-       0x0ef405fa,
-/* 0x0535: main_not_ctx_xfer */
-       0x10ef94d3,
-       0xf501f5f0,
-       0xf4037e21,
-/* 0x0542: ih */
-       0x80f9c60e,
-       0xf90188fe,
-       0xf990f980,
-       0xf9b0f9a0,
-       0xf9e0f9d0,
-       0xf104bdf0,
-       0xf00200a7,
-       0xaacf00a3,
-       0x04abc400,
-       0xf02c0bf4,
-       0xe7f124d7,
-       0xe3f01a00,
-       0x00eecf00,
-       0x1900f7f1,
-       0xcf00f3f0,
-       0x21f400ff,
-       0x01e7f004,
-       0x1d0007f1,
-       0xd00003f0,
-       0x04bd000e,
-/* 0x0590: ih_no_fifo */
-       0x010007f1,
-       0xd00003f0,
-       0x04bd000a,
-       0xe0fcf0fc,
-       0xb0fcd0fc,
-       0x90fca0fc,
-       0x88fe80fc,
-       0xf480fc00,
-       0x01f80032,
-/* 0x05b4: hub_barrier_done */
-       0x9801f7f0,
-       0xfebb040e,
-       0x02ffb904,
-       0x9418e7f1,
-       0xf440e3f0,
-       0x00f89d21,
-/* 0x05cc: ctx_redswitch */
-       0xf120f7f0,
+       0xb60825b6,
+       0x20b60635,
+       0x0130b601,
+       0xb60824b6,
+       0x2fb90834,
+       0xd321f502,
+       0x002fbb02,
+       0xf1003fbb,
+       0xf0010007,
+       0x03d00203,
+       0xbd04bd00,
+       0x1f29f024,
+       0x080007f1,
+       0xd00203f0,
+       0x04bd0002,
+/* 0x0508: main */
+       0xf40031f4,
+       0xd7f00028,
+       0x3921f424,
+       0xb0f401f4,
+       0x18f404e4,
+       0x0181fe1e,
+       0xbd0627f0,
+       0x0412fd20,
+       0xfd01e4b6,
+       0x18fe051e,
+       0xfd21f500,
+       0xd30ef405,
+/* 0x0538: main_not_ctx_xfer */
+       0xf010ef94,
+       0x21f501f5,
+       0x0ef4037e,
+/* 0x0545: ih */
+       0xfe80f9c6,
+       0x80f90188,
+       0xa0f990f9,
+       0xd0f9b0f9,
+       0xf0f9e0f9,
+       0xa7f104bd,
+       0xa3f00200,
+       0x00aacf00,
+       0xf404abc4,
+       0xd7f02c0b,
+       0x00e7f124,
+       0x00e3f01a,
+       0xf100eecf,
+       0xf01900f7,
+       0xffcf00f3,
+       0x0421f400,
+       0xf101e7f0,
+       0xf01d0007,
+       0x0ed00003,
+/* 0x0593: ih_no_fifo */
+       0xf104bd00,
+       0xf0010007,
+       0x0ad00003,
+       0xfc04bd00,
+       0xfce0fcf0,
+       0xfcb0fcd0,
+       0xfc90fca0,
+       0x0088fe80,
+       0x32f480fc,
+/* 0x05b7: hub_barrier_done */
+       0xf001f800,
+       0x0e9801f7,
+       0x04febb04,
+       0xf102ffb9,
+       0xf09418e7,
+       0x21f440e3,
+/* 0x05cf: ctx_redswitch */
+       0xf000f89d,
+       0x07f120f7,
+       0x03f08500,
+       0x000fd001,
+       0xe7f004bd,
+/* 0x05e1: ctx_redswitch_delay */
+       0x01e2b608,
+       0xf1fd1bf4,
+       0xf10800f5,
+       0xf10200f5,
        0xf0850007,
        0x0fd00103,
-       0xf004bd00,
-/* 0x05de: ctx_redswitch_delay */
-       0xe2b608e7,
-       0xfd1bf401,
-       0x0800f5f1,
-       0x0200f5f1,
-       0x850007f1,
-       0xd00103f0,
-       0x04bd000f,
-/* 0x05fa: ctx_xfer */
-       0x07f100f8,
-       0x03f08100,
-       0x000fd002,
-       0x11f404bd,
-       0xcc21f507,
-/* 0x060d: ctx_xfer_not_load */
-       0x6a21f505,
-       0xf124bd02,
-       0xf047fc07,
-       0x02d00203,
-       0xf004bd00,
-       0x20b6012c,
-       0xfc07f103,
-       0x0203f04a,
-       0xbd0002d0,
-       0x01acf004,
-       0xf102a5f0,
-       0xf00000b7,
-       0x0c9850b3,
-       0x0fc4b604,
-       0x9800bcbb,
-       0x0d98000c,
-       0x00e7f001,
-       0x016f21f5,
-       0xf101acf0,
-       0xf04000b7,
-       0x0c9850b3,
-       0x0fc4b604,
-       0x9800bcbb,
-       0x0d98010c,
-       0x060f9802,
-       0x0800e7f1,
-       0x016f21f5,
+       0xf804bd00,
+/* 0x05fd: ctx_xfer */
+       0x0007f100,
+       0x0203f081,
+       0xbd000fd0,
+       0x0711f404,
+       0x05cf21f5,
+/* 0x0610: ctx_xfer_not_load */
+       0x026a21f5,
+       0x07f124bd,
+       0x03f047fc,
+       0x0002d002,
+       0x2cf004bd,
+       0x0320b601,
+       0x4afc07f1,
+       0xd00203f0,
+       0x04bd0002,
        0xf001acf0,
-       0xb7f104a5,
-       0xb3f03000,
+       0xb7f102a5,
+       0xb3f00000,
        0x040c9850,
        0xbb0fc4b6,
        0x0c9800bc,
-       0x030d9802,
-       0xf1080f98,
-       0xf50200e7,
-       0xf5016f21,
-       0xf4025e21,
-       0x12f40601,
-/* 0x06a9: ctx_xfer_post */
-       0x7f21f507,
-/* 0x06ad: ctx_xfer_done */
-       0xb421f502,
-       0x0000f805,
-       0x00000000,
+       0x010d9800,
+       0xf500e7f0,
+       0xf0016f21,
+       0xb7f101ac,
+       0xb3f04000,
+       0x040c9850,
+       0xbb0fc4b6,
+       0x0c9800bc,
+       0x020d9801,
+       0xf1060f98,
+       0xf50800e7,
+       0xf0016f21,
+       0xa5f001ac,
+       0x00b7f104,
+       0x50b3f030,
+       0xb6040c98,
+       0xbcbb0fc4,
+       0x020c9800,
+       0x98030d98,
+       0xe7f1080f,
+       0x21f50200,
+       0x21f5016f,
+       0x01f4025e,
+       0x0712f406,
+/* 0x06ac: ctx_xfer_post */
+       0x027f21f5,
+/* 0x06b0: ctx_xfer_done */
+       0x05b721f5,
+       0x000000f8,
        0x00000000,
        0x00000000,
        0x00000000,
index ee8e54d..1c6e11b 100644 (file)
@@ -314,7 +314,7 @@ uint32_t gk110_grgpc_code[] = {
        0x03f01200,
        0x0002d000,
        0x17f104bd,
-       0x10fe0542,
+       0x10fe0545,
        0x0007f100,
        0x0003f007,
        0xbd0000d0,
@@ -338,184 +338,184 @@ uint32_t gk110_grgpc_code[] = {
        0x02d00103,
        0xf104bd00,
        0xf00c30e7,
-       0x24bd50e3,
-       0x44bd34bd,
-/* 0x0430: init_unk_loop */
-       0xb06821f4,
-       0x0bf400f6,
-       0x01f7f00f,
-       0xfd04f2bb,
-       0x30b6054f,
-/* 0x0445: init_unk_next */
-       0x0120b601,
-       0xb004e0b6,
-       0x1bf40226,
-/* 0x0451: init_unk_done */
-       0x070380e2,
-       0xf1080480,
-       0xf0010027,
-       0x22cf0223,
-       0x9534bd00,
-       0x07f10825,
-       0x03f0c000,
-       0x0005d001,
-       0x07f104bd,
-       0x03f0c100,
-       0x0005d001,
-       0x0e9804bd,
-       0x010f9800,
-       0x015021f5,
-       0xbb002fbb,
-       0x0e98003f,
-       0x020f9801,
-       0x015021f5,
-       0xfd050e98,
-       0x2ebb00ef,
-       0x003ebb00,
-       0x98020e98,
-       0x21f5030f,
-       0x0e980150,
-       0x00effd07,
-       0xbb002ebb,
-       0x35b6003e,
-       0x0007f102,
-       0x0103f0d3,
-       0xbd0003d0,
-       0x0825b604,
-       0xb60635b6,
-       0x30b60120,
-       0x0824b601,
-       0xb90834b6,
-       0x21f5022f,
-       0x2fbb02d3,
-       0x003fbb00,
-       0x010007f1,
-       0xd00203f0,
+       0xe5f050e3,
+       0xbd24bd01,
+/* 0x0433: init_unk_loop */
+       0xf444bd34,
+       0xf6b06821,
+       0x0f0bf400,
+       0xbb01f7f0,
+       0x4ffd04f2,
+       0x0130b605,
+/* 0x0448: init_unk_next */
+       0xb60120b6,
+       0x26b004e0,
+       0xe21bf402,
+/* 0x0454: init_unk_done */
+       0x80070380,
+       0x27f10804,
+       0x23f00100,
+       0x0022cf02,
+       0x259534bd,
+       0x0007f108,
+       0x0103f0c0,
+       0xbd0005d0,
+       0x0007f104,
+       0x0103f0c1,
+       0xbd0005d0,
+       0x000e9804,
+       0xf5010f98,
+       0xbb015021,
+       0x3fbb002f,
+       0x010e9800,
+       0xf5020f98,
+       0x98015021,
+       0xeffd050e,
+       0x002ebb00,
+       0x98003ebb,
+       0x0f98020e,
+       0x5021f503,
+       0x070e9801,
+       0xbb00effd,
+       0x3ebb002e,
+       0x0235b600,
+       0xd30007f1,
+       0xd00103f0,
        0x04bd0003,
-       0x29f024bd,
-       0x0007f11f,
-       0x0203f030,
-       0xbd0002d0,
-/* 0x0505: main */
-       0x0031f404,
-       0xf00028f4,
-       0x21f424d7,
-       0xf401f439,
-       0xf404e4b0,
-       0x81fe1e18,
-       0x0627f001,
-       0x12fd20bd,
-       0x01e4b604,
-       0xfe051efd,
-       0x21f50018,
-       0x0ef405fa,
-/* 0x0535: main_not_ctx_xfer */
-       0x10ef94d3,
-       0xf501f5f0,
-       0xf4037e21,
-/* 0x0542: ih */
-       0x80f9c60e,
-       0xf90188fe,
-       0xf990f980,
-       0xf9b0f9a0,
-       0xf9e0f9d0,
-       0xf104bdf0,
-       0xf00200a7,
-       0xaacf00a3,
-       0x04abc400,
-       0xf02c0bf4,
-       0xe7f124d7,
-       0xe3f01a00,
-       0x00eecf00,
-       0x1900f7f1,
-       0xcf00f3f0,
-       0x21f400ff,
-       0x01e7f004,
-       0x1d0007f1,
-       0xd00003f0,
-       0x04bd000e,
-/* 0x0590: ih_no_fifo */
-       0x010007f1,
-       0xd00003f0,
-       0x04bd000a,
-       0xe0fcf0fc,
-       0xb0fcd0fc,
-       0x90fca0fc,
-       0x88fe80fc,
-       0xf480fc00,
-       0x01f80032,
-/* 0x05b4: hub_barrier_done */
-       0x9801f7f0,
-       0xfebb040e,
-       0x02ffb904,
-       0x9418e7f1,
-       0xf440e3f0,
-       0x00f89d21,
-/* 0x05cc: ctx_redswitch */
-       0xf120f7f0,
+       0xb60825b6,
+       0x20b60635,
+       0x0130b601,
+       0xb60824b6,
+       0x2fb90834,
+       0xd321f502,
+       0x002fbb02,
+       0xf1003fbb,
+       0xf0010007,
+       0x03d00203,
+       0xbd04bd00,
+       0x1f29f024,
+       0x300007f1,
+       0xd00203f0,
+       0x04bd0002,
+/* 0x0508: main */
+       0xf40031f4,
+       0xd7f00028,
+       0x3921f424,
+       0xb0f401f4,
+       0x18f404e4,
+       0x0181fe1e,
+       0xbd0627f0,
+       0x0412fd20,
+       0xfd01e4b6,
+       0x18fe051e,
+       0xfd21f500,
+       0xd30ef405,
+/* 0x0538: main_not_ctx_xfer */
+       0xf010ef94,
+       0x21f501f5,
+       0x0ef4037e,
+/* 0x0545: ih */
+       0xfe80f9c6,
+       0x80f90188,
+       0xa0f990f9,
+       0xd0f9b0f9,
+       0xf0f9e0f9,
+       0xa7f104bd,
+       0xa3f00200,
+       0x00aacf00,
+       0xf404abc4,
+       0xd7f02c0b,
+       0x00e7f124,
+       0x00e3f01a,
+       0xf100eecf,
+       0xf01900f7,
+       0xffcf00f3,
+       0x0421f400,
+       0xf101e7f0,
+       0xf01d0007,
+       0x0ed00003,
+/* 0x0593: ih_no_fifo */
+       0xf104bd00,
+       0xf0010007,
+       0x0ad00003,
+       0xfc04bd00,
+       0xfce0fcf0,
+       0xfcb0fcd0,
+       0xfc90fca0,
+       0x0088fe80,
+       0x32f480fc,
+/* 0x05b7: hub_barrier_done */
+       0xf001f800,
+       0x0e9801f7,
+       0x04febb04,
+       0xf102ffb9,
+       0xf09418e7,
+       0x21f440e3,
+/* 0x05cf: ctx_redswitch */
+       0xf000f89d,
+       0x07f120f7,
+       0x03f08500,
+       0x000fd001,
+       0xe7f004bd,
+/* 0x05e1: ctx_redswitch_delay */
+       0x01e2b608,
+       0xf1fd1bf4,
+       0xf10800f5,
+       0xf10200f5,
        0xf0850007,
        0x0fd00103,
-       0xf004bd00,
-/* 0x05de: ctx_redswitch_delay */
-       0xe2b608e7,
-       0xfd1bf401,
-       0x0800f5f1,
-       0x0200f5f1,
-       0x850007f1,
-       0xd00103f0,
-       0x04bd000f,
-/* 0x05fa: ctx_xfer */
-       0x07f100f8,
-       0x03f08100,
-       0x000fd002,
-       0x11f404bd,
-       0xcc21f507,
-/* 0x060d: ctx_xfer_not_load */
-       0x6a21f505,
-       0xf124bd02,
-       0xf047fc07,
-       0x02d00203,
-       0xf004bd00,
-       0x20b6012c,
-       0xfc07f103,
-       0x0203f04a,
-       0xbd0002d0,
-       0x01acf004,
-       0xf102a5f0,
-       0xf00000b7,
-       0x0c9850b3,
-       0x0fc4b604,
-       0x9800bcbb,
-       0x0d98000c,
-       0x00e7f001,
-       0x016f21f5,
-       0xf101acf0,
-       0xf04000b7,
-       0x0c9850b3,
-       0x0fc4b604,
-       0x9800bcbb,
-       0x0d98010c,
-       0x060f9802,
-       0x0800e7f1,
-       0x016f21f5,
+       0xf804bd00,
+/* 0x05fd: ctx_xfer */
+       0x0007f100,
+       0x0203f081,
+       0xbd000fd0,
+       0x0711f404,
+       0x05cf21f5,
+/* 0x0610: ctx_xfer_not_load */
+       0x026a21f5,
+       0x07f124bd,
+       0x03f047fc,
+       0x0002d002,
+       0x2cf004bd,
+       0x0320b601,
+       0x4afc07f1,
+       0xd00203f0,
+       0x04bd0002,
        0xf001acf0,
-       0xb7f104a5,
-       0xb3f03000,
+       0xb7f102a5,
+       0xb3f00000,
        0x040c9850,
        0xbb0fc4b6,
        0x0c9800bc,
-       0x030d9802,
-       0xf1080f98,
-       0xf50200e7,
-       0xf5016f21,
-       0xf4025e21,
-       0x12f40601,
-/* 0x06a9: ctx_xfer_post */
-       0x7f21f507,
-/* 0x06ad: ctx_xfer_done */
-       0xb421f502,
-       0x0000f805,
-       0x00000000,
+       0x010d9800,
+       0xf500e7f0,
+       0xf0016f21,
+       0xb7f101ac,
+       0xb3f04000,
+       0x040c9850,
+       0xbb0fc4b6,
+       0x0c9800bc,
+       0x020d9801,
+       0xf1060f98,
+       0xf50800e7,
+       0xf0016f21,
+       0xa5f001ac,
+       0x00b7f104,
+       0x50b3f030,
+       0xb6040c98,
+       0xbcbb0fc4,
+       0x020c9800,
+       0x98030d98,
+       0xe7f1080f,
+       0x21f50200,
+       0x21f5016f,
+       0x01f4025e,
+       0x0712f406,
+/* 0x06ac: ctx_xfer_post */
+       0x027f21f5,
+/* 0x06b0: ctx_xfer_done */
+       0x05b721f5,
+       0x000000f8,
        0x00000000,
        0x00000000,
        0x00000000,
index fbcc342..84af7ec 100644 (file)
@@ -276,7 +276,7 @@ uint32_t gk208_grgpc_code[] = {
        0x02020014,
        0xf6120040,
        0x04bd0002,
-       0xfe048141,
+       0xfe048441,
        0x00400010,
        0x0000f607,
        0x040204bd,
@@ -295,165 +295,165 @@ uint32_t gk208_grgpc_code[] = {
        0x01c90080,
        0xbd0002f6,
        0x0c308e04,
-       0xbd24bd50,
-/* 0x0383: init_unk_loop */
-       0x7e44bd34,
-       0xb0000065,
-       0x0bf400f6,
-       0xbb010f0e,
-       0x4ffd04f2,
-       0x0130b605,
-/* 0x0398: init_unk_next */
-       0xb60120b6,
-       0x26b004e0,
-       0xe21bf401,
-/* 0x03a4: init_unk_done */
-       0xb50703b5,
-       0x00820804,
-       0x22cf0201,
-       0x9534bd00,
-       0x00800825,
-       0x05f601c0,
-       0x8004bd00,
-       0xf601c100,
-       0x04bd0005,
-       0x98000e98,
-       0x207e010f,
-       0x2fbb0001,
-       0x003fbb00,
-       0x98010e98,
-       0x207e020f,
-       0x0e980001,
-       0x00effd05,
-       0xbb002ebb,
-       0x0e98003e,
-       0x030f9802,
-       0x0001207e,
-       0xfd070e98,
-       0x2ebb00ef,
-       0x003ebb00,
-       0x800235b6,
-       0xf601d300,
-       0x04bd0003,
-       0xb60825b6,
-       0x20b60635,
-       0x0130b601,
-       0xb60824b6,
-       0x2fb20834,
-       0x0002687e,
-       0xbb002fbb,
-       0x0080003f,
-       0x03f60201,
-       0xbd04bd00,
-       0x1f29f024,
-       0x02300080,
-       0xbd0002f6,
-/* 0x0445: main */
-       0x0031f404,
-       0x0d0028f4,
-       0x00377e24,
-       0xf401f400,
-       0xf404e4b0,
-       0x81fe1d18,
-       0xbd060201,
-       0x0412fd20,
-       0xfd01e4b6,
-       0x18fe051e,
-       0x05187e00,
-       0xd40ef400,
-/* 0x0474: main_not_ctx_xfer */
-       0xf010ef94,
-       0xf87e01f5,
-       0x0ef40002,
-/* 0x0481: ih */
-       0xfe80f9c7,
-       0x80f90188,
-       0xa0f990f9,
-       0xd0f9b0f9,
-       0xf0f9e0f9,
-       0x004a04bd,
-       0x00aacf02,
-       0xf404abc4,
-       0x240d1f0b,
-       0xcf1a004e,
-       0x004f00ee,
-       0x00ffcf19,
-       0x0000047e,
-       0x0040010e,
-       0x000ef61d,
-/* 0x04be: ih_no_fifo */
-       0x004004bd,
-       0x000af601,
-       0xf0fc04bd,
-       0xd0fce0fc,
-       0xa0fcb0fc,
-       0x80fc90fc,
-       0xfc0088fe,
-       0x0032f480,
-/* 0x04de: hub_barrier_done */
-       0x010f01f8,
-       0xbb040e98,
-       0xffb204fe,
-       0x4094188e,
-       0x00008f7e,
-/* 0x04f2: ctx_redswitch */
-       0x200f00f8,
+       0x01e5f050,
+       0x34bd24bd,
+/* 0x0386: init_unk_loop */
+       0x657e44bd,
+       0xf6b00000,
+       0x0e0bf400,
+       0xf2bb010f,
+       0x054ffd04,
+/* 0x039b: init_unk_next */
+       0xb60130b6,
+       0xe0b60120,
+       0x0126b004,
+/* 0x03a7: init_unk_done */
+       0xb5e21bf4,
+       0x04b50703,
+       0x01008208,
+       0x0022cf02,
+       0x259534bd,
+       0xc0008008,
+       0x0005f601,
+       0x008004bd,
+       0x05f601c1,
+       0x9804bd00,
+       0x0f98000e,
+       0x01207e01,
+       0x002fbb00,
+       0x98003fbb,
+       0x0f98010e,
+       0x01207e02,
+       0x050e9800,
+       0xbb00effd,
+       0x3ebb002e,
+       0x020e9800,
+       0x7e030f98,
+       0x98000120,
+       0xeffd070e,
+       0x002ebb00,
+       0xb6003ebb,
+       0x00800235,
+       0x03f601d3,
+       0xb604bd00,
+       0x35b60825,
+       0x0120b606,
+       0xb60130b6,
+       0x34b60824,
+       0x7e2fb208,
+       0xbb000268,
+       0x3fbb002f,
+       0x01008000,
+       0x0003f602,
+       0x24bd04bd,
+       0x801f29f0,
+       0xf6023000,
+       0x04bd0002,
+/* 0x0448: main */
+       0xf40031f4,
+       0x240d0028,
+       0x0000377e,
+       0xb0f401f4,
+       0x18f404e4,
+       0x0181fe1d,
+       0x20bd0602,
+       0xb60412fd,
+       0x1efd01e4,
+       0x0018fe05,
+       0x00051b7e,
+/* 0x0477: main_not_ctx_xfer */
+       0x94d40ef4,
+       0xf5f010ef,
+       0x02f87e01,
+       0xc70ef400,
+/* 0x0484: ih */
+       0x88fe80f9,
+       0xf980f901,
+       0xf9a0f990,
+       0xf9d0f9b0,
+       0xbdf0f9e0,
+       0x02004a04,
+       0xc400aacf,
+       0x0bf404ab,
+       0x4e240d1f,
+       0xeecf1a00,
+       0x19004f00,
+       0x7e00ffcf,
+       0x0e000004,
+       0x1d004001,
+       0xbd000ef6,
+/* 0x04c1: ih_no_fifo */
+       0x01004004,
+       0xbd000af6,
+       0xfcf0fc04,
+       0xfcd0fce0,
+       0xfca0fcb0,
+       0xfe80fc90,
+       0x80fc0088,
+       0xf80032f4,
+/* 0x04e1: hub_barrier_done */
+       0x98010f01,
+       0xfebb040e,
+       0x8effb204,
+       0x7e409418,
+       0xf800008f,
+/* 0x04f5: ctx_redswitch */
+       0x80200f00,
+       0xf6018500,
+       0x04bd000f,
+/* 0x0502: ctx_redswitch_delay */
+       0xe2b6080e,
+       0xfd1bf401,
+       0x0800f5f1,
+       0x0200f5f1,
        0x01850080,
        0xbd000ff6,
-/* 0x04ff: ctx_redswitch_delay */
-       0xb6080e04,
-       0x1bf401e2,
-       0x00f5f1fd,
-       0x00f5f108,
-       0x85008002,
-       0x000ff601,
-       0x00f804bd,
-/* 0x0518: ctx_xfer */
-       0x02810080,
-       0xbd000ff6,
-       0x0711f404,
-       0x0004f27e,
-/* 0x0528: ctx_xfer_not_load */
-       0x0002167e,
-       0xfc8024bd,
-       0x02f60247,
-       0xf004bd00,
-       0x20b6012c,
-       0x4afc8003,
+/* 0x051b: ctx_xfer */
+       0x8000f804,
+       0xf6028100,
+       0x04bd000f,
+       0x7e0711f4,
+/* 0x052b: ctx_xfer_not_load */
+       0x7e0004f5,
+       0xbd000216,
+       0x47fc8024,
        0x0002f602,
-       0xacf004bd,
-       0x02a5f001,
-       0x5000008b,
-       0xb6040c98,
-       0xbcbb0fc4,
-       0x000c9800,
-       0x0e010d98,
-       0x013d7e00,
-       0x01acf000,
-       0x5040008b,
-       0xb6040c98,
-       0xbcbb0fc4,
-       0x010c9800,
-       0x98020d98,
-       0x004e060f,
-       0x013d7e08,
-       0x01acf000,
-       0x8b04a5f0,
-       0x98503000,
+       0x2cf004bd,
+       0x0320b601,
+       0x024afc80,
+       0xbd0002f6,
+       0x01acf004,
+       0x8b02a5f0,
+       0x98500000,
        0xc4b6040c,
        0x00bcbb0f,
-       0x98020c98,
-       0x0f98030d,
-       0x02004e08,
+       0x98000c98,
+       0x000e010d,
        0x00013d7e,
-       0x00020a7e,
-       0xf40601f4,
-/* 0x05b2: ctx_xfer_post */
-       0x277e0712,
-/* 0x05b6: ctx_xfer_done */
-       0xde7e0002,
-       0x00f80004,
-       0x00000000,
+       0x8b01acf0,
+       0x98504000,
+       0xc4b6040c,
+       0x00bcbb0f,
+       0x98010c98,
+       0x0f98020d,
+       0x08004e06,
+       0x00013d7e,
+       0xf001acf0,
+       0x008b04a5,
+       0x0c985030,
+       0x0fc4b604,
+       0x9800bcbb,
+       0x0d98020c,
+       0x080f9803,
+       0x7e02004e,
+       0x7e00013d,
+       0xf400020a,
+       0x12f40601,
+/* 0x05b5: ctx_xfer_post */
+       0x02277e07,
+/* 0x05b9: ctx_xfer_done */
+       0x04e17e00,
+       0x0000f800,
        0x00000000,
        0x00000000,
        0x00000000,
index 51f5c3c..11bf363 100644 (file)
@@ -289,7 +289,7 @@ uint32_t gm107_grgpc_code[] = {
        0x020014fe,
        0x12004002,
        0xbd0002f6,
-       0x05b04104,
+       0x05b34104,
        0x400010fe,
        0x00f60700,
        0x0204bd00,
@@ -308,259 +308,259 @@ uint32_t gm107_grgpc_code[] = {
        0xc900800f,
        0x0002f601,
        0x308e04bd,
-       0x24bd500c,
-       0x44bd34bd,
-/* 0x03b0: init_unk_loop */
-       0x0000657e,
-       0xf400f6b0,
-       0x010f0e0b,
-       0xfd04f2bb,
-       0x30b6054f,
-/* 0x03c5: init_unk_next */
-       0x0120b601,
-       0xb004e0b6,
-       0x1bf40226,
-/* 0x03d1: init_unk_done */
-       0x0703b5e2,
-       0x820804b5,
-       0xcf020100,
-       0x34bd0022,
-       0x80082595,
-       0xf601c000,
+       0xe5f0500c,
+       0xbd24bd01,
+/* 0x03b3: init_unk_loop */
+       0x7e44bd34,
+       0xb0000065,
+       0x0bf400f6,
+       0xbb010f0e,
+       0x4ffd04f2,
+       0x0130b605,
+/* 0x03c8: init_unk_next */
+       0xb60120b6,
+       0x26b004e0,
+       0xe21bf402,
+/* 0x03d4: init_unk_done */
+       0xb50703b5,
+       0x00820804,
+       0x22cf0201,
+       0x9534bd00,
+       0x00800825,
+       0x05f601c0,
+       0x8004bd00,
+       0xf601c100,
        0x04bd0005,
-       0x01c10080,
-       0xbd0005f6,
-       0x000e9804,
-       0x7e010f98,
-       0xbb000120,
-       0x3fbb002f,
-       0x010e9800,
-       0x7e020f98,
-       0x98000120,
-       0xeffd050e,
-       0x002ebb00,
-       0x98003ebb,
-       0x0f98020e,
-       0x01207e03,
-       0x070e9800,
-       0xbb00effd,
-       0x3ebb002e,
-       0x0235b600,
-       0x01d30080,
-       0xbd0003f6,
-       0x0825b604,
-       0xb60635b6,
-       0x30b60120,
-       0x0824b601,
-       0xb20834b6,
-       0x02687e2f,
-       0x002fbb00,
-       0x0f003fbb,
-       0x8effb23f,
-       0xf0501d60,
-       0x8f7e01e5,
-       0x0c0f0000,
-       0xa88effb2,
-       0xe5f0501d,
-       0x008f7e01,
-       0x03147e00,
-       0xb23f0f00,
-       0x1d608eff,
-       0x01e5f050,
-       0x00008f7e,
-       0xffb2000f,
-       0x501d9c8e,
-       0x7e01e5f0,
-       0x0f00008f,
-       0x03147e01,
-       0x8effb200,
+       0x98000e98,
+       0x207e010f,
+       0x2fbb0001,
+       0x003fbb00,
+       0x98010e98,
+       0x207e020f,
+       0x0e980001,
+       0x00effd05,
+       0xbb002ebb,
+       0x0e98003e,
+       0x030f9802,
+       0x0001207e,
+       0xfd070e98,
+       0x2ebb00ef,
+       0x003ebb00,
+       0x800235b6,
+       0xf601d300,
+       0x04bd0003,
+       0xb60825b6,
+       0x20b60635,
+       0x0130b601,
+       0xb60824b6,
+       0x2fb20834,
+       0x0002687e,
+       0xbb002fbb,
+       0x3f0f003f,
+       0x501d608e,
+       0xb201e5f0,
+       0x008f7eff,
+       0x8e0c0f00,
        0xf0501da8,
-       0x8f7e01e5,
-       0xff0f0000,
-       0x988effb2,
+       0xffb201e5,
+       0x00008f7e,
+       0x0003147e,
+       0x608e3f0f,
        0xe5f0501d,
-       0x008f7e01,
-       0xb2020f00,
-       0x1da88eff,
+       0x7effb201,
+       0x0f00008f,
+       0x1d9c8e00,
        0x01e5f050,
-       0x00008f7e,
+       0x8f7effb2,
+       0x010f0000,
        0x0003147e,
-       0x85050498,
-       0x98504000,
-       0x64b60406,
-       0x0056bb0f,
-/* 0x04e0: tpc_strand_init_tpc_loop */
-       0x05705eb8,
-       0x00657e00,
-       0xbdf6b200,
-/* 0x04ed: tpc_strand_init_idx_loop */
-       0x605eb874,
-       0x7fb20005,
-       0x00008f7e,
-       0x05885eb8,
-       0x082f9500,
-       0x00008f7e,
-       0x058c5eb8,
-       0x082f9500,
+       0x501da88e,
+       0xb201e5f0,
+       0x008f7eff,
+       0x8eff0f00,
+       0xf0501d98,
+       0xffb201e5,
        0x00008f7e,
-       0x05905eb8,
-       0x00657e00,
-       0x06f5b600,
-       0xb601f0b6,
-       0x2fbb08f4,
-       0x003fbb00,
-       0xb60170b6,
-       0x1bf40162,
-       0x0050b7bf,
-       0x0142b608,
-       0x0fa81bf4,
-       0x8effb23f,
-       0xf0501d60,
-       0x8f7e01e5,
-       0x0d0f0000,
-       0xa88effb2,
+       0xa88e020f,
        0xe5f0501d,
-       0x008f7e01,
-       0x03147e00,
-       0x01008000,
-       0x0003f602,
-       0x24bd04bd,
-       0x801f29f0,
-       0xf6023000,
-       0x04bd0002,
-/* 0x0574: main */
-       0xf40031f4,
-       0x240d0028,
-       0x0000377e,
-       0xb0f401f4,
-       0x18f404e4,
-       0x0181fe1d,
-       0x20bd0602,
-       0xb60412fd,
-       0x1efd01e4,
-       0x0018fe05,
-       0x0006477e,
-/* 0x05a3: main_not_ctx_xfer */
-       0x94d40ef4,
-       0xf5f010ef,
-       0x02f87e01,
-       0xc70ef400,
-/* 0x05b0: ih */
-       0x88fe80f9,
-       0xf980f901,
-       0xf9a0f990,
-       0xf9d0f9b0,
-       0xbdf0f9e0,
-       0x02004a04,
-       0xc400aacf,
-       0x0bf404ab,
-       0x4e240d1f,
-       0xeecf1a00,
-       0x19004f00,
-       0x7e00ffcf,
-       0x0e000004,
-       0x1d004001,
-       0xbd000ef6,
-/* 0x05ed: ih_no_fifo */
-       0x01004004,
-       0xbd000af6,
-       0xfcf0fc04,
-       0xfcd0fce0,
-       0xfca0fcb0,
-       0xfe80fc90,
-       0x80fc0088,
-       0xf80032f4,
-/* 0x060d: hub_barrier_done */
-       0x98010f01,
-       0xfebb040e,
-       0x8effb204,
-       0x7e409418,
-       0xf800008f,
-/* 0x0621: ctx_redswitch */
-       0x80200f00,
+       0x7effb201,
+       0x7e00008f,
+       0x98000314,
+       0x00850504,
+       0x06985040,
+       0x0f64b604,
+/* 0x04e3: tpc_strand_init_tpc_loop */
+       0xb80056bb,
+       0x0005705e,
+       0x0000657e,
+       0x74bdf6b2,
+/* 0x04f0: tpc_strand_init_idx_loop */
+       0x05605eb8,
+       0x7e7fb200,
+       0xb800008f,
+       0x0005885e,
+       0x7e082f95,
+       0xb800008f,
+       0x00058c5e,
+       0x7e082f95,
+       0xb800008f,
+       0x0005905e,
+       0x0000657e,
+       0xb606f5b6,
+       0xf4b601f0,
+       0x002fbb08,
+       0xb6003fbb,
+       0x62b60170,
+       0xbf1bf401,
+       0x080050b7,
+       0xf40142b6,
+       0x3f0fa81b,
+       0x501d608e,
+       0xb201e5f0,
+       0x008f7eff,
+       0x8e0d0f00,
+       0xf0501da8,
+       0xffb201e5,
+       0x00008f7e,
+       0x0003147e,
+       0x02010080,
+       0xbd0003f6,
+       0xf024bd04,
+       0x00801f29,
+       0x02f60230,
+/* 0x0577: main */
+       0xf404bd00,
+       0x28f40031,
+       0x7e240d00,
+       0xf4000037,
+       0xe4b0f401,
+       0x1d18f404,
+       0x020181fe,
+       0xfd20bd06,
+       0xe4b60412,
+       0x051efd01,
+       0x7e0018fe,
+       0xf400064a,
+/* 0x05a6: main_not_ctx_xfer */
+       0xef94d40e,
+       0x01f5f010,
+       0x0002f87e,
+/* 0x05b3: ih */
+       0xf9c70ef4,
+       0x0188fe80,
+       0x90f980f9,
+       0xb0f9a0f9,
+       0xe0f9d0f9,
+       0x04bdf0f9,
+       0xcf02004a,
+       0xabc400aa,
+       0x1f0bf404,
+       0x004e240d,
+       0x00eecf1a,
+       0xcf19004f,
+       0x047e00ff,
+       0x010e0000,
+       0xf61d0040,
+       0x04bd000e,
+/* 0x05f0: ih_no_fifo */
+       0xf6010040,
+       0x04bd000a,
+       0xe0fcf0fc,
+       0xb0fcd0fc,
+       0x90fca0fc,
+       0x88fe80fc,
+       0xf480fc00,
+       0x01f80032,
+/* 0x0610: hub_barrier_done */
+       0x0e98010f,
+       0x04febb04,
+       0x188effb2,
+       0x8f7e4094,
+       0x00f80000,
+/* 0x0624: ctx_redswitch */
+       0x0080200f,
+       0x0ff60185,
+       0x0e04bd00,
+/* 0x0631: ctx_redswitch_delay */
+       0x01e2b608,
+       0xf1fd1bf4,
+       0xf10800f5,
+       0x800200f5,
        0xf6018500,
        0x04bd000f,
-/* 0x062e: ctx_redswitch_delay */
-       0xe2b6080e,
-       0xfd1bf401,
-       0x0800f5f1,
-       0x0200f5f1,
-       0x01850080,
-       0xbd000ff6,
-/* 0x0647: ctx_xfer */
-       0x8000f804,
-       0xf6028100,
-       0x04bd000f,
-       0xc48effb2,
-       0xe5f0501d,
-       0x008f7e01,
-       0x0711f400,
-       0x0006217e,
-/* 0x0664: ctx_xfer_not_load */
-       0x0002167e,
-       0xfc8024bd,
-       0x02f60247,
-       0xf004bd00,
-       0x20b6012c,
-       0x4afc8003,
+/* 0x064a: ctx_xfer */
+       0x008000f8,
+       0x0ff60281,
+       0x8e04bd00,
+       0xf0501dc4,
+       0xffb201e5,
+       0x00008f7e,
+       0x7e0711f4,
+/* 0x0667: ctx_xfer_not_load */
+       0x7e000624,
+       0xbd000216,
+       0x47fc8024,
        0x0002f602,
-       0x0c0f04bd,
-       0xa88effb2,
-       0xe5f0501d,
-       0x008f7e01,
-       0x03147e00,
-       0xb23f0f00,
-       0x1d608eff,
-       0x01e5f050,
+       0x2cf004bd,
+       0x0320b601,
+       0x024afc80,
+       0xbd0002f6,
+       0x8e0c0f04,
+       0xf0501da8,
+       0xffb201e5,
        0x00008f7e,
-       0xffb2000f,
-       0x501d9c8e,
-       0x7e01e5f0,
+       0x0003147e,
+       0x608e3f0f,
+       0xe5f0501d,
+       0x7effb201,
        0x0f00008f,
-       0x03147e01,
-       0x01fcf000,
-       0xb203f0b6,
-       0x1da88eff,
+       0x1d9c8e00,
        0x01e5f050,
-       0x00008f7e,
-       0xf001acf0,
-       0x008b02a5,
-       0x0c985000,
-       0x0fc4b604,
-       0x9800bcbb,
-       0x0d98000c,
-       0x7e000e01,
-       0xf000013d,
-       0x008b01ac,
-       0x0c985040,
-       0x0fc4b604,
-       0x9800bcbb,
-       0x0d98010c,
-       0x060f9802,
-       0x7e08004e,
-       0xf000013d,
+       0x8f7effb2,
+       0x010f0000,
+       0x0003147e,
+       0xb601fcf0,
+       0xa88e03f0,
+       0xe5f0501d,
+       0x7effb201,
+       0xf000008f,
        0xa5f001ac,
-       0x30008b04,
+       0x00008b02,
        0x040c9850,
        0xbb0fc4b6,
        0x0c9800bc,
-       0x030d9802,
-       0x4e080f98,
-       0x3d7e0200,
-       0x0a7e0001,
-       0x147e0002,
-       0x01f40003,
-       0x1a12f406,
-/* 0x073c: ctx_xfer_post */
-       0x0002277e,
-       0xffb20d0f,
-       0x501da88e,
-       0x7e01e5f0,
-       0x7e00008f,
-/* 0x0753: ctx_xfer_done */
-       0x7e000314,
-       0xf800060d,
-       0x00000000,
+       0x010d9800,
+       0x3d7e000e,
+       0xacf00001,
+       0x40008b01,
+       0x040c9850,
+       0xbb0fc4b6,
+       0x0c9800bc,
+       0x020d9801,
+       0x4e060f98,
+       0x3d7e0800,
+       0xacf00001,
+       0x04a5f001,
+       0x5030008b,
+       0xb6040c98,
+       0xbcbb0fc4,
+       0x020c9800,
+       0x98030d98,
+       0x004e080f,
+       0x013d7e02,
+       0x020a7e00,
+       0x03147e00,
+       0x0601f400,
+/* 0x073f: ctx_xfer_post */
+       0x7e1a12f4,
+       0x0f000227,
+       0x1da88e0d,
+       0x01e5f050,
+       0x8f7effb2,
+       0x147e0000,
+/* 0x0756: ctx_xfer_done */
+       0x107e0003,
+       0x00f80006,
        0x00000000,
        0x00000000,
        0x00000000,
index dda7a7d..9f5dfc8 100644 (file)
@@ -143,7 +143,7 @@ gf100_gr_zbc_depth_get(struct gf100_gr *gr, int format,
 static int
 gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size)
 {
-       struct gf100_gr *gr = (void *)object->engine;
+       struct gf100_gr *gr = gf100_gr(nvkm_gr(object->engine));
        union {
                struct fermi_a_zbc_color_v0 v0;
        } *args = data;
@@ -189,7 +189,7 @@ gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size)
 static int
 gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size)
 {
-       struct gf100_gr *gr = (void *)object->engine;
+       struct gf100_gr *gr = gf100_gr(nvkm_gr(object->engine));
        union {
                struct fermi_a_zbc_depth_v0 v0;
        } *args = data;
@@ -1530,6 +1530,8 @@ gf100_gr_oneinit(struct nvkm_gr *base)
                gr->ppc_nr[i]  = gr->func->ppc_nr;
                for (j = 0; j < gr->ppc_nr[i]; j++) {
                        u8 mask = nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4)));
+                       if (mask)
+                               gr->ppc_mask[i] |= (1 << j);
                        gr->ppc_tpc_nr[i][j] = hweight8(mask);
                }
        }
index 4611961..02e78b8 100644 (file)
@@ -97,6 +97,7 @@ struct gf100_gr {
        u8 tpc_nr[GPC_MAX];
        u8 tpc_total;
        u8 ppc_nr[GPC_MAX];
+       u8 ppc_mask[GPC_MAX];
        u8 ppc_tpc_nr[GPC_MAX][4];
 
        struct nvkm_memory *unk4188b4;
index 895ba74..1d7dd38 100644 (file)
@@ -97,7 +97,9 @@ static void *
 nvkm_instobj_dtor(struct nvkm_memory *memory)
 {
        struct nvkm_instobj *iobj = nvkm_instobj(memory);
+       spin_lock(&iobj->imem->lock);
        list_del(&iobj->head);
+       spin_unlock(&iobj->imem->lock);
        nvkm_memory_del(&iobj->parent);
        return iobj;
 }
@@ -190,7 +192,9 @@ nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
                nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory);
                iobj->parent = memory;
                iobj->imem = imem;
+               spin_lock(&iobj->imem->lock);
                list_add_tail(&iobj->head, &imem->list);
+               spin_unlock(&iobj->imem->lock);
                memory = &iobj->memory;
        }
 
@@ -309,5 +313,6 @@ nvkm_instmem_ctor(const struct nvkm_instmem_func *func,
 {
        nvkm_subdev_ctor(&nvkm_instmem, device, index, 0, &imem->subdev);
        imem->func = func;
+       spin_lock_init(&imem->lock);
        INIT_LIST_HEAD(&imem->list);
 }
index b61509e..b735173 100644 (file)
@@ -59,7 +59,7 @@ gk104_volt_set(struct nvkm_volt *base, u32 uv)
        duty = (uv - bios->base) * div / bios->pwm_range;
 
        nvkm_wr32(device, 0x20340, div);
-       nvkm_wr32(device, 0x20344, 0x8000000 | duty);
+       nvkm_wr32(device, 0x20344, 0x80000000 | duty);
 
        return 0;
 }
index d302488..84d4563 100644 (file)
@@ -221,11 +221,17 @@ int radeon_bo_create(struct radeon_device *rdev,
        if (!(rdev->flags & RADEON_IS_PCIE))
                bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
 
+       /* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
+        * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
+        */
+       if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
+               bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+
 #ifdef CONFIG_X86_32
        /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
         * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
         */
-       bo->flags &= ~RADEON_GEM_GTT_WC;
+       bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
 #elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
        /* Don't try to enable write-combining when it can't work, or things
         * may be slow
@@ -235,9 +241,10 @@ int radeon_bo_create(struct radeon_device *rdev,
 #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
         thanks to write-combining
 
-       DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
-                     "better performance thanks to write-combining\n");
-       bo->flags &= ~RADEON_GEM_GTT_WC;
+       if (bo->flags & RADEON_GEM_GTT_WC)
+               DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
+                             "better performance thanks to write-combining\n");
+       bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
 #endif
 
        radeon_ttm_placement_from_domain(bo, domain);
index 6d80dde..f4f03dc 100644 (file)
@@ -1542,8 +1542,7 @@ int radeon_pm_late_init(struct radeon_device *rdev)
                                ret = device_create_file(rdev->dev, &dev_attr_power_method);
                                if (ret)
                                        DRM_ERROR("failed to create device file for power method\n");
-                               if (!ret)
-                                       rdev->pm.sysfs_initialized = true;
+                               rdev->pm.sysfs_initialized = true;
                        }
 
                        mutex_lock(&rdev->pm.mutex);
index 3f5e1cf..d37ba2c 100644 (file)
@@ -464,7 +464,7 @@ void rv730_stop_dpm(struct radeon_device *rdev)
        result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled);
 
        if (result != PPSMC_Result_OK)
-               DRM_ERROR("Could not force DPM to low\n");
+               DRM_DEBUG("Could not force DPM to low\n");
 
        WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
 
index b9c7707..e830c89 100644 (file)
@@ -193,7 +193,7 @@ void rv770_stop_dpm(struct radeon_device *rdev)
        result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled);
 
        if (result != PPSMC_Result_OK)
-               DRM_ERROR("Could not force DPM to low.\n");
+               DRM_DEBUG("Could not force DPM to low.\n");
 
        WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
 
@@ -1418,7 +1418,7 @@ int rv770_resume_smc(struct radeon_device *rdev)
 int rv770_set_sw_state(struct radeon_device *rdev)
 {
        if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK)
-               return -EINVAL;
+               DRM_DEBUG("rv770_set_sw_state failed\n");
        return 0;
 }
 
index e72bf46..a82b891 100644 (file)
@@ -2927,7 +2927,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
        { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
        { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
        { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
-       { PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 },
+       { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
        { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
        { 0, 0, 0, 0 },
 };
index 7a9f476..265064c 100644 (file)
@@ -168,7 +168,7 @@ static int vc4_get_clock_select(struct drm_crtc *crtc)
        struct drm_connector *connector;
 
        drm_for_each_connector(connector, crtc->dev) {
-               if (connector && connector->state->crtc == crtc) {
+               if (connector->state->crtc == crtc) {
                        struct drm_encoder *encoder = connector->encoder;
                        struct vc4_encoder *vc4_encoder =
                                to_vc4_encoder(encoder);
@@ -401,7 +401,8 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
                dlist_next++;
 
                HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
-                         (u32 *)vc4_crtc->dlist - (u32 *)vc4->hvs->dlist);
+                         (u32 __iomem *)vc4_crtc->dlist -
+                         (u32 __iomem *)vc4->hvs->dlist);
 
                /* Make the next display list start after ours. */
                vc4_crtc->dlist_size -= (dlist_next - vc4_crtc->dlist);
@@ -591,14 +592,14 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
         * that will take too much.
         */
        primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY);
-       if (!primary_plane) {
+       if (IS_ERR(primary_plane)) {
                dev_err(dev, "failed to construct primary plane\n");
                ret = PTR_ERR(primary_plane);
                goto err;
        }
 
        cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR);
-       if (!cursor_plane) {
+       if (IS_ERR(cursor_plane)) {
                dev_err(dev, "failed to construct cursor plane\n");
                ret = PTR_ERR(cursor_plane);
                goto err_primary;
index 6e73060..d5db9e0 100644 (file)
@@ -259,7 +259,6 @@ static struct platform_driver vc4_platform_driver = {
        .remove         = vc4_platform_drm_remove,
        .driver         = {
                .name   = "vc4-drm",
-               .owner  = THIS_MODULE,
                .of_match_table = vc4_of_match,
        },
 };
index ab1673f..8098c5b 100644 (file)
@@ -75,10 +75,10 @@ void vc4_hvs_dump_state(struct drm_device *dev)
        for (i = 0; i < 64; i += 4) {
                DRM_INFO("0x%08x (%s): 0x%08x 0x%08x 0x%08x 0x%08x\n",
                         i * 4, i < HVS_BOOTLOADER_DLIST_END ? "B" : "D",
-                        ((uint32_t *)vc4->hvs->dlist)[i + 0],
-                        ((uint32_t *)vc4->hvs->dlist)[i + 1],
-                        ((uint32_t *)vc4->hvs->dlist)[i + 2],
-                        ((uint32_t *)vc4->hvs->dlist)[i + 3]);
+                        readl((u32 __iomem *)vc4->hvs->dlist + i + 0),
+                        readl((u32 __iomem *)vc4->hvs->dlist + i + 1),
+                        readl((u32 __iomem *)vc4->hvs->dlist + i + 2),
+                        readl((u32 __iomem *)vc4->hvs->dlist + i + 3));
        }
 }
 
index cdd8b10..887f3ca 100644 (file)
@@ -70,7 +70,7 @@ static bool plane_enabled(struct drm_plane_state *state)
        return state->fb && state->crtc;
 }
 
-struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
+static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
 {
        struct vc4_plane_state *vc4_state;
 
@@ -97,8 +97,8 @@ struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
        return &vc4_state->base;
 }
 
-void vc4_plane_destroy_state(struct drm_plane *plane,
-                            struct drm_plane_state *state)
+static void vc4_plane_destroy_state(struct drm_plane *plane,
+                                   struct drm_plane_state *state)
 {
        struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
 
@@ -108,7 +108,7 @@ void vc4_plane_destroy_state(struct drm_plane *plane,
 }
 
 /* Called during init to allocate the plane's atomic state. */
-void vc4_plane_reset(struct drm_plane *plane)
+static void vc4_plane_reset(struct drm_plane *plane)
 {
        struct vc4_plane_state *vc4_state;
 
@@ -157,6 +157,16 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
        int crtc_w = state->crtc_w;
        int crtc_h = state->crtc_h;
 
+       if (state->crtc_w << 16 != state->src_w ||
+           state->crtc_h << 16 != state->src_h) {
+               /* We don't support scaling yet, which involves
+                * allocating the LBM memory for scaling temporary
+                * storage, and putting filter kernels in the HVS
+                * context.
+                */
+               return -EINVAL;
+       }
+
        if (crtc_x < 0) {
                offset += drm_format_plane_cpp(fb->pixel_format, 0) * -crtc_x;
                crtc_w += crtc_x;
index c6f7a69..7e89288 100644 (file)
@@ -625,7 +625,7 @@ static void hid_close_report(struct hid_device *device)
 
 static void hid_device_release(struct device *dev)
 {
-       struct hid_device *hid = container_of(dev, struct hid_device, dev);
+       struct hid_device *hid = to_hid_device(dev);
 
        hid_close_report(hid);
        kfree(hid->dev_rdesc);
@@ -1571,8 +1571,8 @@ read_report_descriptor(struct file *filp, struct kobject *kobj,
                struct bin_attribute *attr,
                char *buf, loff_t off, size_t count)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct device *dev = kobj_to_dev(kobj);
+       struct hid_device *hdev = to_hid_device(dev);
 
        if (off >= hdev->rsize)
                return 0;
@@ -1589,7 +1589,7 @@ static ssize_t
 show_country(struct device *dev, struct device_attribute *attr,
                char *buf)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
 
        return sprintf(buf, "%02x\n", hdev->country & 0xff);
 }
@@ -1691,11 +1691,6 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
                hid_warn(hdev,
                         "can't create sysfs country code attribute err: %d\n", ret);
 
-       ret = device_create_bin_file(&hdev->dev, &dev_bin_attr_report_desc);
-       if (ret)
-               hid_warn(hdev,
-                        "can't create sysfs report descriptor attribute err: %d\n", ret);
-
        hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n",
                 buf, bus, hdev->version >> 8, hdev->version & 0xff,
                 type, hdev->name, hdev->phys);
@@ -1707,7 +1702,6 @@ EXPORT_SYMBOL_GPL(hid_connect);
 void hid_disconnect(struct hid_device *hdev)
 {
        device_remove_file(&hdev->dev, &dev_attr_country);
-       device_remove_bin_file(&hdev->dev, &dev_bin_attr_report_desc);
        if (hdev->claimed & HID_CLAIMED_INPUT)
                hidinput_disconnect(hdev);
        if (hdev->claimed & HID_CLAIMED_HIDDEV)
@@ -1902,6 +1896,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G29_WHEEL) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) },
@@ -2076,7 +2071,7 @@ struct hid_dynid {
 static ssize_t store_new_id(struct device_driver *drv, const char *buf,
                size_t count)
 {
-       struct hid_driver *hdrv = container_of(drv, struct hid_driver, driver);
+       struct hid_driver *hdrv = to_hid_driver(drv);
        struct hid_dynid *dynid;
        __u32 bus, vendor, product;
        unsigned long driver_data = 0;
@@ -2138,17 +2133,16 @@ static const struct hid_device_id *hid_match_device(struct hid_device *hdev,
 
 static int hid_bus_match(struct device *dev, struct device_driver *drv)
 {
-       struct hid_driver *hdrv = container_of(drv, struct hid_driver, driver);
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_driver *hdrv = to_hid_driver(drv);
+       struct hid_device *hdev = to_hid_device(dev);
 
        return hid_match_device(hdev, hdrv) != NULL;
 }
 
 static int hid_device_probe(struct device *dev)
 {
-       struct hid_driver *hdrv = container_of(dev->driver,
-                       struct hid_driver, driver);
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_driver *hdrv = to_hid_driver(dev->driver);
+       struct hid_device *hdev = to_hid_device(dev);
        const struct hid_device_id *id;
        int ret = 0;
 
@@ -2190,7 +2184,7 @@ unlock_driver_lock:
 
 static int hid_device_remove(struct device *dev)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct hid_driver *hdrv;
        int ret = 0;
 
@@ -2223,12 +2217,9 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
                             char *buf)
 {
        struct hid_device *hdev = container_of(dev, struct hid_device, dev);
-       int len;
-
-       len = snprintf(buf, PAGE_SIZE, "hid:b%04Xg%04Xv%08Xp%08X\n",
-                      hdev->bus, hdev->group, hdev->vendor, hdev->product);
 
-       return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
+       return scnprintf(buf, PAGE_SIZE, "hid:b%04Xg%04Xv%08Xp%08X\n",
+                        hdev->bus, hdev->group, hdev->vendor, hdev->product);
 }
 static DEVICE_ATTR_RO(modalias);
 
@@ -2236,11 +2227,19 @@ static struct attribute *hid_dev_attrs[] = {
        &dev_attr_modalias.attr,
        NULL,
 };
-ATTRIBUTE_GROUPS(hid_dev);
+static struct bin_attribute *hid_dev_bin_attrs[] = {
+       &dev_bin_attr_report_desc,
+       NULL
+};
+static const struct attribute_group hid_dev_group = {
+       .attrs = hid_dev_attrs,
+       .bin_attrs = hid_dev_bin_attrs,
+};
+__ATTRIBUTE_GROUPS(hid_dev);
 
 static int hid_uevent(struct device *dev, struct kobj_uevent_env *env)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
 
        if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X",
                        hdev->bus, hdev->vendor, hdev->product))
@@ -2408,6 +2407,7 @@ static const struct hid_device_id hid_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICK16F1454) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICK16F1454_V2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR, USB_DEVICE_ID_N_S_HARMONY) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 20) },
@@ -2660,6 +2660,7 @@ struct hid_device *hid_allocate_device(void)
        device_initialize(&hdev->dev);
        hdev->dev.release = hid_device_release;
        hdev->dev.bus = &hid_bus_type;
+       device_enable_async_suspend(&hdev->dev);
 
        hid_close_report(hdev);
 
index bcefb9e..5855196 100644 (file)
@@ -655,18 +655,7 @@ static struct hid_driver corsair_driver = {
        .input_mapping = corsair_input_mapping,
 };
 
-static int __init corsair_init(void)
-{
-       return hid_register_driver(&corsair_driver);
-}
-
-static void corsair_exit(void)
-{
-       hid_unregister_driver(&corsair_driver);
-}
-
-module_init(corsair_init);
-module_exit(corsair_exit);
+module_hid_driver(corsair_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Clement Vuchener");
index 7afc3fc..7c38bfa 100644 (file)
@@ -807,7 +807,7 @@ static ssize_t name##_store(struct device *kdev, \
                            struct device_attribute *attr, const char *buf, \
                            size_t count) \
 { \
-       struct hid_device *hdev = container_of(kdev, struct hid_device, dev); \
+       struct hid_device *hdev = to_hid_device(kdev); \
        struct cp2112_usb_config_report cfg; \
        int ret = cp2112_get_usb_config(hdev, &cfg); \
        if (ret) \
@@ -822,7 +822,7 @@ static ssize_t name##_store(struct device *kdev, \
 static ssize_t name##_show(struct device *kdev, \
                           struct device_attribute *attr, char *buf) \
 { \
-       struct hid_device *hdev = container_of(kdev, struct hid_device, dev); \
+       struct hid_device *hdev = to_hid_device(kdev); \
        struct cp2112_usb_config_report cfg; \
        int ret = cp2112_get_usb_config(hdev, &cfg); \
        if (ret) \
@@ -887,7 +887,7 @@ static ssize_t pstr_store(struct device *kdev,
                          struct device_attribute *kattr, const char *buf,
                          size_t count)
 {
-       struct hid_device *hdev = container_of(kdev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(kdev);
        struct cp2112_pstring_attribute *attr =
                container_of(kattr, struct cp2112_pstring_attribute, attr);
        struct cp2112_string_report report;
@@ -918,7 +918,7 @@ static ssize_t pstr_store(struct device *kdev,
 static ssize_t pstr_show(struct device *kdev,
                         struct device_attribute *kattr, char *buf)
 {
-       struct hid_device *hdev = container_of(kdev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(kdev);
        struct cp2112_pstring_attribute *attr =
                container_of(kattr, struct cp2112_pstring_attribute, attr);
        struct cp2112_string_report report;
index 2886b64..acfb522 100644 (file)
@@ -659,13 +659,13 @@ EXPORT_SYMBOL_GPL(hid_dump_device);
 /* enqueue string to 'events' ring buffer */
 void hid_debug_event(struct hid_device *hdev, char *buf)
 {
-       int i;
+       unsigned i;
        struct hid_debug_list *list;
        unsigned long flags;
 
        spin_lock_irqsave(&hdev->debug_list_lock, flags);
        list_for_each_entry(list, &hdev->debug_list, node) {
-               for (i = 0; i < strlen(buf); i++)
+               for (i = 0; buf[i]; i++)
                        list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] =
                                buf[i];
                list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE;
index 0d6f135..a298fbd 100644 (file)
@@ -70,7 +70,7 @@ static void gt683r_brightness_set(struct led_classdev *led_cdev,
 {
        int i;
        struct device *dev = led_cdev->dev->parent;
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct gt683r_led *led = hid_get_drvdata(hdev);
 
        for (i = 0; i < GT683R_LED_COUNT; i++) {
@@ -89,8 +89,7 @@ static ssize_t mode_show(struct device *dev,
                                char *buf)
 {
        u8 sysfs_mode;
-       struct hid_device *hdev = container_of(dev->parent,
-                                       struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev->parent);
        struct gt683r_led *led = hid_get_drvdata(hdev);
 
        if (led->mode == GT683R_LED_NORMAL)
@@ -108,8 +107,7 @@ static ssize_t mode_store(struct device *dev,
                                const char *buf, size_t count)
 {
        u8 sysfs_mode;
-       struct hid_device *hdev = container_of(dev->parent,
-                                       struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev->parent);
        struct gt683r_led *led = hid_get_drvdata(hdev);
 
 
index ac1feea..242ec4f 100644 (file)
 #define USB_VENDOR_ID_ITE               0x048d
 #define USB_DEVICE_ID_ITE_LENOVO_YOGA   0x8386
 #define USB_DEVICE_ID_ITE_LENOVO_YOGA2  0x8350
+#define USB_DEVICE_ID_ITE_LENOVO_YOGA900       0x8396
 
 #define USB_VENDOR_ID_JABRA            0x0b0e
 #define USB_DEVICE_ID_JABRA_SPEAK_410  0x0412
 #define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST  0xc110
 #define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f
 #define USB_DEVICE_ID_LOGITECH_HARMONY_PS3 0x0306
+#define USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS 0xc24d
 #define USB_DEVICE_ID_LOGITECH_MOUSE_C01A      0xc01a
 #define USB_DEVICE_ID_LOGITECH_MOUSE_C05A      0xc05a
 #define USB_DEVICE_ID_LOGITECH_MOUSE_C06A      0xc06a
 #define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2      0xc218
 #define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2    0xc219
 #define USB_DEVICE_ID_LOGITECH_G29_WHEEL       0xc24f
+#define USB_DEVICE_ID_LOGITECH_G920_WHEEL      0xc262
 #define USB_DEVICE_ID_LOGITECH_WINGMAN_F3D     0xc283
 #define USB_DEVICE_ID_LOGITECH_FORCE3D_PRO     0xc286
 #define USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940      0xc287
 #define USB_DEVICE_ID_PICOLCD          0xc002
 #define USB_DEVICE_ID_PICOLCD_BOOTLOADER       0xf002
 #define USB_DEVICE_ID_PICK16F1454      0x0042
+#define USB_DEVICE_ID_PICK16F1454_V2   0xf2f7
 
 #define USB_VENDOR_ID_MICROSOFT                0x045e
 #define USB_DEVICE_ID_SIDEWINDER_GV    0x003b
index 2ba6bf6..bcfaf32 100644 (file)
@@ -303,6 +303,7 @@ static enum power_supply_property hidinput_battery_props[] = {
 
 #define HID_BATTERY_QUIRK_PERCENT      (1 << 0) /* always reports percent */
 #define HID_BATTERY_QUIRK_FEATURE      (1 << 1) /* ask for feature report */
+#define HID_BATTERY_QUIRK_IGNORE       (1 << 2) /* completely ignore the battery */
 
 static const struct hid_device_id hid_battery_quirks[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
@@ -320,6 +321,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
                USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
          HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM,
+               USB_DEVICE_ID_ELECOM_BM084),
+         HID_BATTERY_QUIRK_IGNORE },
        {}
 };
 
@@ -408,6 +412,14 @@ static bool hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
        if (dev->battery != NULL)
                goto out;       /* already initialized? */
 
+       quirks = find_battery_quirk(dev);
+
+       hid_dbg(dev, "device %x:%x:%x %d quirks %d\n",
+               dev->bus, dev->vendor, dev->product, dev->version, quirks);
+
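+       /* Skip power supply setup entirely for ignored batteries */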
+       if (quirks & HID_BATTERY_QUIRK_IGNORE)
+               goto out;
+
        psy_desc = kzalloc(sizeof(*psy_desc), GFP_KERNEL);
        if (psy_desc == NULL)
                goto out;
@@ -424,11 +436,6 @@ static bool hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
        psy_desc->use_for_apm = 0;
        psy_desc->get_property = hidinput_get_battery_property;
 
-       quirks = find_battery_quirk(dev);
-
-       hid_dbg(dev, "device %x:%x:%x %d quirks %d\n",
-               dev->bus, dev->vendor, dev->product, dev->version, quirks);
-
        min = field->logical_minimum;
        max = field->logical_maximum;
 
@@ -960,6 +967,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                goto ignore;
 
        case HID_UP_LOGIVENDOR:
+               /* intentional fall-through */
+       case HID_UP_LOGIVENDOR2:
+               /* intentional fall-through */
+       case HID_UP_LOGIVENDOR3:
                goto ignore;
 
        case HID_UP_PID:
index 8979f1f..0125e35 100644 (file)
@@ -220,7 +220,7 @@ static ssize_t attr_fn_lock_show_cptkbd(struct device *dev,
                struct device_attribute *attr,
                char *buf)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct lenovo_drvdata_cptkbd *cptkbd_data = hid_get_drvdata(hdev);
 
        return snprintf(buf, PAGE_SIZE, "%u\n", cptkbd_data->fn_lock);
@@ -231,7 +231,7 @@ static ssize_t attr_fn_lock_store_cptkbd(struct device *dev,
                const char *buf,
                size_t count)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct lenovo_drvdata_cptkbd *cptkbd_data = hid_get_drvdata(hdev);
        int value;
 
@@ -250,7 +250,7 @@ static ssize_t attr_sensitivity_show_cptkbd(struct device *dev,
                struct device_attribute *attr,
                char *buf)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct lenovo_drvdata_cptkbd *cptkbd_data = hid_get_drvdata(hdev);
 
        return snprintf(buf, PAGE_SIZE, "%u\n",
@@ -262,7 +262,7 @@ static ssize_t attr_sensitivity_store_cptkbd(struct device *dev,
                const char *buf,
                size_t count)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct lenovo_drvdata_cptkbd *cptkbd_data = hid_get_drvdata(hdev);
        int value;
 
@@ -387,7 +387,7 @@ static ssize_t attr_press_to_select_show_tpkbd(struct device *dev,
                struct device_attribute *attr,
                char *buf)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
 
        return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->press_to_select);
@@ -398,7 +398,7 @@ static ssize_t attr_press_to_select_store_tpkbd(struct device *dev,
                const char *buf,
                size_t count)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
        int value;
 
@@ -417,7 +417,7 @@ static ssize_t attr_dragging_show_tpkbd(struct device *dev,
                struct device_attribute *attr,
                char *buf)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
 
        return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->dragging);
@@ -428,7 +428,7 @@ static ssize_t attr_dragging_store_tpkbd(struct device *dev,
                const char *buf,
                size_t count)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
        int value;
 
@@ -447,7 +447,7 @@ static ssize_t attr_release_to_select_show_tpkbd(struct device *dev,
                struct device_attribute *attr,
                char *buf)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
 
        return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->release_to_select);
@@ -458,7 +458,7 @@ static ssize_t attr_release_to_select_store_tpkbd(struct device *dev,
                const char *buf,
                size_t count)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
        int value;
 
@@ -477,7 +477,7 @@ static ssize_t attr_select_right_show_tpkbd(struct device *dev,
                struct device_attribute *attr,
                char *buf)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
 
        return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->select_right);
@@ -488,7 +488,7 @@ static ssize_t attr_select_right_store_tpkbd(struct device *dev,
                const char *buf,
                size_t count)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
        int value;
 
@@ -507,7 +507,7 @@ static ssize_t attr_sensitivity_show_tpkbd(struct device *dev,
                struct device_attribute *attr,
                char *buf)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
 
        return snprintf(buf, PAGE_SIZE, "%u\n",
@@ -519,7 +519,7 @@ static ssize_t attr_sensitivity_store_tpkbd(struct device *dev,
                const char *buf,
                size_t count)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
        int value;
 
@@ -536,7 +536,7 @@ static ssize_t attr_press_speed_show_tpkbd(struct device *dev,
                struct device_attribute *attr,
                char *buf)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
 
        return snprintf(buf, PAGE_SIZE, "%u\n",
@@ -548,7 +548,7 @@ static ssize_t attr_press_speed_store_tpkbd(struct device *dev,
                const char *buf,
                size_t count)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
        int value;
 
@@ -609,7 +609,7 @@ static enum led_brightness lenovo_led_brightness_get_tpkbd(
                        struct led_classdev *led_cdev)
 {
        struct device *dev = led_cdev->dev->parent;
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
        int led_nr = 0;
 
@@ -625,7 +625,7 @@ static void lenovo_led_brightness_set_tpkbd(struct led_classdev *led_cdev,
                        enum led_brightness value)
 {
        struct device *dev = led_cdev->dev->parent;
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
        struct hid_report *report;
        int led_nr = 0;
index c20ac76..c690fae 100644 (file)
@@ -665,8 +665,9 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
        struct lg_drv_data *drv_data;
        int ret;
 
-       /* Only work with the 1st interface (G29 presents multiple) */
-       if (iface_num != 0) {
+       /* The G29 only works with the 1st interface */
+       if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) &&
+           (iface_num != 0)) {
                dbg_hid("%s: ignoring ifnum %d\n", __func__, iface_num);
                return -ENODEV;
        }
index fbddcb3..af3a8ec 100644 (file)
@@ -33,8 +33,6 @@
 #include "hid-lg4ff.h"
 #include "hid-ids.h"
 
-#define to_hid_device(pdev) container_of(pdev, struct hid_device, dev)
-
 #define LG4FF_MMODE_IS_MULTIMODE 0
 #define LG4FF_MMODE_SWITCHED 1
 #define LG4FF_MMODE_NOT_MULTIMODE 2
@@ -1020,7 +1018,7 @@ static void lg4ff_led_set_brightness(struct led_classdev *led_cdev,
                        enum led_brightness value)
 {
        struct device *dev = led_cdev->dev->parent;
-       struct hid_device *hid = container_of(dev, struct hid_device, dev);
+       struct hid_device *hid = to_hid_device(dev);
        struct lg_drv_data *drv_data = hid_get_drvdata(hid);
        struct lg4ff_device_entry *entry;
        int i, state = 0;
@@ -1055,7 +1053,7 @@ static void lg4ff_led_set_brightness(struct led_classdev *led_cdev,
 static enum led_brightness lg4ff_led_get_brightness(struct led_classdev *led_cdev)
 {
        struct device *dev = led_cdev->dev->parent;
-       struct hid_device *hid = container_of(dev, struct hid_device, dev);
+       struct hid_device *hid = to_hid_device(dev);
        struct lg_drv_data *drv_data = hid_get_drvdata(hid);
        struct lg4ff_device_entry *entry;
        int i, value = 0;
index 5fd9786..bd2ab47 100644 (file)
@@ -40,18 +40,22 @@ MODULE_PARM_DESC(disable_tap_to_click,
 
 #define REPORT_ID_HIDPP_SHORT                  0x10
 #define REPORT_ID_HIDPP_LONG                   0x11
+#define REPORT_ID_HIDPP_VERY_LONG              0x12
 
 #define HIDPP_REPORT_SHORT_LENGTH              7
 #define HIDPP_REPORT_LONG_LENGTH               20
+#define HIDPP_REPORT_VERY_LONG_LENGTH          64
 
 #define HIDPP_QUIRK_CLASS_WTP                  BIT(0)
 #define HIDPP_QUIRK_CLASS_M560                 BIT(1)
 #define HIDPP_QUIRK_CLASS_K400                 BIT(2)
+#define HIDPP_QUIRK_CLASS_G920                 BIT(3)
 
 /* bits 2..20 are reserved for classes */
 #define HIDPP_QUIRK_CONNECT_EVENTS             BIT(21)
 #define HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS       BIT(22)
 #define HIDPP_QUIRK_NO_HIDINPUT                        BIT(23)
+#define HIDPP_QUIRK_FORCE_OUTPUT_REPORTS       BIT(24)
 
 #define HIDPP_QUIRK_DELAYED_INIT               (HIDPP_QUIRK_NO_HIDINPUT | \
                                                 HIDPP_QUIRK_CONNECT_EVENTS)
@@ -81,13 +85,13 @@ MODULE_PARM_DESC(disable_tap_to_click,
 struct fap {
        u8 feature_index;
        u8 funcindex_clientid;
-       u8 params[HIDPP_REPORT_LONG_LENGTH - 4U];
+       u8 params[HIDPP_REPORT_VERY_LONG_LENGTH - 4U];
 };
 
 struct rap {
        u8 sub_id;
        u8 reg_address;
-       u8 params[HIDPP_REPORT_LONG_LENGTH - 4U];
+       u8 params[HIDPP_REPORT_VERY_LONG_LENGTH - 4U];
 };
 
 struct hidpp_report {
@@ -144,8 +148,11 @@ static void hidpp_connect_event(struct hidpp_device *hidpp_dev);
 static int __hidpp_send_report(struct hid_device *hdev,
                                struct hidpp_report *hidpp_report)
 {
+       struct hidpp_device *hidpp = hid_get_drvdata(hdev);
        int fields_count, ret;
 
        switch (hidpp_report->report_id) {
        case REPORT_ID_HIDPP_SHORT:
                fields_count = HIDPP_REPORT_SHORT_LENGTH;
@@ -153,6 +160,9 @@ static int __hidpp_send_report(struct hid_device *hdev,
        case REPORT_ID_HIDPP_LONG:
                fields_count = HIDPP_REPORT_LONG_LENGTH;
                break;
+       case REPORT_ID_HIDPP_VERY_LONG:
+               fields_count = HIDPP_REPORT_VERY_LONG_LENGTH;
+               break;
        default:
                return -ENODEV;
        }
@@ -163,9 +173,13 @@ static int __hidpp_send_report(struct hid_device *hdev,
         */
        hidpp_report->device_index = 0xff;
 
-       ret = hid_hw_raw_request(hdev, hidpp_report->report_id,
-               (u8 *)hidpp_report, fields_count, HID_OUTPUT_REPORT,
-               HID_REQ_SET_REPORT);
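+       /* The G920 wants HID++ frames as output reports, not SET_REPORT */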
+       if (hidpp->quirks & HIDPP_QUIRK_FORCE_OUTPUT_REPORTS) {
+               ret = hid_hw_output_report(hdev, (u8 *)hidpp_report, fields_count);
+       } else {
+               ret = hid_hw_raw_request(hdev, hidpp_report->report_id,
+                       (u8 *)hidpp_report, fields_count, HID_OUTPUT_REPORT,
+                       HID_REQ_SET_REPORT);
+       }
 
        return ret == fields_count ? 0 : -1;
 }
@@ -217,8 +231,9 @@ static int hidpp_send_message_sync(struct hidpp_device *hidpp,
                goto exit;
        }
 
-       if (response->report_id == REPORT_ID_HIDPP_LONG &&
-           response->fap.feature_index == HIDPP20_ERROR) {
+       if ((response->report_id == REPORT_ID_HIDPP_LONG ||
+                       response->report_id == REPORT_ID_HIDPP_VERY_LONG) &&
+                       response->fap.feature_index == HIDPP20_ERROR) {
                ret = response->fap.params[1];
                dbg_hid("%s:got hidpp 2.0 error %02X\n", __func__, ret);
                goto exit;
@@ -243,7 +258,11 @@ static int hidpp_send_fap_command_sync(struct hidpp_device *hidpp,
        message = kzalloc(sizeof(struct hidpp_report), GFP_KERNEL);
        if (!message)
                return -ENOMEM;
-       message->report_id = REPORT_ID_HIDPP_LONG;
+
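+       /* Use the very-long report when params do not fit a long report */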
+       if (param_count > (HIDPP_REPORT_LONG_LENGTH - 4))
+               message->report_id = REPORT_ID_HIDPP_VERY_LONG;
+       else
+               message->report_id = REPORT_ID_HIDPP_LONG;
        message->fap.feature_index = feat_index;
        message->fap.funcindex_clientid = funcindex_clientid;
        memcpy(&message->fap.params, params, param_count);
@@ -258,13 +277,23 @@ static int hidpp_send_rap_command_sync(struct hidpp_device *hidpp_dev,
        struct hidpp_report *response)
 {
        struct hidpp_report *message;
-       int ret;
+       int ret, max_count;
 
-       if ((report_id != REPORT_ID_HIDPP_SHORT) &&
-           (report_id != REPORT_ID_HIDPP_LONG))
+       switch (report_id) {
+       case REPORT_ID_HIDPP_SHORT:
+               max_count = HIDPP_REPORT_SHORT_LENGTH - 4;
+               break;
+       case REPORT_ID_HIDPP_LONG:
+               max_count = HIDPP_REPORT_LONG_LENGTH - 4;
+               break;
+       case REPORT_ID_HIDPP_VERY_LONG:
+               max_count = HIDPP_REPORT_VERY_LONG_LENGTH - 4;
+               break;
+       default:
                return -EINVAL;
+       }
 
-       if (param_count > sizeof(message->rap.params))
+       if (param_count > max_count)
                return -EINVAL;
 
        message = kzalloc(sizeof(struct hidpp_report), GFP_KERNEL);
@@ -508,10 +537,19 @@ static int hidpp_devicenametype_get_device_name(struct hidpp_device *hidpp,
        if (ret)
                return ret;
 
-       if (response.report_id == REPORT_ID_HIDPP_LONG)
+       switch (response.report_id) {
+       case REPORT_ID_HIDPP_VERY_LONG:
+               count = HIDPP_REPORT_VERY_LONG_LENGTH - 4;
+               break;
+       case REPORT_ID_HIDPP_LONG:
                count = HIDPP_REPORT_LONG_LENGTH - 4;
-       else
+               break;
+       case REPORT_ID_HIDPP_SHORT:
                count = HIDPP_REPORT_SHORT_LENGTH - 4;
+               break;
+       default:
+               return -EPROTO;
+       }
 
        if (len_buf < count)
                count = len_buf;
@@ -1257,6 +1295,131 @@ static int k400_connect(struct hid_device *hdev, bool connected)
        return k400_disable_tap_to_click(hidpp);
 }
 
+/* ------------------------------------------------------------------------- */
+/* Logitech G920 Driving Force Racing Wheel for Xbox One                     */
+/* ------------------------------------------------------------------------- */
+
+#define HIDPP_PAGE_G920_FORCE_FEEDBACK                 0x8123
+
+/* Using session ID = 1 */
+#define CMD_G920_FORCE_GET_APERTURE                    0x51
+#define CMD_G920_FORCE_SET_APERTURE                    0x61
+
+struct g920_private_data {
+       u8 force_feature;
+       u16 range;
+};
+
+static ssize_t g920_range_show(struct device *dev, struct device_attribute *attr,
+                               char *buf)
+{
+       struct hid_device *hid = to_hid_device(dev);
+       struct hidpp_device *hidpp = hid_get_drvdata(hid);
+       struct g920_private_data *pdata;
+
+       pdata = hidpp->private_data;
+       if (!pdata) {
+               hid_err(hid, "Private driver data not found!\n");
+               return -EINVAL;
+       }
+
+       return scnprintf(buf, PAGE_SIZE, "%u\n", pdata->range);
+}
+
+static ssize_t g920_range_store(struct device *dev, struct device_attribute *attr,
+                                const char *buf, size_t count)
+{
+       struct hid_device *hid = to_hid_device(dev);
+       struct hidpp_device *hidpp = hid_get_drvdata(hid);
+       struct g920_private_data *pdata;
+       struct hidpp_report response;
+       u8 params[2];
+       int ret;
+       u16 range = simple_strtoul(buf, NULL, 10);
+
+       pdata = hidpp->private_data;
+       if (!pdata) {
+               hid_err(hid, "Private driver data not found!\n");
+               return -EINVAL;
+       }
+
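+       /* Clamp the requested rotation range to the 180-900 degree window */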
+       if (range < 180)
+               range = 180;
+       else if (range > 900)
+               range = 900;
+
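+       /* The range is sent as a big-endian 16-bit value */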
+       params[0] = range >> 8;
+       params[1] = range & 0x00FF;
+
+       ret = hidpp_send_fap_command_sync(hidpp, pdata->force_feature,
+               CMD_G920_FORCE_SET_APERTURE, params, 2, &response);
+       if (ret)
+               return ret;
+
+       pdata->range = range;
+       return count;
+}
+
+static DEVICE_ATTR(range, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, g920_range_show, g920_range_store);
+
+static int g920_allocate(struct hid_device *hdev)
+{
+       struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+       struct g920_private_data *pdata;
+
+       pdata = devm_kzalloc(&hdev->dev, sizeof(struct g920_private_data),
+                       GFP_KERNEL);
+       if (!pdata)
+               return -ENOMEM;
+
+       hidpp->private_data = pdata;
+
+       return 0;
+}
+
+static int g920_get_config(struct hidpp_device *hidpp)
+{
+       struct g920_private_data *pdata = hidpp->private_data;
+       struct hidpp_report response;
+       u8 feature_type;
+       u8 feature_index;
+       int ret;
+
+       if (!pdata) {
+               hid_err(hidpp->hid_dev, "Private driver data not found!\n");
+               return -EINVAL;
+       }
+
+       /* Find feature and store for later use */
+       ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_G920_FORCE_FEEDBACK,
+               &feature_index, &feature_type);
+       if (ret)
+               return ret;
+
+       pdata->force_feature = feature_index;
+
+       /* Read current Range */
+       ret = hidpp_send_fap_command_sync(hidpp, feature_index,
+               CMD_G920_FORCE_GET_APERTURE, NULL, 0, &response);
+       if (ret > 0) {
+               hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
+                       __func__, ret);
+               return -EPROTO;
+       }
+       if (ret)
+               return ret;
+
+       pdata->range = get_unaligned_be16(&response.fap.params[0]);
+
+       /* Create sysfs interface */
+       ret = device_create_file(&(hidpp->hid_dev->dev), &dev_attr_range);
+       if (ret)
+               hid_warn(hidpp->hid_dev, "Unable to create sysfs interface for \"range\", errno %d\n", ret);
+
+       return 0;
+}
+
 /* -------------------------------------------------------------------------- */
 /* Generic HID++ devices                                                      */
 /* -------------------------------------------------------------------------- */
@@ -1276,6 +1439,25 @@ static int hidpp_input_mapping(struct hid_device *hdev, struct hid_input *hi,
        return 0;
 }
 
+static int hidpp_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+               struct hid_field *field, struct hid_usage *usage,
+               unsigned long **bit, int *max)
+{
+       struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+
+       /* Ensure that Logitech G920 is not given a default fuzz/flat value */
+       if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
+               if (usage->type == EV_ABS && (usage->code == ABS_X ||
+                               usage->code == ABS_Y || usage->code == ABS_Z ||
+                               usage->code == ABS_RZ)) {
+                       field->application = HID_GD_MULTIAXIS;
+               }
+       }
+
+       return 0;
+}
+
 static void hidpp_populate_input(struct hidpp_device *hidpp,
                struct input_dev *input, bool origin_is_hid_core)
 {
@@ -1347,6 +1529,14 @@ static int hidpp_raw_event(struct hid_device *hdev, struct hid_report *report,
 
        /* Generic HID++ processing. */
        switch (data[0]) {
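+       /* 64-byte very-long reports are handled like long reports */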
+       case REPORT_ID_HIDPP_VERY_LONG:
+               if (size != HIDPP_REPORT_VERY_LONG_LENGTH) {
+                       hid_err(hdev, "received hid++ report of bad size (%d)",
+                               size);
+                       return 1;
+               }
+               ret = hidpp_raw_hidpp_event(hidpp, data, size);
+               break;
        case REPORT_ID_HIDPP_LONG:
                if (size != HIDPP_REPORT_LONG_LENGTH) {
                        hid_err(hdev, "received hid++ report of bad size (%d)",
@@ -1393,10 +1583,12 @@ static void hidpp_overwrite_name(struct hid_device *hdev, bool use_unifying)
        else
                name = hidpp_get_device_name(hidpp);
 
-       if (!name)
+       if (!name) {
                hid_err(hdev, "unable to retrieve the name of the device");
-       else
+       } else {
+               dbg_hid("HID++: Got name: %s\n", name);
                snprintf(hdev->name, sizeof(hdev->name), "%s", name);
+       }
 
        kfree(name);
 }
@@ -1547,6 +1739,10 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
                ret = k400_allocate(hdev);
                if (ret)
                        goto allocate_fail;
+       } else if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
+               ret = g920_allocate(hdev);
+               if (ret)
+                       goto allocate_fail;
        }
 
        INIT_WORK(&hidpp->work, delayed_work_cb);
@@ -1559,6 +1755,25 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
                goto hid_parse_fail;
        }
 
+       if (hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT)
+               connect_mask &= ~HID_CONNECT_HIDINPUT;
+
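+       /* The G920 must be opened before the HID++ queries below */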
+       if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
+               ret = hid_hw_start(hdev, connect_mask);
+               if (ret) {
+                       hid_err(hdev, "hw start failed\n");
+                       goto hid_hw_start_fail;
+               }
+               ret = hid_hw_open(hdev);
+               if (ret < 0) {
+                       dev_err(&hdev->dev, "%s:hid_hw_open returned error:%d\n",
+                               __func__, ret);
+                       hid_hw_stop(hdev);
+                       goto hid_hw_start_fail;
+               }
+       }
+
        /* Allow incoming packets */
        hid_device_io_start(hdev);
 
@@ -1567,8 +1782,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
                if (!connected) {
                        ret = -ENODEV;
                        hid_err(hdev, "Device not connected");
-                       hid_device_io_stop(hdev);
-                       goto hid_parse_fail;
+                       goto hid_hw_open_failed;
                }
 
                hid_info(hdev, "HID++ %u.%u device connected.\n",
@@ -1581,19 +1795,22 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
        if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) {
                ret = wtp_get_config(hidpp);
                if (ret)
-                       goto hid_parse_fail;
+                       goto hid_hw_open_failed;
+       } else if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_G920)) {
+               ret = g920_get_config(hidpp);
+               if (ret)
+                       goto hid_hw_open_failed;
        }
 
        /* Block incoming packets */
        hid_device_io_stop(hdev);
 
-       if (hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT)
-               connect_mask &= ~HID_CONNECT_HIDINPUT;
-
-       ret = hid_hw_start(hdev, connect_mask);
-       if (ret) {
-               hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);
-               goto hid_hw_start_fail;
+       if (!(hidpp->quirks & HIDPP_QUIRK_CLASS_G920)) {
+               ret = hid_hw_start(hdev, connect_mask);
+               if (ret) {
+                       hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);
+                       goto hid_hw_start_fail;
+               }
        }
 
        if (hidpp->quirks & HIDPP_QUIRK_CONNECT_EVENTS) {
@@ -1605,6 +1822,13 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
 
        return ret;
 
+hid_hw_open_failed:
+       hid_device_io_stop(hdev);
+       if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
+               device_remove_file(&hdev->dev, &dev_attr_range);
+               hid_hw_close(hdev);
+               hid_hw_stop(hdev);
+       }
 hid_hw_start_fail:
 hid_parse_fail:
        cancel_work_sync(&hidpp->work);
@@ -1618,9 +1842,13 @@ static void hidpp_remove(struct hid_device *hdev)
 {
        struct hidpp_device *hidpp = hid_get_drvdata(hdev);
 
+       if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
+               device_remove_file(&hdev->dev, &dev_attr_range);
+               hid_hw_close(hdev);
+       }
+       hid_hw_stop(hdev);
        cancel_work_sync(&hidpp->work);
        mutex_destroy(&hidpp->send_mutex);
-       hid_hw_stop(hdev);
 }
 
 static const struct hid_device_id hidpp_devices[] = {
@@ -1648,6 +1876,9 @@ static const struct hid_device_id hidpp_devices[] = {
 
        { HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
                USB_VENDOR_ID_LOGITECH, HID_ANY_ID)},
+
+       { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL),
+               .driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS},
        {}
 };
 
@@ -1661,6 +1892,7 @@ static struct hid_driver hidpp_driver = {
        .raw_event = hidpp_raw_event,
        .input_configured = hidpp_input_configured,
        .input_mapping = hidpp_input_mapping,
+       .input_mapped = hidpp_input_mapped,
 };
 
 module_hid_driver(hidpp_driver);
index 4ee7716..296d499 100644 (file)
@@ -272,7 +272,7 @@ static ssize_t mt_show_quirks(struct device *dev,
                           struct device_attribute *attr,
                           char *buf)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct mt_device *td = hid_get_drvdata(hdev);
 
        return sprintf(buf, "%u\n", td->mtclass.quirks);
@@ -282,7 +282,7 @@ static ssize_t mt_set_quirks(struct device *dev,
                          struct device_attribute *attr,
                          const char *buf, size_t count)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct mt_device *td = hid_get_drvdata(hdev);
 
        unsigned long val;
index 756d1ef..1b0084d 100644 (file)
@@ -173,7 +173,7 @@ static ssize_t show_phys_width(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct ntrig_data *nd = hid_get_drvdata(hdev);
 
        return sprintf(buf, "%d\n", nd->sensor_physical_width);
@@ -185,7 +185,7 @@ static ssize_t show_phys_height(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct ntrig_data *nd = hid_get_drvdata(hdev);
 
        return sprintf(buf, "%d\n", nd->sensor_physical_height);
@@ -197,7 +197,7 @@ static ssize_t show_log_width(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct ntrig_data *nd = hid_get_drvdata(hdev);
 
        return sprintf(buf, "%d\n", nd->sensor_logical_width);
@@ -209,7 +209,7 @@ static ssize_t show_log_height(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct ntrig_data *nd = hid_get_drvdata(hdev);
 
        return sprintf(buf, "%d\n", nd->sensor_logical_height);
@@ -221,7 +221,7 @@ static ssize_t show_min_width(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct ntrig_data *nd = hid_get_drvdata(hdev);
 
        return sprintf(buf, "%d\n", nd->min_width *
@@ -233,7 +233,7 @@ static ssize_t set_min_width(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf, size_t count)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct ntrig_data *nd = hid_get_drvdata(hdev);
 
        unsigned long val;
@@ -256,7 +256,7 @@ static ssize_t show_min_height(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct ntrig_data *nd = hid_get_drvdata(hdev);
 
        return sprintf(buf, "%d\n", nd->min_height *
@@ -268,7 +268,7 @@ static ssize_t set_min_height(struct device *dev,
                              struct device_attribute *attr,
                              const char *buf, size_t count)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct ntrig_data *nd = hid_get_drvdata(hdev);
 
        unsigned long val;
@@ -292,7 +292,7 @@ static ssize_t show_activate_slack(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct ntrig_data *nd = hid_get_drvdata(hdev);
 
        return sprintf(buf, "%d\n", nd->activate_slack);
@@ -302,7 +302,7 @@ static ssize_t set_activate_slack(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct ntrig_data *nd = hid_get_drvdata(hdev);
 
        unsigned long val;
@@ -325,7 +325,7 @@ static ssize_t show_activation_width(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct ntrig_data *nd = hid_get_drvdata(hdev);
 
        return sprintf(buf, "%d\n", nd->activation_width *
@@ -337,7 +337,7 @@ static ssize_t set_activation_width(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct ntrig_data *nd = hid_get_drvdata(hdev);
 
        unsigned long val;
@@ -361,7 +361,7 @@ static ssize_t show_activation_height(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct ntrig_data *nd = hid_get_drvdata(hdev);
 
        return sprintf(buf, "%d\n", nd->activation_height *
@@ -373,7 +373,7 @@ static ssize_t set_activation_height(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct ntrig_data *nd = hid_get_drvdata(hdev);
 
        unsigned long val;
@@ -397,7 +397,7 @@ static ssize_t show_deactivate_slack(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct ntrig_data *nd = hid_get_drvdata(hdev);
 
        return sprintf(buf, "%d\n", -nd->deactivate_slack);
@@ -407,7 +407,7 @@ static ssize_t set_deactivate_slack(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct ntrig_data *nd = hid_get_drvdata(hdev);
 
        unsigned long val;
index e994f9c..a802b4f 100644 (file)
@@ -66,7 +66,7 @@ static void picolcd_led_set_brightness(struct led_classdev *led_cdev,
        int i, state = 0;
 
        dev  = led_cdev->dev->parent;
-       hdev = container_of(dev, struct hid_device, dev);
+       hdev = to_hid_device(dev);
        data = hid_get_drvdata(hdev);
        if (!data)
                return;
@@ -93,7 +93,7 @@ static enum led_brightness picolcd_led_get_brightness(struct led_classdev *led_c
        int i, value = 0;
 
        dev  = led_cdev->dev->parent;
-       hdev = container_of(dev, struct hid_device, dev);
+       hdev = to_hid_device(dev);
        data = hid_get_drvdata(hdev);
        for (i = 0; i < 8; i++)
                if (led_cdev == data->led[i]) {
index 3a207c0..f095bf8 100644 (file)
@@ -103,7 +103,7 @@ MODULE_PARM_DESC(enable, "Enable for the PC-MIDI virtual audio driver");
 static ssize_t show_channel(struct device *dev,
        struct device_attribute *attr, char *buf)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct pk_device *pk = hid_get_drvdata(hdev);
 
        dbg_hid("pcmidi sysfs read channel=%u\n", pk->pm->midi_channel);
@@ -116,7 +116,7 @@ static ssize_t show_channel(struct device *dev,
 static ssize_t store_channel(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t count)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct pk_device *pk = hid_get_drvdata(hdev);
 
        unsigned channel = 0;
@@ -140,7 +140,7 @@ static struct device_attribute *sysfs_device_attr_channel = {
 static ssize_t show_sustain(struct device *dev,
  struct device_attribute *attr, char *buf)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct pk_device *pk = hid_get_drvdata(hdev);
 
        dbg_hid("pcmidi sysfs read sustain=%u\n", pk->pm->midi_sustain);
@@ -153,7 +153,7 @@ static ssize_t show_sustain(struct device *dev,
 static ssize_t store_sustain(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t count)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct pk_device *pk = hid_get_drvdata(hdev);
 
        unsigned sustain = 0;
@@ -179,7 +179,7 @@ static struct device_attribute *sysfs_device_attr_sustain = {
 static ssize_t show_octave(struct device *dev,
        struct device_attribute *attr, char *buf)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct pk_device *pk = hid_get_drvdata(hdev);
 
        dbg_hid("pcmidi sysfs read octave=%d\n", pk->pm->midi_octave);
@@ -192,7 +192,7 @@ static ssize_t show_octave(struct device *dev,
 static ssize_t store_octave(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t count)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct pk_device *pk = hid_get_drvdata(hdev);
 
        int octave = 0;
index 1948208..329c5d1 100644 (file)
@@ -191,8 +191,7 @@ static ssize_t arvo_sysfs_write(struct file *fp,
                struct kobject *kobj, void const *buf,
                loff_t off, size_t count, size_t real_size, uint command)
 {
-       struct device *dev =
-                       container_of(kobj, struct device, kobj)->parent->parent;
+       struct device *dev = kobj_to_dev(kobj)->parent->parent;
        struct arvo_device *arvo = hid_get_drvdata(dev_get_drvdata(dev));
        struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
        int retval;
@@ -211,8 +210,7 @@ static ssize_t arvo_sysfs_read(struct file *fp,
                struct kobject *kobj, void *buf, loff_t off,
                size_t count, size_t real_size, uint command)
 {
-       struct device *dev =
-                       container_of(kobj, struct device, kobj)->parent->parent;
+       struct device *dev = kobj_to_dev(kobj)->parent->parent;
        struct arvo_device *arvo = hid_get_drvdata(dev_get_drvdata(dev));
        struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
        int retval;
index 02e28e9..8155ac5 100644 (file)
@@ -134,8 +134,7 @@ ssize_t roccat_common2_sysfs_read(struct file *fp, struct kobject *kobj,
                char *buf, loff_t off, size_t count,
                size_t real_size, uint command)
 {
-       struct device *dev =
-                       container_of(kobj, struct device, kobj)->parent->parent;
+       struct device *dev = kobj_to_dev(kobj)->parent->parent;
        struct roccat_common2_device *roccat_dev = hid_get_drvdata(dev_get_drvdata(dev));
        struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
        int retval;
@@ -158,8 +157,7 @@ ssize_t roccat_common2_sysfs_write(struct file *fp, struct kobject *kobj,
                void const *buf, loff_t off, size_t count,
                size_t real_size, uint command)
 {
-       struct device *dev =
-                       container_of(kobj, struct device, kobj)->parent->parent;
+       struct device *dev = kobj_to_dev(kobj)->parent->parent;
        struct roccat_common2_device *roccat_dev = hid_get_drvdata(dev_get_drvdata(dev));
        struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
        int retval;
index bc62ed9..02db537 100644 (file)
@@ -121,8 +121,7 @@ static ssize_t isku_sysfs_read(struct file *fp, struct kobject *kobj,
                char *buf, loff_t off, size_t count,
                size_t real_size, uint command)
 {
-       struct device *dev =
-                       container_of(kobj, struct device, kobj)->parent->parent;
+       struct device *dev = kobj_to_dev(kobj)->parent->parent;
        struct isku_device *isku = hid_get_drvdata(dev_get_drvdata(dev));
        struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
        int retval;
@@ -144,8 +143,7 @@ static ssize_t isku_sysfs_write(struct file *fp, struct kobject *kobj,
                void const *buf, loff_t off, size_t count,
                size_t real_size, uint command)
 {
-       struct device *dev =
-                       container_of(kobj, struct device, kobj)->parent->parent;
+       struct device *dev = kobj_to_dev(kobj)->parent->parent;
        struct isku_device *isku = hid_get_drvdata(dev_get_drvdata(dev));
        struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
        int retval;
index c292650..bf4675a 100644 (file)
@@ -269,8 +269,7 @@ static int kone_get_firmware_version(struct usb_device *usb_dev, int *result)
 static ssize_t kone_sysfs_read_settings(struct file *fp, struct kobject *kobj,
                struct bin_attribute *attr, char *buf,
                loff_t off, size_t count) {
-       struct device *dev =
-                       container_of(kobj, struct device, kobj)->parent->parent;
+       struct device *dev = kobj_to_dev(kobj)->parent->parent;
        struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
 
        if (off >= sizeof(struct kone_settings))
@@ -294,8 +293,7 @@ static ssize_t kone_sysfs_read_settings(struct file *fp, struct kobject *kobj,
 static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
                struct bin_attribute *attr, char *buf,
                loff_t off, size_t count) {
-       struct device *dev =
-                       container_of(kobj, struct device, kobj)->parent->parent;
+       struct device *dev = kobj_to_dev(kobj)->parent->parent;
        struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
        struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
        int retval = 0, difference, old_profile;
@@ -332,8 +330,7 @@ static BIN_ATTR(settings, 0660, kone_sysfs_read_settings,
 static ssize_t kone_sysfs_read_profilex(struct file *fp,
                struct kobject *kobj, struct bin_attribute *attr,
                char *buf, loff_t off, size_t count) {
-       struct device *dev =
-                       container_of(kobj, struct device, kobj)->parent->parent;
+       struct device *dev = kobj_to_dev(kobj)->parent->parent;
        struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
 
        if (off >= sizeof(struct kone_profile))
@@ -353,8 +350,7 @@ static ssize_t kone_sysfs_read_profilex(struct file *fp,
 static ssize_t kone_sysfs_write_profilex(struct file *fp,
                struct kobject *kobj, struct bin_attribute *attr,
                char *buf, loff_t off, size_t count) {
-       struct device *dev =
-                       container_of(kobj, struct device, kobj)->parent->parent;
+       struct device *dev = kobj_to_dev(kobj)->parent->parent;
        struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
        struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
        struct kone_profile *profile;
index 5e99fcd..09e8fc7 100644 (file)
@@ -87,8 +87,7 @@ static ssize_t koneplus_sysfs_read(struct file *fp, struct kobject *kobj,
                char *buf, loff_t off, size_t count,
                size_t real_size, uint command)
 {
-       struct device *dev =
-                       container_of(kobj, struct device, kobj)->parent->parent;
+       struct device *dev = kobj_to_dev(kobj)->parent->parent;
        struct koneplus_device *koneplus = hid_get_drvdata(dev_get_drvdata(dev));
        struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
        int retval;
@@ -113,8 +112,7 @@ static ssize_t koneplus_sysfs_write(struct file *fp, struct kobject *kobj,
                void const *buf, loff_t off, size_t count,
                size_t real_size, uint command)
 {
-       struct device *dev =
-                       container_of(kobj, struct device, kobj)->parent->parent;
+       struct device *dev = kobj_to_dev(kobj)->parent->parent;
        struct koneplus_device *koneplus = hid_get_drvdata(dev_get_drvdata(dev));
        struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
        int retval;
@@ -193,8 +191,7 @@ static ssize_t koneplus_sysfs_read_profilex_settings(struct file *fp,
                struct kobject *kobj, struct bin_attribute *attr, char *buf,
                loff_t off, size_t count)
 {
-       struct device *dev =
-                       container_of(kobj, struct device, kobj)->parent->parent;
+       struct device *dev = kobj_to_dev(kobj)->parent->parent;
        struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
        ssize_t retval;
 
@@ -212,8 +209,7 @@ static ssize_t koneplus_sysfs_read_profilex_buttons(struct file *fp,
                struct kobject *kobj, struct bin_attribute *attr, char *buf,
                loff_t off, size_t count)
 {
-       struct device *dev =
-                       container_of(kobj, struct device, kobj)->parent->parent;
+       struct device *dev = kobj_to_dev(kobj)->parent->parent;
        struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
        ssize_t retval;
 
index 9660477..43617fb 100644 (file)
@@ -128,8 +128,7 @@ static ssize_t kovaplus_sysfs_read(struct file *fp, struct kobject *kobj,
                char *buf, loff_t off, size_t count,
                size_t real_size, uint command)
 {
-       struct device *dev =
-                       container_of(kobj, struct device, kobj)->parent->parent;
+       struct device *dev = kobj_to_dev(kobj)->parent->parent;
        struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
        struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
        int retval;
@@ -154,8 +153,7 @@ static ssize_t kovaplus_sysfs_write(struct file *fp, struct kobject *kobj,
                void const *buf, loff_t off, size_t count,
                size_t real_size, uint command)
 {
-       struct device *dev =
-                       container_of(kobj, struct device, kobj)->parent->parent;
+       struct device *dev = kobj_to_dev(kobj)->parent->parent;
        struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
        struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
        int retval;
@@ -221,8 +219,7 @@ static ssize_t kovaplus_sysfs_read_profilex_settings(struct file *fp,
                struct kobject *kobj, struct bin_attribute *attr, char *buf,
                loff_t off, size_t count)
 {
-       struct device *dev =
-                       container_of(kobj, struct device, kobj)->parent->parent;
+       struct device *dev = kobj_to_dev(kobj)->parent->parent;
        struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
        ssize_t retval;
 
@@ -240,8 +237,7 @@ static ssize_t kovaplus_sysfs_read_profilex_buttons(struct file *fp,
                struct kobject *kobj, struct bin_attribute *attr, char *buf,
                loff_t off, size_t count)
 {
-       struct device *dev =
-                       container_of(kobj, struct device, kobj)->parent->parent;
+       struct device *dev = kobj_to_dev(kobj)->parent->parent;
        struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
        ssize_t retval;
 
index 65e2e76..ac1a731 100644 (file)
@@ -30,7 +30,7 @@ static ssize_t lua_sysfs_read(struct file *fp, struct kobject *kobj,
                char *buf, loff_t off, size_t count,
                size_t real_size, uint command)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct lua_device *lua = hid_get_drvdata(dev_get_drvdata(dev));
        struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
        int retval;
@@ -52,7 +52,7 @@ static ssize_t lua_sysfs_write(struct file *fp, struct kobject *kobj,
                void const *buf, loff_t off, size_t count,
                size_t real_size, uint command)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct lua_device *lua = hid_get_drvdata(dev_get_drvdata(dev));
        struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
        int retval;
index 47d7e74..b30aa7b 100644 (file)
@@ -90,8 +90,7 @@ static ssize_t pyra_sysfs_read(struct file *fp, struct kobject *kobj,
                char *buf, loff_t off, size_t count,
                size_t real_size, uint command)
 {
-       struct device *dev =
-                       container_of(kobj, struct device, kobj)->parent->parent;
+       struct device *dev = kobj_to_dev(kobj)->parent->parent;
        struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
        struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
        int retval;
@@ -116,8 +115,7 @@ static ssize_t pyra_sysfs_write(struct file *fp, struct kobject *kobj,
                void const *buf, loff_t off, size_t count,
                size_t real_size, uint command)
 {
-       struct device *dev =
-                       container_of(kobj, struct device, kobj)->parent->parent;
+       struct device *dev = kobj_to_dev(kobj)->parent->parent;
        struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
        struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
        int retval;
@@ -191,8 +189,7 @@ static ssize_t pyra_sysfs_read_profilex_settings(struct file *fp,
                struct kobject *kobj, struct bin_attribute *attr, char *buf,
                loff_t off, size_t count)
 {
-       struct device *dev =
-                       container_of(kobj, struct device, kobj)->parent->parent;
+       struct device *dev = kobj_to_dev(kobj)->parent->parent;
        struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
        ssize_t retval;
 
@@ -210,8 +207,7 @@ static ssize_t pyra_sysfs_read_profilex_buttons(struct file *fp,
                struct kobject *kobj, struct bin_attribute *attr, char *buf,
                loff_t off, size_t count)
 {
-       struct device *dev =
-                       container_of(kobj, struct device, kobj)->parent->parent;
+       struct device *dev = kobj_to_dev(kobj)->parent->parent;
        struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
        ssize_t retval;
 
@@ -248,8 +244,7 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,
                struct kobject *kobj, struct bin_attribute *attr, char *buf,
                loff_t off, size_t count)
 {
-       struct device *dev =
-                       container_of(kobj, struct device, kobj)->parent->parent;
+       struct device *dev = kobj_to_dev(kobj)->parent->parent;
        struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
        struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
        int retval = 0;
index 92870cd..58ed8f2 100644 (file)
@@ -794,6 +794,9 @@ static const struct hid_device_id sensor_hub_devices[] = {
        { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_ITE,
                        USB_DEVICE_ID_ITE_LENOVO_YOGA2),
                        .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
+       { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_ITE,
+                       USB_DEVICE_ID_ITE_LENOVO_YOGA900),
+                       .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
        { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID,
                     HID_ANY_ID) },
        { }
index 774cd22..9b8db0e 100644 (file)
@@ -1028,6 +1028,7 @@ struct sony_sc {
        struct led_classdev *leds[MAX_LEDS];
        unsigned long quirks;
        struct work_struct state_worker;
+       void(*send_output_report)(struct sony_sc*);
        struct power_supply *battery;
        struct power_supply_desc battery_desc;
        int device_id;
@@ -1044,6 +1045,7 @@ struct sony_sc {
        __u8 battery_charging;
        __u8 battery_capacity;
        __u8 led_state[MAX_LEDS];
+       __u8 resume_led_state[MAX_LEDS];
        __u8 led_delay_on[MAX_LEDS];
        __u8 led_delay_off[MAX_LEDS];
        __u8 led_count;
@@ -1137,11 +1139,11 @@ static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
         * the gyroscope values to corresponding axes so we need a
         * modified one.
         */
-       if ((sc->quirks & DUALSHOCK4_CONTROLLER_USB) && *rsize == 467) {
+       if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
                hid_info(hdev, "Using modified Dualshock 4 report descriptor with gyroscope axes\n");
                rdesc = dualshock4_usb_rdesc;
                *rsize = sizeof(dualshock4_usb_rdesc);
-       } else if ((sc->quirks & DUALSHOCK4_CONTROLLER_BT) && *rsize == 357) {
+       } else if (sc->quirks & DUALSHOCK4_CONTROLLER_BT) {
                hid_info(hdev, "Using modified Dualshock 4 Bluetooth report descriptor\n");
                rdesc = dualshock4_bt_rdesc;
                *rsize = sizeof(dualshock4_bt_rdesc);
@@ -1549,7 +1551,7 @@ static void sony_led_set_brightness(struct led_classdev *led,
                                    enum led_brightness value)
 {
        struct device *dev = led->dev->parent;
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct sony_sc *drv_data;
 
        int n;
@@ -1591,7 +1593,7 @@ static void sony_led_set_brightness(struct led_classdev *led,
 static enum led_brightness sony_led_get_brightness(struct led_classdev *led)
 {
        struct device *dev = led->dev->parent;
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct sony_sc *drv_data;
 
        int n;
@@ -1614,7 +1616,7 @@ static int sony_led_blink_set(struct led_classdev *led, unsigned long *delay_on,
                                unsigned long *delay_off)
 {
        struct device *dev = led->dev->parent;
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct sony_sc *drv_data = hid_get_drvdata(hdev);
        int n;
        __u8 new_on, new_off;
@@ -1789,7 +1791,7 @@ error_leds:
        return ret;
 }
 
-static void sixaxis_state_worker(struct work_struct *work)
+static void sixaxis_send_output_report(struct sony_sc *sc)
 {
        static const union sixaxis_output_report_01 default_report = {
                .buf = {
@@ -1803,7 +1805,6 @@ static void sixaxis_state_worker(struct work_struct *work)
                        0x00, 0x00, 0x00, 0x00, 0x00
                }
        };
-       struct sony_sc *sc = container_of(work, struct sony_sc, state_worker);
        struct sixaxis_output_report *report =
                (struct sixaxis_output_report *)sc->output_report_dmabuf;
        int n;
@@ -1846,9 +1847,8 @@ static void sixaxis_state_worker(struct work_struct *work)
                        HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
 }
 
-static void dualshock4_state_worker(struct work_struct *work)
+static void dualshock4_send_output_report(struct sony_sc *sc)
 {
-       struct sony_sc *sc = container_of(work, struct sony_sc, state_worker);
        struct hid_device *hdev = sc->hdev;
        __u8 *buf = sc->output_report_dmabuf;
        int offset;
@@ -1893,9 +1893,8 @@ static void dualshock4_state_worker(struct work_struct *work)
                                HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
 }
 
-static void motion_state_worker(struct work_struct *work)
+static void motion_send_output_report(struct sony_sc *sc)
 {
-       struct sony_sc *sc = container_of(work, struct sony_sc, state_worker);
        struct hid_device *hdev = sc->hdev;
        struct motion_output_report_02 *report =
                (struct motion_output_report_02 *)sc->output_report_dmabuf;
@@ -1914,6 +1913,18 @@ static void motion_state_worker(struct work_struct *work)
        hid_hw_output_report(hdev, (__u8 *)report, MOTION_REPORT_0x02_SIZE);
 }
 
+static inline void sony_send_output_report(struct sony_sc *sc)
+{
+       if (sc->send_output_report)
+               sc->send_output_report(sc);
+}
+
+static void sony_state_worker(struct work_struct *work)
+{
+       struct sony_sc *sc = container_of(work, struct sony_sc, state_worker);
+       sc->send_output_report(sc);
+}
+
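
The hunks above replace the three per-controller state workers with a single sony_state_worker() that dispatches through the send_output_report callback now stored in sony_sc, while sony_send_output_report() lets other code invoke that callback directly. A minimal userspace sketch of the same callback-dispatch pattern; the fake_sc type and function names are illustrative only, not part of the driver:

#include <stdio.h>

/* Illustrative stand-in for struct sony_sc: one callback per device type. */
struct fake_sc {
	void (*send_output_report)(struct fake_sc *sc);
	int led_state;
};

static void sixaxis_send(struct fake_sc *sc)
{
	printf("sixaxis output report, led_state=%d\n", sc->led_state);
}

static void dualshock4_send(struct fake_sc *sc)
{
	printf("dualshock4 output report, led_state=%d\n", sc->led_state);
}

/* One "worker" for every device type: it only dispatches. */
static void state_worker(struct fake_sc *sc)
{
	sc->send_output_report(sc);
}

int main(void)
{
	struct fake_sc sixaxis = { .send_output_report = sixaxis_send, .led_state = 1 };
	struct fake_sc ds4 = { .send_output_report = dualshock4_send, .led_state = 2 };

	state_worker(&sixaxis);
	state_worker(&ds4);
	return 0;
}

Keeping one worker and swapping only the callback is what lets the suspend path further down send a report synchronously instead of scheduling the workqueue.
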
 static int sony_allocate_output_report(struct sony_sc *sc)
 {
        if ((sc->quirks & SIXAXIS_CONTROLLER) ||
@@ -2241,11 +2252,13 @@ static void sony_release_device_id(struct sony_sc *sc)
        }
 }
 
-static inline void sony_init_work(struct sony_sc *sc,
-                                       void (*worker)(struct work_struct *))
+static inline void sony_init_output_report(struct sony_sc *sc,
+                               void (*send_output_report)(struct sony_sc *))
 {
+       sc->send_output_report = send_output_report;
+
        if (!sc->worker_initialized)
-               INIT_WORK(&sc->state_worker, worker);
+               INIT_WORK(&sc->state_worker, sony_state_worker);
 
        sc->worker_initialized = 1;
 }
@@ -2319,7 +2332,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
                hdev->quirks |= HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP;
                hdev->quirks |= HID_QUIRK_SKIP_OUTPUT_REPORT_ID;
                ret = sixaxis_set_operational_usb(hdev);
-               sony_init_work(sc, sixaxis_state_worker);
+               sony_init_output_report(sc, sixaxis_send_output_report);
        } else if ((sc->quirks & SIXAXIS_CONTROLLER_BT) ||
                        (sc->quirks & NAVIGATION_CONTROLLER_BT)) {
                /*
@@ -2328,7 +2341,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
                 */
                hdev->quirks |= HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP;
                ret = sixaxis_set_operational_bt(hdev);
-               sony_init_work(sc, sixaxis_state_worker);
+               sony_init_output_report(sc, sixaxis_send_output_report);
        } else if (sc->quirks & DUALSHOCK4_CONTROLLER) {
                if (sc->quirks & DUALSHOCK4_CONTROLLER_BT) {
                        /*
@@ -2343,9 +2356,9 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
                        }
                }
 
-               sony_init_work(sc, dualshock4_state_worker);
+               sony_init_output_report(sc, dualshock4_send_output_report);
        } else if (sc->quirks & MOTION_CONTROLLER) {
-               sony_init_work(sc, motion_state_worker);
+               sony_init_output_report(sc, motion_send_output_report);
        } else {
                ret = 0;
        }
@@ -2421,6 +2434,56 @@ static void sony_remove(struct hid_device *hdev)
        hid_hw_stop(hdev);
 }
 
+#ifdef CONFIG_PM
+
+static int sony_suspend(struct hid_device *hdev, pm_message_t message)
+{
+       /*
+        * On suspend, save the current LED state, stop any running
+        * force-feedback and blank the LEDs.
+        */
+       if (SONY_LED_SUPPORT || SONY_FF_SUPPORT) {
+               struct sony_sc *sc = hid_get_drvdata(hdev);
+
+#ifdef CONFIG_SONY_FF
+               sc->left = sc->right = 0;
+#endif
+
+               memcpy(sc->resume_led_state, sc->led_state,
+                       sizeof(sc->resume_led_state));
+               memset(sc->led_state, 0, sizeof(sc->led_state));
+
+               sony_send_output_report(sc);
+       }
+
+       return 0;
+}
+
+static int sony_resume(struct hid_device *hdev)
+{
+       /* Restore the state of controller LEDs on resume */
+       if (SONY_LED_SUPPORT) {
+               struct sony_sc *sc = hid_get_drvdata(hdev);
+
+               memcpy(sc->led_state, sc->resume_led_state,
+                       sizeof(sc->led_state));
+
+               /*
+                * The Sixaxis and navigation controllers on USB need to be
+                * reinitialized on resume or they won't behave properly.
+                */
+               if ((sc->quirks & SIXAXIS_CONTROLLER_USB) ||
+                       (sc->quirks & NAVIGATION_CONTROLLER_USB))
+                       sixaxis_set_operational_usb(sc->hdev);
+
+               sony_set_leds(sc);
+       }
+
+       return 0;
+}
+
+#endif
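
sony_suspend() and sony_resume() above follow a save/blank/restore pattern: the LED state is copied aside and zeroed before suspend, then copied back and re-sent on resume. A trivial standalone sketch of that pattern; the array size and contents are arbitrary example values:

#include <stdio.h>
#include <string.h>

#define MAX_LEDS 4

static unsigned char led_state[MAX_LEDS] = { 1, 0, 1, 1 };
static unsigned char resume_led_state[MAX_LEDS];

static void on_suspend(void)
{
	/* Save the current state, then blank the LEDs. */
	memcpy(resume_led_state, led_state, sizeof(resume_led_state));
	memset(led_state, 0, sizeof(led_state));
}

static void on_resume(void)
{
	/* Restore whatever was lit before suspend. */
	memcpy(led_state, resume_led_state, sizeof(led_state));
}

int main(void)
{
	on_suspend();
	printf("suspended: %d%d%d%d\n", led_state[0], led_state[1], led_state[2], led_state[3]);
	on_resume();
	printf("resumed:   %d%d%d%d\n", led_state[0], led_state[1], led_state[2], led_state[3]);
	return 0;
}
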
+
 static const struct hid_device_id sony_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER),
                .driver_data = SIXAXIS_CONTROLLER_USB },
@@ -2470,7 +2533,13 @@ static struct hid_driver sony_driver = {
        .probe            = sony_probe,
        .remove           = sony_remove,
        .report_fixup     = sony_report_fixup,
-       .raw_event        = sony_raw_event
+       .raw_event        = sony_raw_event,
+
+#ifdef CONFIG_PM
+       .suspend          = sony_suspend,
+       .resume           = sony_resume,
+       .reset_resume     = sony_resume,
+#endif
 };
 
 static int __init sony_init(void)
index 3edd4ac..ec18768 100644 (file)
@@ -141,7 +141,7 @@ static void steelseries_srws1_led_all_set_brightness(struct led_classdev *led_cd
                        enum led_brightness value)
 {
        struct device *dev = led_cdev->dev->parent;
-       struct hid_device *hid = container_of(dev, struct hid_device, dev);
+       struct hid_device *hid = to_hid_device(dev);
        struct steelseries_srws1_data *drv_data = hid_get_drvdata(hid);
 
        if (!drv_data) {
@@ -160,7 +160,7 @@ static void steelseries_srws1_led_all_set_brightness(struct led_classdev *led_cd
 static enum led_brightness steelseries_srws1_led_all_get_brightness(struct led_classdev *led_cdev)
 {
        struct device *dev = led_cdev->dev->parent;
-       struct hid_device *hid = container_of(dev, struct hid_device, dev);
+       struct hid_device *hid = to_hid_device(dev);
        struct steelseries_srws1_data *drv_data;
 
        drv_data = hid_get_drvdata(hid);
@@ -177,7 +177,7 @@ static void steelseries_srws1_led_set_brightness(struct led_classdev *led_cdev,
                        enum led_brightness value)
 {
        struct device *dev = led_cdev->dev->parent;
-       struct hid_device *hid = container_of(dev, struct hid_device, dev);
+       struct hid_device *hid = to_hid_device(dev);
        struct steelseries_srws1_data *drv_data = hid_get_drvdata(hid);
        int i, state = 0;
 
@@ -205,7 +205,7 @@ static void steelseries_srws1_led_set_brightness(struct led_classdev *led_cdev,
 static enum led_brightness steelseries_srws1_led_get_brightness(struct led_classdev *led_cdev)
 {
        struct device *dev = led_cdev->dev->parent;
-       struct hid_device *hid = container_of(dev, struct hid_device, dev);
+       struct hid_device *hid = to_hid_device(dev);
        struct steelseries_srws1_data *drv_data;
        int i, value = 0;
 
index 05e23c4..4390eee 100644 (file)
@@ -296,14 +296,12 @@ static const struct wiimod_ops wiimod_battery = {
 
 static enum led_brightness wiimod_led_get(struct led_classdev *led_dev)
 {
-       struct wiimote_data *wdata;
        struct device *dev = led_dev->dev->parent;
+       struct wiimote_data *wdata = dev_to_wii(dev);
        int i;
        unsigned long flags;
        bool value = false;
 
-       wdata = hid_get_drvdata(container_of(dev, struct hid_device, dev));
-
        for (i = 0; i < 4; ++i) {
                if (wdata->leds[i] == led_dev) {
                        spin_lock_irqsave(&wdata->state.lock, flags);
@@ -319,14 +317,12 @@ static enum led_brightness wiimod_led_get(struct led_classdev *led_dev)
 static void wiimod_led_set(struct led_classdev *led_dev,
                           enum led_brightness value)
 {
-       struct wiimote_data *wdata;
        struct device *dev = led_dev->dev->parent;
+       struct wiimote_data *wdata = dev_to_wii(dev);
        int i;
        unsigned long flags;
        __u8 state, flag;
 
-       wdata = hid_get_drvdata(container_of(dev, struct hid_device, dev));
-
        for (i = 0; i < 4; ++i) {
                if (wdata->leds[i] == led_dev) {
                        flag = WIIPROTO_FLAG_LED(i + 1);
index 875694d..510ca77 100644 (file)
@@ -256,8 +256,7 @@ enum wiiproto_reqs {
        WIIPROTO_REQ_MAX
 };
 
-#define dev_to_wii(pdev) hid_get_drvdata(container_of(pdev, struct hid_device, \
-                                                                       dev))
+#define dev_to_wii(pdev) hid_get_drvdata(to_hid_device(pdev))
 
 void __wiimote_schedule(struct wiimote_data *wdata);
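
The container_of() to to_hid_device()/dev_to_wii() conversions in the hunks above are pure refactoring: both forms recover the structure that embeds a struct device from a pointer to that member. A self-contained sketch of the underlying pointer arithmetic; the types below are simplified stand-ins, not the kernel's real definitions, and container_of() is shown without the kernel's type checking:

#include <stddef.h>
#include <stdio.h>

/* Simplified container_of: step back from a member to its container. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device {
	const char *name;
};

struct hid_device_like {
	int id;
	struct device dev;	/* embedded member, as in struct hid_device */
};

/* The kind of helper to_hid_device() provides. */
#define to_hid_device_like(d) container_of(d, struct hid_device_like, dev)

int main(void)
{
	struct hid_device_like hdev = { .id = 42, .dev = { .name = "hid" } };
	struct device *dev = &hdev.dev;

	printf("recovered id = %d\n", to_hid_device_like(dev)->id);
	return 0;
}

The helper form simply names the conversion once instead of repeating the container_of() arguments at every call site, which is all these hunks change.
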
 
index 10bd8e6..b921693 100644 (file)
@@ -151,6 +151,7 @@ struct i2c_hid {
        struct i2c_hid_platform_data pdata;
 
        bool                    irq_wake_enabled;
+       struct mutex            reset_lock;
 };
 
 static int __i2c_hid_command(struct i2c_client *client,
@@ -356,9 +357,16 @@ static int i2c_hid_hwreset(struct i2c_client *client)
 
        i2c_hid_dbg(ihid, "%s\n", __func__);
 
+       /*
+        * This prevents sending feature reports while the device is
+        * being reset. Otherwise we may lose the reset complete
+        * interrupt.
+        */
+       mutex_lock(&ihid->reset_lock);
+
        ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
        if (ret)
-               return ret;
+               goto out_unlock;
 
        i2c_hid_dbg(ihid, "resetting...\n");
 
@@ -366,10 +374,11 @@ static int i2c_hid_hwreset(struct i2c_client *client)
        if (ret) {
                dev_err(&client->dev, "failed to reset device.\n");
                i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
-               return ret;
        }
 
-       return 0;
+out_unlock:
+       mutex_unlock(&ihid->reset_lock);
+       return ret;
 }
 
 static void i2c_hid_get_input(struct i2c_hid *ihid)
@@ -587,12 +596,15 @@ static int i2c_hid_output_raw_report(struct hid_device *hid, __u8 *buf,
                size_t count, unsigned char report_type, bool use_data)
 {
        struct i2c_client *client = hid->driver_data;
+       struct i2c_hid *ihid = i2c_get_clientdata(client);
        int report_id = buf[0];
        int ret;
 
        if (report_type == HID_INPUT_REPORT)
                return -EINVAL;
 
+       mutex_lock(&ihid->reset_lock);
+
        if (report_id) {
                buf++;
                count--;
@@ -605,6 +617,8 @@ static int i2c_hid_output_raw_report(struct hid_device *hid, __u8 *buf,
        if (report_id && ret >= 0)
                ret++; /* add report_id to the number of transferred bytes */
 
+       mutex_unlock(&ihid->reset_lock);
+
        return ret;
 }
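
The new reset_lock above makes i2c_hid_hwreset() and the raw report path mutually exclusive, so a SET_REPORT issued while the device is resetting cannot cause the reset-complete interrupt to be lost, as the added comment explains. A rough pthread sketch of that serialization; the thread bodies and messages are illustrative only (build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t reset_lock = PTHREAD_MUTEX_INITIALIZER;

static void *do_reset(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&reset_lock);
	puts("reset: started, no report may be sent now");
	usleep(100 * 1000);		/* pretend to wait for reset-complete */
	puts("reset: done");
	pthread_mutex_unlock(&reset_lock);
	return NULL;
}

static void *send_report(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&reset_lock);	/* blocks while a reset is in flight */
	puts("report: sent");
	pthread_mutex_unlock(&reset_lock);
	return NULL;
}

int main(void)
{
	pthread_t r, s;

	pthread_create(&r, NULL, do_reset, NULL);
	usleep(10 * 1000);			/* let the reset win the race */
	pthread_create(&s, NULL, send_report, NULL);
	pthread_join(r, NULL);
	pthread_join(s, NULL);
	return 0;
}
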
 
@@ -990,6 +1004,7 @@ static int i2c_hid_probe(struct i2c_client *client,
        ihid->wHIDDescRegister = cpu_to_le16(hidRegister);
 
        init_waitqueue_head(&ihid->wait);
+       mutex_init(&ihid->reset_lock);
 
        /* we need to allocate the command buffer without knowing the maximum
         * size of the reports. Let's use HID_MIN_BUFFER_SIZE, then we do the
@@ -1184,7 +1199,6 @@ MODULE_DEVICE_TABLE(i2c, i2c_hid_id_table);
 static struct i2c_driver i2c_hid_driver = {
        .driver = {
                .name   = "i2c_hid",
-               .owner  = THIS_MODULE,
                .pm     = &i2c_hid_pm,
                .acpi_match_table = ACPI_PTR(i2c_hid_acpi_match),
                .of_match_table = of_match_ptr(i2c_hid_of_match),
index 36712e9..ad71160 100644 (file)
@@ -274,10 +274,10 @@ static void hid_irq_in(struct urb *urb)
 
        switch (urb->status) {
        case 0:                 /* success */
-               usbhid_mark_busy(usbhid);
                usbhid->retry_delay = 0;
                if ((hid->quirks & HID_QUIRK_ALWAYS_POLL) && !hid->open)
                        break;
+               usbhid_mark_busy(usbhid);
                if (!test_bit(HID_RESUME_RUNNING, &usbhid->iofl)) {
                        hid_input_report(urb->context, HID_INPUT_REPORT,
                                         urb->transfer_buffer,
@@ -477,8 +477,6 @@ static void hid_ctrl(struct urb *urb)
        struct usbhid_device *usbhid = hid->driver_data;
        int unplug = 0, status = urb->status;
 
-       spin_lock(&usbhid->lock);
-
        switch (status) {
        case 0:                 /* success */
                if (usbhid->ctrl[usbhid->ctrltail].dir == USB_DIR_IN)
@@ -498,6 +496,8 @@ static void hid_ctrl(struct urb *urb)
                hid_warn(urb->dev, "ctrl urb status %d received\n", status);
        }
 
+       spin_lock(&usbhid->lock);
+
        if (unplug) {
                usbhid->ctrltail = usbhid->ctrlhead;
        } else {
index 94bb137..2324520 100644 (file)
@@ -84,6 +84,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
+       { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C01A, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL },
index 807922b..fa47d66 100644 (file)
@@ -96,7 +96,7 @@ struct usbhid_device {
 };
 
 #define        hid_to_usb_dev(hid_dev) \
-       container_of(hid_dev->dev.parent->parent, struct usb_device, dev)
+       to_usb_device(hid_dev->dev.parent->parent)
 
 #endif
 
index e06af5b..5cb21dd 100644 (file)
@@ -686,7 +686,7 @@ out:
 static ssize_t wacom_led_select_store(struct device *dev, int set_id,
                                      const char *buf, size_t count)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct wacom *wacom = hid_get_drvdata(hdev);
        unsigned int id;
        int err;
@@ -714,7 +714,7 @@ static ssize_t wacom_led##SET_ID##_select_store(struct device *dev, \
 static ssize_t wacom_led##SET_ID##_select_show(struct device *dev,     \
        struct device_attribute *attr, char *buf)                       \
 {                                                                      \
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);\
+       struct hid_device *hdev = to_hid_device(dev);\
        struct wacom *wacom = hid_get_drvdata(hdev);                    \
        return scnprintf(buf, PAGE_SIZE, "%d\n",                        \
                         wacom->led.select[SET_ID]);                    \
@@ -750,7 +750,7 @@ static ssize_t wacom_luminance_store(struct wacom *wacom, u8 *dest,
 static ssize_t wacom_##name##_luminance_store(struct device *dev,      \
        struct device_attribute *attr, const char *buf, size_t count)   \
 {                                                                      \
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);\
+       struct hid_device *hdev = to_hid_device(dev);\
        struct wacom *wacom = hid_get_drvdata(hdev);                    \
                                                                        \
        return wacom_luminance_store(wacom, &wacom->led.field,          \
@@ -773,7 +773,7 @@ DEVICE_LUMINANCE_ATTR(buttons, img_lum);
 static ssize_t wacom_button_image_store(struct device *dev, int button_id,
                                        const char *buf, size_t count)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct wacom *wacom = hid_get_drvdata(hdev);
        int err;
        unsigned len;
@@ -1097,7 +1097,7 @@ static ssize_t wacom_show_speed(struct device *dev,
                                struct device_attribute
                                *attr, char *buf)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct wacom *wacom = hid_get_drvdata(hdev);
 
        return snprintf(buf, PAGE_SIZE, "%i\n", wacom->wacom_wac.bt_high_speed);
@@ -1107,7 +1107,7 @@ static ssize_t wacom_store_speed(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
 {
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct hid_device *hdev = to_hid_device(dev);
        struct wacom *wacom = hid_get_drvdata(hdev);
        u8 new_speed;
 
@@ -1130,8 +1130,8 @@ static ssize_t wacom_show_remote_mode(struct kobject *kobj,
                                      struct kobj_attribute *kattr,
                                      char *buf, int index)
 {
-       struct device *dev = container_of(kobj->parent, struct device, kobj);
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct device *dev = kobj_to_dev(kobj->parent);
+       struct hid_device *hdev = to_hid_device(dev);
        struct wacom *wacom = hid_get_drvdata(hdev);
        u8 mode;
 
@@ -1241,8 +1241,8 @@ static ssize_t wacom_store_unpair_remote(struct kobject *kobj,
                                         const char *buf, size_t count)
 {
        unsigned char selector = 0;
-       struct device *dev = container_of(kobj->parent, struct device, kobj);
-       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct device *dev = kobj_to_dev(kobj->parent);
+       struct hid_device *hdev = to_hid_device(dev);
        struct wacom *wacom = hid_get_drvdata(hdev);
        int err;
 
@@ -1353,8 +1353,7 @@ static void wacom_clean_inputs(struct wacom *wacom)
                else
                        input_free_device(wacom->wacom_wac.pad_input);
        }
-       if (wacom->remote_dir)
-               kobject_put(wacom->remote_dir);
+       kobject_put(wacom->remote_dir);
        wacom->wacom_wac.pen_input = NULL;
        wacom->wacom_wac.touch_input = NULL;
        wacom->wacom_wac.pad_input = NULL;
index 01a4f05..99ef77f 100644 (file)
@@ -34,6 +34,9 @@
  */
 #define WACOM_CONTACT_AREA_SCALE 2607
 
+static void wacom_report_numbered_buttons(struct input_dev *input_dev,
+                               int button_count, int mask);
+
 /*
  * Percent of battery capacity for Graphire.
  * 8th value means AC online and show 100% capacity.
@@ -436,16 +439,142 @@ exit:
 static void wacom_intuos_schedule_prox_event(struct wacom_wac *wacom_wac)
 {
        struct wacom *wacom = container_of(wacom_wac, struct wacom, wacom_wac);
+       struct wacom_features *features = &wacom_wac->features;
        struct hid_report *r;
        struct hid_report_enum *re;
 
        re = &(wacom->hdev->report_enum[HID_FEATURE_REPORT]);
-       r = re->report_id_hash[WACOM_REPORT_INTUOSREAD];
+       if (features->type == INTUOSHT2)
+               r = re->report_id_hash[WACOM_REPORT_INTUOSHT2_ID];
+       else
+               r = re->report_id_hash[WACOM_REPORT_INTUOS_ID1];
        if (r) {
                hid_hw_request(wacom->hdev, r, HID_REQ_GET_REPORT);
        }
 }
 
+static int wacom_intuos_pad(struct wacom_wac *wacom)
+{
+       struct wacom_features *features = &wacom->features;
+       unsigned char *data = wacom->data;
+       struct input_dev *input = wacom->pad_input;
+       int i;
+       int buttons = 0, nbuttons = features->numbered_buttons;
+       int keys = 0, nkeys = 0;
+       int ring1 = 0, ring2 = 0;
+       int strip1 = 0, strip2 = 0;
+       bool prox = false;
+
+       /* pad packets. Works as a second tool and is always in prox */
+       if (!(data[0] == WACOM_REPORT_INTUOSPAD || data[0] == WACOM_REPORT_INTUOS5PAD ||
+             data[0] == WACOM_REPORT_CINTIQPAD))
+               return 0;
+
+       if (features->type >= INTUOS4S && features->type <= INTUOS4L) {
+               buttons = (data[3] << 1) | (data[2] & 0x01);
+               ring1 = data[1];
+       } else if (features->type == DTK) {
+               buttons = data[6];
+       } else if (features->type == WACOM_13HD) {
+               buttons = (data[4] << 1) | (data[3] & 0x01);
+       } else if (features->type == WACOM_24HD) {
+               buttons = (data[8] << 8) | data[6];
+               ring1 = data[1];
+               ring2 = data[2];
+
+               /*
+                * Three "buttons" are available on the 24HD which are
+                * physically implemented as a touchstrip. Each button
+                * is approximately 3 bits wide with a 2 bit spacing.
+                * The raw touchstrip bits are stored at:
+                *    ((data[3] & 0x1f) << 8) | data[4]
+                */
+               nkeys = 3;
+               keys = ((data[3] & 0x1C) ? 1<<2 : 0) |
+                      ((data[4] & 0xE0) ? 1<<1 : 0) |
+                      ((data[4] & 0x07) ? 1<<0 : 0);
+       } else if (features->type == WACOM_27QHD) {
+               nkeys = 3;
+               keys = data[2] & 0x07;
+
+               input_report_abs(input, ABS_X, be16_to_cpup((__be16 *)&data[4]));
+               input_report_abs(input, ABS_Y, be16_to_cpup((__be16 *)&data[6]));
+               input_report_abs(input, ABS_Z, be16_to_cpup((__be16 *)&data[8]));
+       } else if (features->type == CINTIQ_HYBRID) {
+               /*
+                * Do not send hardware buttons under Android. They
+                * are already sent to the system through GPIO (and
+                * have different meaning).
+                *
+                * d-pad right  -> data[4] & 0x10
+                * d-pad up     -> data[4] & 0x20
+                * d-pad left   -> data[4] & 0x40
+                * d-pad down   -> data[4] & 0x80
+                * d-pad center -> data[3] & 0x01
+                */
+               buttons = (data[4] << 1) | (data[3] & 0x01);
+       } else if (features->type == CINTIQ_COMPANION_2) {
+               /* d-pad right  -> data[4] & 0x10
+                * d-pad up     -> data[4] & 0x20
+                * d-pad left   -> data[4] & 0x40
+                * d-pad down   -> data[4] & 0x80
+                * d-pad center -> data[3] & 0x01
+                */
+               buttons = ((data[2] >> 4) << 7) |
+                         ((data[1] & 0x04) << 6) |
+                         ((data[2] & 0x0F) << 2) |
+                         (data[1] & 0x03);
+       } else if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
+               /*
+                * ExpressKeys on Intuos5/Intuos Pro have a capacitive sensor in
+                * addition to the mechanical switch. Switch data is
+                * stored in data[4], capacitive data in data[5].
+                *
+                * Touch ring mode switch (data[3]) has no capacitive sensor
+                */
+               buttons = (data[4] << 1) | (data[3] & 0x01);
+               ring1 = data[2];
+       } else {
+               if (features->type == WACOM_21UX2 || features->type == WACOM_22HD) {
+                       buttons = (data[8] << 10) | ((data[7] & 0x01) << 9) |
+                                 (data[6] << 1) | (data[5] & 0x01);
+
+                       if (features->type == WACOM_22HD) {
+                               nkeys = 3;
+                               keys = data[9] & 0x07;
+                       }
+               } else {
+                       buttons = ((data[6] & 0x10) << 10) |
+                                 ((data[5] & 0x10) << 9)  |
+                                 ((data[6] & 0x0F) << 4)  |
+                                 (data[5] & 0x0F);
+               }
+               strip1 = ((data[1] & 0x1f) << 8) | data[2];
+               strip2 = ((data[3] & 0x1f) << 8) | data[4];
+       }
+
+       prox = (buttons & ~(~0 << nbuttons)) | (keys & ~(~0 << nkeys)) |
+              (ring1 & 0x80) | (ring2 & 0x80) | strip1 | strip2;
+
+       wacom_report_numbered_buttons(input, nbuttons, buttons);
+
+       for (i = 0; i < nkeys; i++)
+               input_report_key(input, KEY_PROG1 + i, keys & (1 << i));
+
+       input_report_abs(input, ABS_RX, strip1);
+       input_report_abs(input, ABS_RY, strip2);
+
+       input_report_abs(input, ABS_WHEEL,    (ring1 & 0x80) ? (ring1 & 0x7f) : 0);
+       input_report_abs(input, ABS_THROTTLE, (ring2 & 0x80) ? (ring2 & 0x7f) : 0);
+
+       input_report_key(input, wacom->tool[1], prox ? 1 : 0);
+       input_report_abs(input, ABS_MISC, prox ? PAD_DEVICE_ID : 0);
+
+       input_event(input, EV_MSC, MSC_SERIAL, 0xffffffff);
+
+       return 1;
+}
+
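
In wacom_intuos_pad() above, proximity is derived by masking the button and key bitfields down to the number of controls the model actually exposes, with ~(~0 << n) acting as an n-bit low mask, then OR-ing in the ring and strip state. A quick standalone check of that expression; the bitfield values and button counts below are made up for illustration, and the mask is computed on an unsigned here to keep the shift well defined:

#include <stdio.h>

/* n-bit low mask; the driver spells this ~(~0 << n). */
static unsigned low_bits(unsigned n)
{
	return ~(~0u << n);
}

int main(void)
{
	unsigned buttons = 0x244;	/* example button bitfield */
	unsigned nbuttons = 9;		/* e.g. a pad with 9 buttons */
	unsigned keys = 0, nkeys = 3;
	unsigned ring1 = 0x85, ring2 = 0, strip1 = 0, strip2 = 0;
	unsigned prox;

	prox = (buttons & low_bits(nbuttons)) | (keys & low_bits(nkeys)) |
	       (ring1 & 0x80) | (ring2 & 0x80) | strip1 | strip2;

	printf("mask(%u) = 0x%x, prox = %s\n",
	       nbuttons, low_bits(nbuttons), prox ? "yes" : "no");
	return 0;
}
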
 static int wacom_intuos_inout(struct wacom_wac *wacom)
 {
        struct wacom_features *features = &wacom->features;
@@ -755,19 +884,40 @@ static int wacom_remote_status_irq(struct wacom_wac *wacom_wac, size_t len)
        return 0;
 }
 
-static void wacom_intuos_general(struct wacom_wac *wacom)
+static int wacom_intuos_general(struct wacom_wac *wacom)
 {
        struct wacom_features *features = &wacom->features;
        unsigned char *data = wacom->data;
        struct input_dev *input = wacom->pen_input;
-       unsigned int t;
+       int idx = (features->type == INTUOS) ? (data[1] & 0x01) : 0;
+       unsigned char type = (data[1] >> 1) & 0x0F;
+       unsigned int x, y, distance, t;
 
-       /* general pen packet */
-       if ((data[1] & 0xb8) == 0xa0) {
-               t = (data[6] << 2) | ((data[7] >> 6) & 3);
-               if (features->pressure_max == 2047) {
-                       t = (t << 1) | (data[1] & 1);
-               }
+       if (data[0] != WACOM_REPORT_PENABLED && data[0] != WACOM_REPORT_CINTIQ &&
+               data[0] != WACOM_REPORT_INTUOS_PEN)
+               return 0;
+
+       x = (be16_to_cpup((__be16 *)&data[2]) << 1) | ((data[9] >> 1) & 1);
+       y = (be16_to_cpup((__be16 *)&data[4]) << 1) | (data[9] & 1);
+       distance = data[9] >> 2;
+       if (features->type < INTUOS3S) {
+               x >>= 1;
+               y >>= 1;
+               distance >>= 1;
+       }
+       input_report_abs(input, ABS_X, x);
+       input_report_abs(input, ABS_Y, y);
+       input_report_abs(input, ABS_DISTANCE, distance);
+
+       switch (type) {
+       case 0x00:
+       case 0x01:
+       case 0x02:
+       case 0x03:
+               /* general pen packet */
+               t = (data[6] << 3) | ((data[7] & 0xC0) >> 5) | (data[1] & 1);
+               if (features->pressure_max < 2047)
+                       t >>= 1;
                input_report_abs(input, ABS_PRESSURE, t);
                if (features->type != INTUOSHT2) {
                    input_report_abs(input, ABS_TILT_X,
@@ -777,29 +927,112 @@ static void wacom_intuos_general(struct wacom_wac *wacom)
                input_report_key(input, BTN_STYLUS, data[1] & 2);
                input_report_key(input, BTN_STYLUS2, data[1] & 4);
                input_report_key(input, BTN_TOUCH, t > 10);
-       }
+               break;
 
-       /* airbrush second packet */
-       if ((data[1] & 0xbc) == 0xb4) {
+       case 0x0a:
+               /* airbrush second packet */
                input_report_abs(input, ABS_WHEEL,
                                (data[6] << 2) | ((data[7] >> 6) & 3));
                input_report_abs(input, ABS_TILT_X,
                                 (((data[7] << 1) & 0x7e) | (data[8] >> 7)) - 64);
                input_report_abs(input, ABS_TILT_Y, (data[8] & 0x7f) - 64);
+               break;
+
+       case 0x05:
+               /* Rotation packet */
+               if (features->type >= INTUOS3S) {
+                       /* I3 marker pen rotation */
+                       t = (data[6] << 3) | ((data[7] >> 5) & 7);
+                       t = (data[7] & 0x20) ? ((t > 900) ? ((t-1) / 2 - 1350) :
+                               ((t-1) / 2 + 450)) : (450 - t / 2);
+                       input_report_abs(input, ABS_Z, t);
+               } else {
+                       /* 4D mouse 2nd packet */
+                       t = (data[6] << 3) | ((data[7] >> 5) & 7);
+                       input_report_abs(input, ABS_RZ, (data[7] & 0x20) ?
+                               ((t - 1) / 2) : -t / 2);
+               }
+               break;
+
+       case 0x04:
+               /* 4D mouse 1st packet */
+               input_report_key(input, BTN_LEFT,   data[8] & 0x01);
+               input_report_key(input, BTN_MIDDLE, data[8] & 0x02);
+               input_report_key(input, BTN_RIGHT,  data[8] & 0x04);
+
+               input_report_key(input, BTN_SIDE,   data[8] & 0x20);
+               input_report_key(input, BTN_EXTRA,  data[8] & 0x10);
+               t = (data[6] << 2) | ((data[7] >> 6) & 3);
+               input_report_abs(input, ABS_THROTTLE, (data[8] & 0x08) ? -t : t);
+               break;
+
+       case 0x06:
+               /* I4 mouse */
+               input_report_key(input, BTN_LEFT,   data[6] & 0x01);
+               input_report_key(input, BTN_MIDDLE, data[6] & 0x02);
+               input_report_key(input, BTN_RIGHT,  data[6] & 0x04);
+               input_report_rel(input, REL_WHEEL, ((data[7] & 0x80) >> 7)
+                                - ((data[7] & 0x40) >> 6));
+               input_report_key(input, BTN_SIDE,   data[6] & 0x08);
+               input_report_key(input, BTN_EXTRA,  data[6] & 0x10);
+
+               input_report_abs(input, ABS_TILT_X,
+                       (((data[7] << 1) & 0x7e) | (data[8] >> 7)) - 64);
+               input_report_abs(input, ABS_TILT_Y, (data[8] & 0x7f) - 64);
+               break;
+
+       case 0x08:
+               if (wacom->tool[idx] == BTN_TOOL_MOUSE) {
+                       /* 2D mouse packet */
+                       input_report_key(input, BTN_LEFT,   data[8] & 0x04);
+                       input_report_key(input, BTN_MIDDLE, data[8] & 0x08);
+                       input_report_key(input, BTN_RIGHT,  data[8] & 0x10);
+                       input_report_rel(input, REL_WHEEL, (data[8] & 0x01)
+                                        - ((data[8] & 0x02) >> 1));
+
+                       /* I3 2D mouse side buttons */
+                       if (features->type >= INTUOS3S && features->type <= INTUOS3L) {
+                               input_report_key(input, BTN_SIDE,   data[8] & 0x40);
+                               input_report_key(input, BTN_EXTRA,  data[8] & 0x20);
+                       }
+               }
+               else if (wacom->tool[idx] == BTN_TOOL_LENS) {
+                       /* Lens cursor packets */
+                       input_report_key(input, BTN_LEFT,   data[8] & 0x01);
+                       input_report_key(input, BTN_MIDDLE, data[8] & 0x02);
+                       input_report_key(input, BTN_RIGHT,  data[8] & 0x04);
+                       input_report_key(input, BTN_SIDE,   data[8] & 0x10);
+                       input_report_key(input, BTN_EXTRA,  data[8] & 0x08);
+               }
+               break;
+
+       case 0x07:
+       case 0x09:
+       case 0x0b:
+       case 0x0c:
+       case 0x0d:
+       case 0x0e:
+       case 0x0f:
+               /* unhandled */
+               break;
        }
+
+       input_report_abs(input, ABS_MISC, wacom->id[idx]); /* report tool id */
+       input_report_key(input, wacom->tool[idx], 1);
+       input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
+       wacom->reporting_data = true;
+       return 2;
 }
 
 static int wacom_intuos_irq(struct wacom_wac *wacom)
 {
-       struct wacom_features *features = &wacom->features;
        unsigned char *data = wacom->data;
        struct input_dev *input = wacom->pen_input;
-       unsigned int t;
-       int idx = 0, result;
+       int result;
 
        if (data[0] != WACOM_REPORT_PENABLED &&
-           data[0] != WACOM_REPORT_INTUOSREAD &&
-           data[0] != WACOM_REPORT_INTUOSWRITE &&
+           data[0] != WACOM_REPORT_INTUOS_ID1 &&
+           data[0] != WACOM_REPORT_INTUOS_ID2 &&
            data[0] != WACOM_REPORT_INTUOSPAD &&
            data[0] != WACOM_REPORT_INTUOS_PEN &&
            data[0] != WACOM_REPORT_CINTIQ &&
@@ -810,339 +1043,22 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
                 return 0;
        }
 
-       /* tool number */
-       if (features->type == INTUOS)
-               idx = data[1] & 0x01;
-
-       /* pad packets. Works as a second tool and is always in prox */
-       if (data[0] == WACOM_REPORT_INTUOSPAD || data[0] == WACOM_REPORT_INTUOS5PAD ||
-           data[0] == WACOM_REPORT_CINTIQPAD) {
-               input = wacom->pad_input;
-               if (features->type >= INTUOS4S && features->type <= INTUOS4L) {
-                       input_report_key(input, BTN_0, (data[2] & 0x01));
-                       input_report_key(input, BTN_1, (data[3] & 0x01));
-                       input_report_key(input, BTN_2, (data[3] & 0x02));
-                       input_report_key(input, BTN_3, (data[3] & 0x04));
-                       input_report_key(input, BTN_4, (data[3] & 0x08));
-                       input_report_key(input, BTN_5, (data[3] & 0x10));
-                       input_report_key(input, BTN_6, (data[3] & 0x20));
-                       if (data[1] & 0x80) {
-                               input_report_abs(input, ABS_WHEEL, (data[1] & 0x7f));
-                       } else {
-                               /* Out of proximity, clear wheel value. */
-                               input_report_abs(input, ABS_WHEEL, 0);
-                       }
-                       if (features->type != INTUOS4S) {
-                               input_report_key(input, BTN_7, (data[3] & 0x40));
-                               input_report_key(input, BTN_8, (data[3] & 0x80));
-                       }
-                       if (data[1] | (data[2] & 0x01) | data[3]) {
-                               input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
-                       } else {
-                               input_report_abs(input, ABS_MISC, 0);
-                       }
-               } else if (features->type == DTK) {
-                       input_report_key(input, BTN_0, (data[6] & 0x01));
-                       input_report_key(input, BTN_1, (data[6] & 0x02));
-                       input_report_key(input, BTN_2, (data[6] & 0x04));
-                       input_report_key(input, BTN_3, (data[6] & 0x08));
-                       input_report_key(input, BTN_4, (data[6] & 0x10));
-                       input_report_key(input, BTN_5, (data[6] & 0x20));
-                       if (data[6] & 0x3f) {
-                               input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
-                       } else {
-                               input_report_abs(input, ABS_MISC, 0);
-                       }
-               } else if (features->type == WACOM_13HD) {
-                       input_report_key(input, BTN_0, (data[3] & 0x01));
-                       input_report_key(input, BTN_1, (data[4] & 0x01));
-                       input_report_key(input, BTN_2, (data[4] & 0x02));
-                       input_report_key(input, BTN_3, (data[4] & 0x04));
-                       input_report_key(input, BTN_4, (data[4] & 0x08));
-                       input_report_key(input, BTN_5, (data[4] & 0x10));
-                       input_report_key(input, BTN_6, (data[4] & 0x20));
-                       input_report_key(input, BTN_7, (data[4] & 0x40));
-                       input_report_key(input, BTN_8, (data[4] & 0x80));
-                       if ((data[3] & 0x01) | data[4]) {
-                               input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
-                       } else {
-                               input_report_abs(input, ABS_MISC, 0);
-                       }
-               } else if (features->type == WACOM_24HD) {
-                       input_report_key(input, BTN_0, (data[6] & 0x01));
-                       input_report_key(input, BTN_1, (data[6] & 0x02));
-                       input_report_key(input, BTN_2, (data[6] & 0x04));
-                       input_report_key(input, BTN_3, (data[6] & 0x08));
-                       input_report_key(input, BTN_4, (data[6] & 0x10));
-                       input_report_key(input, BTN_5, (data[6] & 0x20));
-                       input_report_key(input, BTN_6, (data[6] & 0x40));
-                       input_report_key(input, BTN_7, (data[6] & 0x80));
-                       input_report_key(input, BTN_8, (data[8] & 0x01));
-                       input_report_key(input, BTN_9, (data[8] & 0x02));
-                       input_report_key(input, BTN_A, (data[8] & 0x04));
-                       input_report_key(input, BTN_B, (data[8] & 0x08));
-                       input_report_key(input, BTN_C, (data[8] & 0x10));
-                       input_report_key(input, BTN_X, (data[8] & 0x20));
-                       input_report_key(input, BTN_Y, (data[8] & 0x40));
-                       input_report_key(input, BTN_Z, (data[8] & 0x80));
-
-                       /*
-                        * Three "buttons" are available on the 24HD which are
-                        * physically implemented as a touchstrip. Each button
-                        * is approximately 3 bits wide with a 2 bit spacing.
-                        * The raw touchstrip bits are stored at:
-                        *    ((data[3] & 0x1f) << 8) | data[4])
-                        */
-                       input_report_key(input, KEY_PROG1, data[4] & 0x07);
-                       input_report_key(input, KEY_PROG2, data[4] & 0xE0);
-                       input_report_key(input, KEY_PROG3, data[3] & 0x1C);
-
-                       if (data[1] & 0x80) {
-                               input_report_abs(input, ABS_WHEEL, (data[1] & 0x7f));
-                       } else {
-                               /* Out of proximity, clear wheel value. */
-                               input_report_abs(input, ABS_WHEEL, 0);
-                       }
-
-                       if (data[2] & 0x80) {
-                               input_report_abs(input, ABS_THROTTLE, (data[2] & 0x7f));
-                       } else {
-                               /* Out of proximity, clear second wheel value. */
-                               input_report_abs(input, ABS_THROTTLE, 0);
-                       }
-
-                       if (data[1] | data[2] | (data[3] & 0x1f) | data[4] | data[6] | data[8]) {
-                               input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
-                       } else {
-                               input_report_abs(input, ABS_MISC, 0);
-                       }
-               } else if (features->type == WACOM_27QHD) {
-                       input_report_key(input, KEY_PROG1, data[2] & 0x01);
-                       input_report_key(input, KEY_PROG2, data[2] & 0x02);
-                       input_report_key(input, KEY_PROG3, data[2] & 0x04);
-
-                       input_report_abs(input, ABS_X, be16_to_cpup((__be16 *)&data[4]));
-                       input_report_abs(input, ABS_Y, be16_to_cpup((__be16 *)&data[6]));
-                       input_report_abs(input, ABS_Z, be16_to_cpup((__be16 *)&data[8]));
-                       if ((data[2] & 0x07) | data[4] | data[5] | data[6] | data[7] | data[8] | data[9]) {
-                               input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
-                       } else {
-                               input_report_abs(input, ABS_MISC, 0);
-                       }
-               } else if (features->type == CINTIQ_HYBRID) {
-                       /*
-                        * Do not send hardware buttons under Android. They
-                        * are already sent to the system through GPIO (and
-                        * have different meaning).
-                        */
-                       input_report_key(input, BTN_1, (data[4] & 0x01));
-                       input_report_key(input, BTN_2, (data[4] & 0x02));
-                       input_report_key(input, BTN_3, (data[4] & 0x04));
-                       input_report_key(input, BTN_4, (data[4] & 0x08));
-
-                       input_report_key(input, BTN_5, (data[4] & 0x10));  /* Right  */
-                       input_report_key(input, BTN_6, (data[4] & 0x20));  /* Up     */
-                       input_report_key(input, BTN_7, (data[4] & 0x40));  /* Left   */
-                       input_report_key(input, BTN_8, (data[4] & 0x80));  /* Down   */
-                       input_report_key(input, BTN_0, (data[3] & 0x01));  /* Center */
-
-                       if (data[4] | (data[3] & 0x01)) {
-                               input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
-                       } else {
-                               input_report_abs(input, ABS_MISC, 0);
-                       }
-
-               } else if (features->type == CINTIQ_COMPANION_2) {
-                       input_report_key(input, BTN_1, (data[1] & 0x02));
-                       input_report_key(input, BTN_2, (data[2] & 0x01));
-                       input_report_key(input, BTN_3, (data[2] & 0x02));
-                       input_report_key(input, BTN_4, (data[2] & 0x04));
-                       input_report_key(input, BTN_5, (data[2] & 0x08));
-                       input_report_key(input, BTN_6, (data[1] & 0x04));
-
-                       input_report_key(input, BTN_7, (data[2] & 0x10));  /* Right  */
-                       input_report_key(input, BTN_8, (data[2] & 0x20));  /* Up         */
-                       input_report_key(input, BTN_9, (data[2] & 0x40));  /* Left   */
-                       input_report_key(input, BTN_A, (data[2] & 0x80));  /* Down   */
-                       input_report_key(input, BTN_0, (data[1] & 0x01));  /* Center */
-
-                       if (data[2] | (data[1] & 0x07)) {
-                               input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
-                       } else {
-                               input_report_abs(input, ABS_MISC, 0);
-                       }
-
-               } else if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
-                       int i;
-
-                       /* Touch ring mode switch has no capacitive sensor */
-                       input_report_key(input, BTN_0, (data[3] & 0x01));
-
-                       /*
-                        * ExpressKeys on Intuos5/Intuos Pro have a capacitive sensor in
-                        * addition to the mechanical switch. Switch data is
-                        * stored in data[4], capacitive data in data[5].
-                        */
-                       for (i = 0; i < 8; i++)
-                               input_report_key(input, BTN_1 + i, data[4] & (1 << i));
-
-                       if (data[2] & 0x80) {
-                               input_report_abs(input, ABS_WHEEL, (data[2] & 0x7f));
-                       } else {
-                               /* Out of proximity, clear wheel value. */
-                               input_report_abs(input, ABS_WHEEL, 0);
-                       }
-
-                       if (data[2] | (data[3] & 0x01) | data[4] | data[5]) {
-                               input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
-                       } else {
-                               input_report_abs(input, ABS_MISC, 0);
-                       }
-               } else {
-                       if (features->type == WACOM_21UX2 || features->type == WACOM_22HD) {
-                               input_report_key(input, BTN_0, (data[5] & 0x01));
-                               input_report_key(input, BTN_1, (data[6] & 0x01));
-                               input_report_key(input, BTN_2, (data[6] & 0x02));
-                               input_report_key(input, BTN_3, (data[6] & 0x04));
-                               input_report_key(input, BTN_4, (data[6] & 0x08));
-                               input_report_key(input, BTN_5, (data[6] & 0x10));
-                               input_report_key(input, BTN_6, (data[6] & 0x20));
-                               input_report_key(input, BTN_7, (data[6] & 0x40));
-                               input_report_key(input, BTN_8, (data[6] & 0x80));
-                               input_report_key(input, BTN_9, (data[7] & 0x01));
-                               input_report_key(input, BTN_A, (data[8] & 0x01));
-                               input_report_key(input, BTN_B, (data[8] & 0x02));
-                               input_report_key(input, BTN_C, (data[8] & 0x04));
-                               input_report_key(input, BTN_X, (data[8] & 0x08));
-                               input_report_key(input, BTN_Y, (data[8] & 0x10));
-                               input_report_key(input, BTN_Z, (data[8] & 0x20));
-                               input_report_key(input, BTN_BASE, (data[8] & 0x40));
-                               input_report_key(input, BTN_BASE2, (data[8] & 0x80));
-
-                               if (features->type == WACOM_22HD) {
-                                       input_report_key(input, KEY_PROG1, data[9] & 0x01);
-                                       input_report_key(input, KEY_PROG2, data[9] & 0x02);
-                                       input_report_key(input, KEY_PROG3, data[9] & 0x04);
-                               }
-                       } else {
-                               input_report_key(input, BTN_0, (data[5] & 0x01));
-                               input_report_key(input, BTN_1, (data[5] & 0x02));
-                               input_report_key(input, BTN_2, (data[5] & 0x04));
-                               input_report_key(input, BTN_3, (data[5] & 0x08));
-                               input_report_key(input, BTN_4, (data[6] & 0x01));
-                               input_report_key(input, BTN_5, (data[6] & 0x02));
-                               input_report_key(input, BTN_6, (data[6] & 0x04));
-                               input_report_key(input, BTN_7, (data[6] & 0x08));
-                               input_report_key(input, BTN_8, (data[5] & 0x10));
-                               input_report_key(input, BTN_9, (data[6] & 0x10));
-                       }
-                       input_report_abs(input, ABS_RX, ((data[1] & 0x1f) << 8) | data[2]);
-                       input_report_abs(input, ABS_RY, ((data[3] & 0x1f) << 8) | data[4]);
-
-                       if ((data[5] & 0x1f) | data[6] | (data[1] & 0x1f) |
-                               data[2] | (data[3] & 0x1f) | data[4] | data[8] |
-                               (data[7] & 0x01)) {
-                               input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
-                       } else {
-                               input_report_abs(input, ABS_MISC, 0);
-                       }
-               }
-                return 1;
-       }
+       /* process pad events */
+       result = wacom_intuos_pad(wacom);
+       if (result)
+               return result;
 
        /* process in/out prox events */
        result = wacom_intuos_inout(wacom);
        if (result)
-                return result - 1;
-
-       if (features->type >= INTUOS3S) {
-               input_report_abs(input, ABS_X, (data[2] << 9) | (data[3] << 1) | ((data[9] >> 1) & 1));
-               input_report_abs(input, ABS_Y, (data[4] << 9) | (data[5] << 1) | (data[9] & 1));
-               input_report_abs(input, ABS_DISTANCE, ((data[9] >> 2) & 0x3f));
-       } else {
-               input_report_abs(input, ABS_X, be16_to_cpup((__be16 *)&data[2]));
-               input_report_abs(input, ABS_Y, be16_to_cpup((__be16 *)&data[4]));
-               input_report_abs(input, ABS_DISTANCE, ((data[9] >> 3) & 0x1f));
-       }
+               return result - 1;
 
        /* process general packets */
-       wacom_intuos_general(wacom);
-
-       /* 4D mouse, 2D mouse, marker pen rotation, tilt mouse, or Lens cursor packets */
-       if ((data[1] & 0xbc) == 0xa8 || (data[1] & 0xbe) == 0xb0 || (data[1] & 0xbc) == 0xac) {
-
-               if (data[1] & 0x02) {
-                       /* Rotation packet */
-                       if (features->type >= INTUOS3S) {
-                               /* I3 marker pen rotation */
-                               t = (data[6] << 3) | ((data[7] >> 5) & 7);
-                               t = (data[7] & 0x20) ? ((t > 900) ? ((t-1) / 2 - 1350) :
-                                       ((t-1) / 2 + 450)) : (450 - t / 2) ;
-                               input_report_abs(input, ABS_Z, t);
-                       } else {
-                               /* 4D mouse rotation packet */
-                               t = (data[6] << 3) | ((data[7] >> 5) & 7);
-                               input_report_abs(input, ABS_RZ, (data[7] & 0x20) ?
-                                       ((t - 1) / 2) : -t / 2);
-                       }
-
-               } else if (!(data[1] & 0x10) && features->type < INTUOS3S) {
-                       /* 4D mouse packet */
-                       input_report_key(input, BTN_LEFT,   data[8] & 0x01);
-                       input_report_key(input, BTN_MIDDLE, data[8] & 0x02);
-                       input_report_key(input, BTN_RIGHT,  data[8] & 0x04);
-
-                       input_report_key(input, BTN_SIDE,   data[8] & 0x20);
-                       input_report_key(input, BTN_EXTRA,  data[8] & 0x10);
-                       t = (data[6] << 2) | ((data[7] >> 6) & 3);
-                       input_report_abs(input, ABS_THROTTLE, (data[8] & 0x08) ? -t : t);
-
-               } else if (wacom->tool[idx] == BTN_TOOL_MOUSE) {
-                       /* I4 mouse */
-                       if (features->type >= INTUOS4S && features->type <= INTUOSPL) {
-                               input_report_key(input, BTN_LEFT,   data[6] & 0x01);
-                               input_report_key(input, BTN_MIDDLE, data[6] & 0x02);
-                               input_report_key(input, BTN_RIGHT,  data[6] & 0x04);
-                               input_report_rel(input, REL_WHEEL, ((data[7] & 0x80) >> 7)
-                                                - ((data[7] & 0x40) >> 6));
-                               input_report_key(input, BTN_SIDE,   data[6] & 0x08);
-                               input_report_key(input, BTN_EXTRA,  data[6] & 0x10);
-
-                               input_report_abs(input, ABS_TILT_X,
-                                       (((data[7] << 1) & 0x7e) | (data[8] >> 7)) - 64);
-                               input_report_abs(input, ABS_TILT_Y, (data[8] & 0x7f) - 64);
-                       } else {
-                               /* 2D mouse packet */
-                               input_report_key(input, BTN_LEFT,   data[8] & 0x04);
-                               input_report_key(input, BTN_MIDDLE, data[8] & 0x08);
-                               input_report_key(input, BTN_RIGHT,  data[8] & 0x10);
-                               input_report_rel(input, REL_WHEEL, (data[8] & 0x01)
-                                                - ((data[8] & 0x02) >> 1));
-
-                               /* I3 2D mouse side buttons */
-                               if (features->type >= INTUOS3S && features->type <= INTUOS3L) {
-                                       input_report_key(input, BTN_SIDE,   data[8] & 0x40);
-                                       input_report_key(input, BTN_EXTRA,  data[8] & 0x20);
-                               }
-                       }
-               } else if ((features->type < INTUOS3S || features->type == INTUOS3L ||
-                               features->type == INTUOS4L || features->type == INTUOS5L ||
-                               features->type == INTUOSPL) &&
-                          wacom->tool[idx] == BTN_TOOL_LENS) {
-                       /* Lens cursor packets */
-                       input_report_key(input, BTN_LEFT,   data[8] & 0x01);
-                       input_report_key(input, BTN_MIDDLE, data[8] & 0x02);
-                       input_report_key(input, BTN_RIGHT,  data[8] & 0x04);
-                       input_report_key(input, BTN_SIDE,   data[8] & 0x10);
-                       input_report_key(input, BTN_EXTRA,  data[8] & 0x08);
-               }
-       }
+       result = wacom_intuos_general(wacom);
+       if (result)
+               return result - 1;
 
-       input_report_abs(input, ABS_MISC, wacom->id[idx]); /* report tool id */
-       input_report_key(input, wacom->tool[idx], 1);
-       input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
-       wacom->reporting_data = true;
-       return 1;
+       return 0;
 }
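
The rewritten wacom_intuos_general() above changes how pen pressure is assembled: it always builds the full 11-bit value and drops the low bit on 10-bit hardware, where the old code built 10 bits and appended the extra bit only when pressure_max was 2047. A brute-force standalone check that the two packings agree for both 10-bit (pressure_max 1023) and 11-bit (pressure_max 2047) devices:

#include <assert.h>
#include <stdio.h>

/* Old decoding: 10-bit base, extra LSB only on 2047 (11-bit) devices. */
static unsigned old_pressure(const unsigned char *d, unsigned pressure_max)
{
	unsigned t = (d[6] << 2) | ((d[7] >> 6) & 3);

	if (pressure_max == 2047)
		t = (t << 1) | (d[1] & 1);
	return t;
}

/* New decoding: always build 11 bits, shift the LSB away on 10-bit devices. */
static unsigned new_pressure(const unsigned char *d, unsigned pressure_max)
{
	unsigned t = (d[6] << 3) | ((d[7] & 0xC0) >> 5) | (d[1] & 1);

	if (pressure_max < 2047)
		t >>= 1;
	return t;
}

int main(void)
{
	unsigned char d[10] = { 0 };
	unsigned b1, b6, b7;

	for (b1 = 0; b1 < 2; b1++)
		for (b6 = 0; b6 < 256; b6++)
			for (b7 = 0; b7 < 256; b7++) {
				d[1] = b1; d[6] = b6; d[7] = b7;
				assert(old_pressure(d, 1023) == new_pressure(d, 1023));
				assert(old_pressure(d, 2047) == new_pressure(d, 2047));
			}
	puts("old and new pressure packings agree");
	return 0;
}
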
 
 static int int_dist(int x1, int y1, int x2, int y2)
@@ -2509,7 +2425,7 @@ void wacom_setup_device_quirks(struct wacom *wacom)
                features->quirks |= WACOM_QUIRK_BATTERY;
 
        /* quirk for bamboo touch with 2 low res touches */
-       if (features->type == BAMBOO_PT &&
+       if ((features->type == BAMBOO_PT || features->type == BAMBOO_TOUCH) &&
            features->pktlen == WACOM_PKGLEN_BBTOUCH) {
                features->x_max <<= 5;
                features->y_max <<= 5;
@@ -2806,6 +2722,19 @@ static void wacom_setup_numbered_buttons(struct input_dev *input_dev,
                __set_bit(BTN_BASE + (i-16), input_dev->keybit);
 }
 
+static void wacom_report_numbered_buttons(struct input_dev *input_dev,
+                               int button_count, int mask)
+{
+       int i;
+
+       for (i = 0; i < button_count && i < 10; i++)
+               input_report_key(input_dev, BTN_0 + i, mask & (1 << i));
+       for (i = 10; i < button_count && i < 16; i++)
+               input_report_key(input_dev, BTN_A + (i-10), mask & (1 << i));
+       for (i = 16; i < button_count && i < 18; i++)
+               input_report_key(input_dev, BTN_BASE + (i-16), mask & (1 << i));
+}
+
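
wacom_report_numbered_buttons() above spreads an up-to-18-bit button mask across three key-code ranges, mirroring the ranges wacom_setup_numbered_buttons() declares. A standalone sketch of that index-to-code mapping; the BTN_* values are copied from linux/input-event-codes.h and the button count and mask are arbitrary examples:

#include <stdio.h>

#define BTN_0    0x100	/* values as in linux/input-event-codes.h */
#define BTN_A    0x130
#define BTN_BASE 0x126

static int button_to_code(int i)
{
	if (i < 10)
		return BTN_0 + i;		/* buttons 0..9   -> BTN_0..BTN_9 */
	if (i < 16)
		return BTN_A + (i - 10);	/* buttons 10..15 -> BTN_A..      */
	return BTN_BASE + (i - 16);		/* buttons 16..17 -> BTN_BASE..   */
}

int main(void)
{
	int button_count = 18;			/* illustrative: a large pad */
	int mask = (1 << 0) | (1 << 11) | (1 << 17);
	int i;

	for (i = 0; i < button_count; i++)
		if (mask & (1 << i))
			printf("button %2d -> key code 0x%03x\n", i, button_to_code(i));
	return 0;
}
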
 int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
                                   struct wacom_wac *wacom_wac)
 {
index 877c24a..25baa7f 100644 (file)
@@ -47,8 +47,8 @@
 /* wacom data packet report IDs */
 #define WACOM_REPORT_PENABLED          2
 #define WACOM_REPORT_PENABLED_BT       3
-#define WACOM_REPORT_INTUOSREAD                5
-#define WACOM_REPORT_INTUOSWRITE       6
+#define WACOM_REPORT_INTUOS_ID1                5
+#define WACOM_REPORT_INTUOS_ID2                6
 #define WACOM_REPORT_INTUOSPAD         12
 #define WACOM_REPORT_INTUOS5PAD                3
 #define WACOM_REPORT_DTUSPAD           21
@@ -70,6 +70,7 @@
 #define WACOM_REPORT_DEVICE_LIST       16
 #define WACOM_REPORT_INTUOS_PEN                16
 #define WACOM_REPORT_REMOTE            17
+#define WACOM_REPORT_INTUOSHT2_ID      8
 
 /* device quirks */
 #define WACOM_QUIRK_BBTOUCH_LOWRES     0x0001
index e24c2b6..7b0aa82 100644 (file)
@@ -126,6 +126,7 @@ config I2C_I801
            Sunrise Point-LP (PCH)
            DNV (SOC)
            Broxton (SOC)
+           Lewisburg (PCH)
 
          This driver can also be built as a module.  If so, the module
          will be called i2c-i801.
index c306751..f62d697 100644 (file)
@@ -62,6 +62,8 @@
  * Sunrise Point-LP (PCH)      0x9d23  32      hard    yes     yes     yes
  * DNV (SOC)                   0x19df  32      hard    yes     yes     yes
  * Broxton (SOC)               0x5ad4  32      hard    yes     yes     yes
+ * Lewisburg (PCH)             0xa1a3  32      hard    yes     yes     yes
+ * Lewisburg Supersku (PCH)    0xa223  32      hard    yes     yes     yes
  *
  * Features supported by this driver:
  * Software PEC                                no
 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS      0x9d23
 #define PCI_DEVICE_ID_INTEL_DNV_SMBUS                  0x19df
 #define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS              0x5ad4
+#define PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS            0xa1a3
+#define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS       0xa223
 
 struct i801_mux_config {
        char *gpio_chip;
@@ -869,6 +873,8 @@ static const struct pci_device_id i801_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS) },
        { 0, }
 };
 
index 1e4d99d..9bb0b05 100644 (file)
@@ -50,6 +50,7 @@
 #include <linux/of_device.h>
 #include <linux/of_dma.h>
 #include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
 #include <linux/platform_data/i2c-imx.h>
 #include <linux/platform_device.h>
 #include <linux/sched.h>
index e23a7b0..0b20449 100644 (file)
@@ -662,8 +662,10 @@ static void __xiic_start_xfer(struct xiic_i2c *i2c)
 
 static void xiic_start_xfer(struct xiic_i2c *i2c)
 {
-
+       spin_lock(&i2c->lock);
+       xiic_reinit(i2c);
        __xiic_start_xfer(i2c);
+       spin_unlock(&i2c->lock);
 }
 
 static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
index 040af5c..ba8eb08 100644 (file)
@@ -715,7 +715,7 @@ static int i2c_device_probe(struct device *dev)
                if (wakeirq > 0 && wakeirq != client->irq)
                        status = dev_pm_set_dedicated_wake_irq(dev, wakeirq);
                else if (client->irq > 0)
-                       status = dev_pm_set_wake_irq(dev, wakeirq);
+                       status = dev_pm_set_wake_irq(dev, client->irq);
                else
                        status = 0;
 
index eea0c79..4d960d3 100644 (file)
 #define AD7795_CH_AIN1M_AIN1M  8 /* AIN1(-) - AIN1(-) */
 
 /* ID Register Bit Designations (AD7793_REG_ID) */
-#define AD7785_ID              0xB
+#define AD7785_ID              0x3
 #define AD7792_ID              0xA
 #define AD7793_ID              0xB
 #define AD7794_ID              0xF
index 599cde3..b10f629 100644 (file)
 
 #define DEFAULT_SAMPLE_TIME            1000
 
+/* V at 25°C of 696 mV */
+#define VF610_VTEMP25_3V0              950
+/* V at 25°C of 699 mV */
+#define VF610_VTEMP25_3V3              867
+/* Typical sensor slope coefficient at all temperatures */
+#define VF610_TEMP_SLOPE_COEFF         1840
+
 enum clk_sel {
        VF610_ADCIOC_BUSCLK_SET,
        VF610_ADCIOC_ALTCLK_SET,
@@ -197,6 +204,8 @@ static inline void vf610_adc_calculate_rates(struct vf610_adc *info)
                adc_feature->clk_div = 8;
        }
 
+       adck_rate = ipg_rate / adc_feature->clk_div;
+
        /*
         * Determine the long sample time adder value to be used based
         * on the default minimum sample time provided.
@@ -221,7 +230,6 @@ static inline void vf610_adc_calculate_rates(struct vf610_adc *info)
         * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode
         * LSTAdder(Long Sample Time): 3, 5, 7, 9, 13, 17, 21, 25 ADCK cycles
         */
-       adck_rate = ipg_rate / info->adc_feature.clk_div;
        for (i = 0; i < ARRAY_SIZE(vf610_hw_avgs); i++)
                info->sample_freq_avail[i] =
                        adck_rate / (6 + vf610_hw_avgs[i] *
@@ -663,11 +671,13 @@ static int vf610_read_raw(struct iio_dev *indio_dev,
                        break;
                case IIO_TEMP:
                        /*
-                       * Calculate in degree Celsius times 1000
-                       * Using sensor slope of 1.84 mV/°C and
-                       * V at 25°C of 696 mV
-                       */
-                       *val = 25000 - ((int)info->value - 864) * 1000000 / 1840;
+                        * Calculate in degree Celsius times 1000
+                        * Using the typical sensor slope of 1.84 mV/°C
+                        * and VREFH_ADC at 3.3V, V at 25°C of 699 mV
+                        */
+                       *val = 25000 - ((int)info->value - VF610_VTEMP25_3V3) *
+                                       1000000 / VF610_TEMP_SLOPE_COEFF;
+
                        break;
                default:
                        mutex_unlock(&indio_dev->mlock);
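Worked through, the new expression pins the reading VF610_VTEMP25_3V3 (867) to exactly 25000 milli-degrees and moves the result by 1000000 / VF610_TEMP_SLOPE_COEFF, roughly 543, for every unit of difference from that reading; the slope and reference point are now named constants instead of magic numbers. The same conversion as a standalone sketch (assuming raw is the value info->value carries above):

static int vf610_temp_millicelsius(int raw)
{
        /* mirrors the in-driver calculation; constants as defined above */
        return 25000 - (raw - VF610_VTEMP25_3V3) * 1000000 /
                        VF610_TEMP_SLOPE_COEFF;
}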
index 0370624..02e636a 100644 (file)
@@ -841,6 +841,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
                        case XADC_REG_VCCINT:
                        case XADC_REG_VCCAUX:
                        case XADC_REG_VREFP:
+                       case XADC_REG_VREFN:
                        case XADC_REG_VCCBRAM:
                        case XADC_REG_VCCPINT:
                        case XADC_REG_VCCPAUX:
index 9e4d2c1..81ca008 100644 (file)
@@ -113,12 +113,16 @@ enum ad5064_type {
        ID_AD5065,
        ID_AD5628_1,
        ID_AD5628_2,
+       ID_AD5629_1,
+       ID_AD5629_2,
        ID_AD5648_1,
        ID_AD5648_2,
        ID_AD5666_1,
        ID_AD5666_2,
        ID_AD5668_1,
        ID_AD5668_2,
+       ID_AD5669_1,
+       ID_AD5669_2,
 };
 
 static int ad5064_write(struct ad5064_state *st, unsigned int cmd,
@@ -291,7 +295,7 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = {
        { },
 };
 
-#define AD5064_CHANNEL(chan, addr, bits) {                     \
+#define AD5064_CHANNEL(chan, addr, bits, _shift) {             \
        .type = IIO_VOLTAGE,                                    \
        .indexed = 1,                                           \
        .output = 1,                                            \
@@ -303,36 +307,39 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = {
                .sign = 'u',                                    \
                .realbits = (bits),                             \
                .storagebits = 16,                              \
-               .shift = 20 - bits,                             \
+               .shift = (_shift),                              \
        },                                                      \
        .ext_info = ad5064_ext_info,                            \
 }
 
-#define DECLARE_AD5064_CHANNELS(name, bits) \
+#define DECLARE_AD5064_CHANNELS(name, bits, shift) \
 const struct iio_chan_spec name[] = { \
-       AD5064_CHANNEL(0, 0, bits), \
-       AD5064_CHANNEL(1, 1, bits), \
-       AD5064_CHANNEL(2, 2, bits), \
-       AD5064_CHANNEL(3, 3, bits), \
-       AD5064_CHANNEL(4, 4, bits), \
-       AD5064_CHANNEL(5, 5, bits), \
-       AD5064_CHANNEL(6, 6, bits), \
-       AD5064_CHANNEL(7, 7, bits), \
+       AD5064_CHANNEL(0, 0, bits, shift), \
+       AD5064_CHANNEL(1, 1, bits, shift), \
+       AD5064_CHANNEL(2, 2, bits, shift), \
+       AD5064_CHANNEL(3, 3, bits, shift), \
+       AD5064_CHANNEL(4, 4, bits, shift), \
+       AD5064_CHANNEL(5, 5, bits, shift), \
+       AD5064_CHANNEL(6, 6, bits, shift), \
+       AD5064_CHANNEL(7, 7, bits, shift), \
 }
 
-#define DECLARE_AD5065_CHANNELS(name, bits) \
+#define DECLARE_AD5065_CHANNELS(name, bits, shift) \
 const struct iio_chan_spec name[] = { \
-       AD5064_CHANNEL(0, 0, bits), \
-       AD5064_CHANNEL(1, 3, bits), \
+       AD5064_CHANNEL(0, 0, bits, shift), \
+       AD5064_CHANNEL(1, 3, bits, shift), \
 }
 
-static DECLARE_AD5064_CHANNELS(ad5024_channels, 12);
-static DECLARE_AD5064_CHANNELS(ad5044_channels, 14);
-static DECLARE_AD5064_CHANNELS(ad5064_channels, 16);
+static DECLARE_AD5064_CHANNELS(ad5024_channels, 12, 8);
+static DECLARE_AD5064_CHANNELS(ad5044_channels, 14, 6);
+static DECLARE_AD5064_CHANNELS(ad5064_channels, 16, 4);
 
-static DECLARE_AD5065_CHANNELS(ad5025_channels, 12);
-static DECLARE_AD5065_CHANNELS(ad5045_channels, 14);
-static DECLARE_AD5065_CHANNELS(ad5065_channels, 16);
+static DECLARE_AD5065_CHANNELS(ad5025_channels, 12, 8);
+static DECLARE_AD5065_CHANNELS(ad5045_channels, 14, 6);
+static DECLARE_AD5065_CHANNELS(ad5065_channels, 16, 4);
+
+static DECLARE_AD5064_CHANNELS(ad5629_channels, 12, 4);
+static DECLARE_AD5064_CHANNELS(ad5669_channels, 16, 0);
 
 static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
        [ID_AD5024] = {
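The DAC code is left-aligned inside the register's data field, so the old fixed `.shift = 20 - bits` only suited the SPI parts with a 20-bit field (12/14/16-bit codes shifted by 8/6/4); making the shift a per-family parameter also covers the I2C AD5629 and AD5669, whose 16-bit field needs shifts of 4 and 0. A hedged sketch of placing a raw code using these channel descriptors (the helper name is illustrative, not driver API):

static u32 ad5064_pack_code(const struct iio_chan_spec *chan, u32 code)
{
        /* mask to the channel's resolution, then left-align per .shift */
        return (code & GENMASK(chan->scan_type.realbits - 1, 0)) <<
                        chan->scan_type.shift;
}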
@@ -382,6 +389,18 @@ static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
                .channels = ad5024_channels,
                .num_channels = 8,
        },
+       [ID_AD5629_1] = {
+               .shared_vref = true,
+               .internal_vref = 2500000,
+               .channels = ad5629_channels,
+               .num_channels = 8,
+       },
+       [ID_AD5629_2] = {
+               .shared_vref = true,
+               .internal_vref = 5000000,
+               .channels = ad5629_channels,
+               .num_channels = 8,
+       },
        [ID_AD5648_1] = {
                .shared_vref = true,
                .internal_vref = 2500000,
@@ -418,6 +437,18 @@ static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
                .channels = ad5064_channels,
                .num_channels = 8,
        },
+       [ID_AD5669_1] = {
+               .shared_vref = true,
+               .internal_vref = 2500000,
+               .channels = ad5669_channels,
+               .num_channels = 8,
+       },
+       [ID_AD5669_2] = {
+               .shared_vref = true,
+               .internal_vref = 5000000,
+               .channels = ad5669_channels,
+               .num_channels = 8,
+       },
 };
 
 static inline unsigned int ad5064_num_vref(struct ad5064_state *st)
@@ -597,10 +628,16 @@ static int ad5064_i2c_write(struct ad5064_state *st, unsigned int cmd,
        unsigned int addr, unsigned int val)
 {
        struct i2c_client *i2c = to_i2c_client(st->dev);
+       int ret;
 
        st->data.i2c[0] = (cmd << 4) | addr;
        put_unaligned_be16(val, &st->data.i2c[1]);
-       return i2c_master_send(i2c, st->data.i2c, 3);
+
+       ret = i2c_master_send(i2c, st->data.i2c, 3);
+       if (ret < 0)
+               return ret;
+
+       return 0;
 }
 
 static int ad5064_i2c_probe(struct i2c_client *i2c,
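i2c_master_send() returns the number of bytes transferred on success, while the callers of this write hook expect 0, so the positive byte count has to be folded away. The general shape of that normalization, sketched with placeholder buffer names (the short-transfer check is an illustrative addition, not in the patch):

        ret = i2c_master_send(i2c, buf, len);
        if (ret < 0)
                return ret;     /* bus or protocol error */
        if (ret != len)
                return -EIO;    /* short transfer, treated as I/O error here */
        return 0;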
@@ -616,12 +653,12 @@ static int ad5064_i2c_remove(struct i2c_client *i2c)
 }
 
 static const struct i2c_device_id ad5064_i2c_ids[] = {
-       {"ad5629-1", ID_AD5628_1},
-       {"ad5629-2", ID_AD5628_2},
-       {"ad5629-3", ID_AD5628_2}, /* similar enough to ad5629-2 */
-       {"ad5669-1", ID_AD5668_1},
-       {"ad5669-2", ID_AD5668_2},
-       {"ad5669-3", ID_AD5668_2}, /* similar enough to ad5669-2 */
+       {"ad5629-1", ID_AD5629_1},
+       {"ad5629-2", ID_AD5629_2},
+       {"ad5629-3", ID_AD5629_2}, /* similar enough to ad5629-2 */
+       {"ad5669-1", ID_AD5669_1},
+       {"ad5669-2", ID_AD5669_2},
+       {"ad5669-3", ID_AD5669_2}, /* similar enough to ad5669-2 */
        {}
 };
 MODULE_DEVICE_TABLE(i2c, ad5064_i2c_ids);
index 12128d1..71991b5 100644 (file)
@@ -50,10 +50,10 @@ static int si7020_read_raw(struct iio_dev *indio_dev,
 
        switch (mask) {
        case IIO_CHAN_INFO_RAW:
-               ret = i2c_smbus_read_word_data(*client,
-                                              chan->type == IIO_TEMP ?
-                                              SI7020CMD_TEMP_HOLD :
-                                              SI7020CMD_RH_HOLD);
+               ret = i2c_smbus_read_word_swapped(*client,
+                                                 chan->type == IIO_TEMP ?
+                                                 SI7020CMD_TEMP_HOLD :
+                                                 SI7020CMD_RH_HOLD);
                if (ret < 0)
                        return ret;
                *val = ret >> 2;
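SMBus word transfers are little-endian by definition, while the Si7020 returns its 16-bit measurements most-significant byte first, so the plain word read handed back a byte-swapped value. i2c_smbus_read_word_swapped() is the one-call equivalent of reading and swapping by hand:

        ret = i2c_smbus_read_word_data(*client, SI7020CMD_TEMP_HOLD);
        if (ret >= 0)
                ret = swab16(ret);      /* device sends MSB first */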
index 44a077f..f174ce0 100644 (file)
@@ -84,12 +84,15 @@ void __init gic_dist_config(void __iomem *base, int gic_irqs,
                writel_relaxed(GICD_INT_DEF_PRI_X4, base + GIC_DIST_PRI + i);
 
        /*
-        * Disable all interrupts.  Leave the PPI and SGIs alone
-        * as they are enabled by redistributor registers.
+        * Deactivate and disable all SPIs. Leave the PPI and SGIs
+        * alone as they are in the redistributor registers on GICv3.
         */
-       for (i = 32; i < gic_irqs; i += 32)
+       for (i = 32; i < gic_irqs; i += 32) {
                writel_relaxed(GICD_INT_EN_CLR_X32,
-                                       base + GIC_DIST_ENABLE_CLEAR + i / 8);
+                              base + GIC_DIST_ACTIVE_CLEAR + i / 8);
+               writel_relaxed(GICD_INT_EN_CLR_X32,
+                              base + GIC_DIST_ENABLE_CLEAR + i / 8);
+       }
 
        if (sync_access)
                sync_access();
@@ -102,7 +105,9 @@ void gic_cpu_config(void __iomem *base, void (*sync_access)(void))
        /*
         * Deal with the banked PPI and SGI interrupts - disable all
         * PPI interrupts, ensure all SGI interrupts are enabled.
+        * Make sure everything is deactivated.
         */
+       writel_relaxed(GICD_INT_EN_CLR_X32, base + GIC_DIST_ACTIVE_CLEAR);
        writel_relaxed(GICD_INT_EN_CLR_PPI, base + GIC_DIST_ENABLE_CLEAR);
        writel_relaxed(GICD_INT_EN_SET_SGI, base + GIC_DIST_ENABLE_SET);
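GICD_ICENABLERn and GICD_ICACTIVERn are write-1-to-clear registers with one bit per interrupt and 32 interrupts per 32-bit word, so writing the all-ones GICD_INT_EN_CLR_X32 pattern disables or deactivates a whole bank at once; that is why the loops step the interrupt number by 32 while the byte offset advances by i / 8. For a single SPI the same layout reduces to the following sketch (the helper name is illustrative):

static void gic_deactivate_one(void __iomem *dist_base, unsigned int irq)
{
        writel_relaxed(1U << (irq % 32),
                       dist_base + GIC_DIST_ACTIVE_CLEAR + (irq / 32) * 4);
}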
 
index 515c823..abf2ffa 100644 (file)
@@ -73,9 +73,11 @@ struct gic_chip_data {
        union gic_base cpu_base;
 #ifdef CONFIG_CPU_PM
        u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
+       u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
        u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
        u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
        u32 __percpu *saved_ppi_enable;
+       u32 __percpu *saved_ppi_active;
        u32 __percpu *saved_ppi_conf;
 #endif
        struct irq_domain *domain;
@@ -566,6 +568,10 @@ static void gic_dist_save(unsigned int gic_nr)
        for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
                gic_data[gic_nr].saved_spi_enable[i] =
                        readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+       for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+               gic_data[gic_nr].saved_spi_active[i] =
+                       readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
 }
 
 /*
@@ -604,9 +610,19 @@ static void gic_dist_restore(unsigned int gic_nr)
                writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
                        dist_base + GIC_DIST_TARGET + i * 4);
 
-       for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+       for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
+               writel_relaxed(GICD_INT_EN_CLR_X32,
+                       dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
                writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
                        dist_base + GIC_DIST_ENABLE_SET + i * 4);
+       }
+
+       for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
+               writel_relaxed(GICD_INT_EN_CLR_X32,
+                       dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
+               writel_relaxed(gic_data[gic_nr].saved_spi_active[i],
+                       dist_base + GIC_DIST_ACTIVE_SET + i * 4);
+       }
 
        writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
 }
@@ -631,6 +647,10 @@ static void gic_cpu_save(unsigned int gic_nr)
        for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
                ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
 
+       ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active);
+       for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
+               ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
+
        ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
        for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
                ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
@@ -654,8 +674,18 @@ static void gic_cpu_restore(unsigned int gic_nr)
                return;
 
        ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
-       for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
+       for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
+               writel_relaxed(GICD_INT_EN_CLR_X32,
+                              dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
                writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
+       }
+
+       ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active);
+       for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
+               writel_relaxed(GICD_INT_EN_CLR_X32,
+                              dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
+               writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4);
+       }
 
        ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
        for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
@@ -710,6 +740,10 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
                sizeof(u32));
        BUG_ON(!gic->saved_ppi_enable);
 
+       gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
+               sizeof(u32));
+       BUG_ON(!gic->saved_ppi_active);
+
        gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
                sizeof(u32));
        BUG_ON(!gic->saved_ppi_conf);
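The enable and active state each live in a write-1-to-set / write-1-to-clear register pair, so writing the saved snapshot into the SET register alone can never clear bits that happen to be set at resume time; the restore paths therefore wipe each bank first and then re-assert only the saved bits. Reduced to its two writes (register names are placeholders):

        writel_relaxed(~0U, base + REG_xCLEAR + i * 4);         /* wipe the bank */
        writel_relaxed(saved[i], base + REG_xSET + i * 4);      /* restore snapshot */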
index f659e60..5178645 100644 (file)
@@ -160,11 +160,6 @@ int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
 }
 EXPORT_SYMBOL(nvm_erase_blk);
 
-static void nvm_core_free(struct nvm_dev *dev)
-{
-       kfree(dev);
-}
-
 static int nvm_core_init(struct nvm_dev *dev)
 {
        struct nvm_id *id = &dev->identity;
@@ -179,12 +174,21 @@ static int nvm_core_init(struct nvm_dev *dev)
        dev->sec_size = grp->csecs;
        dev->oob_size = grp->sos;
        dev->sec_per_pg = grp->fpg_sz / grp->csecs;
-       dev->addr_mode = id->ppat;
-       dev->addr_format = id->ppaf;
+       memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
 
        dev->plane_mode = NVM_PLANE_SINGLE;
        dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;
 
+       if (grp->mtype != 0) {
+               pr_err("nvm: memory type not supported\n");
+               return -EINVAL;
+       }
+
+       if (grp->fmtype != 0 && grp->fmtype != 1) {
+               pr_err("nvm: flash type not supported\n");
+               return -EINVAL;
+       }
+
        if (grp->mpos & 0x020202)
                dev->plane_mode = NVM_PLANE_DOUBLE;
        if (grp->mpos & 0x040404)
@@ -213,21 +217,18 @@ static void nvm_free(struct nvm_dev *dev)
 
        if (dev->mt)
                dev->mt->unregister_mgr(dev);
-
-       nvm_core_free(dev);
 }
 
 static int nvm_init(struct nvm_dev *dev)
 {
        struct nvmm_type *mt;
-       int ret = 0;
+       int ret = -EINVAL;
 
        if (!dev->q || !dev->ops)
-               return -EINVAL;
+               return ret;
 
        if (dev->ops->identity(dev->q, &dev->identity)) {
                pr_err("nvm: device could not be identified\n");
-               ret = -EINVAL;
                goto err;
        }
 
@@ -273,7 +274,6 @@ static int nvm_init(struct nvm_dev *dev)
                        dev->nr_chnls);
        return 0;
 err:
-       nvm_free(dev);
        pr_err("nvm: failed to initialize nvm\n");
        return ret;
 }
@@ -308,22 +308,24 @@ int nvm_register(struct request_queue *q, char *disk_name,
        if (ret)
                goto err_init;
 
-       down_write(&nvm_lock);
-       list_add(&dev->devices, &nvm_devices);
-       up_write(&nvm_lock);
-
        if (dev->ops->max_phys_sect > 1) {
                dev->ppalist_pool = dev->ops->create_dma_pool(dev->q,
                                                                "ppalist");
                if (!dev->ppalist_pool) {
                        pr_err("nvm: could not create ppa pool\n");
-                       return -ENOMEM;
+                       ret = -ENOMEM;
+                       goto err_init;
                }
        } else if (dev->ops->max_phys_sect > 256) {
                pr_info("nvm: max sectors supported is 256.\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_init;
        }
 
+       down_write(&nvm_lock);
+       list_add(&dev->devices, &nvm_devices);
+       up_write(&nvm_lock);
+
        return 0;
 err_init:
        kfree(dev);
@@ -341,11 +343,12 @@ void nvm_unregister(char *disk_name)
                return;
        }
 
-       nvm_exit(dev);
-
        down_write(&nvm_lock);
        list_del(&dev->devices);
        up_write(&nvm_lock);
+
+       nvm_exit(dev);
+       kfree(dev);
 }
 EXPORT_SYMBOL(nvm_unregister);
 
@@ -457,11 +460,11 @@ static void nvm_remove_target(struct nvm_target *t)
        lockdep_assert_held(&nvm_lock);
 
        del_gendisk(tdisk);
+       blk_cleanup_queue(q);
+
        if (tt->exit)
                tt->exit(tdisk->private_data);
 
-       blk_cleanup_queue(q);
-
        put_disk(tdisk);
 
        list_del(&t->list);
@@ -541,7 +544,7 @@ static int nvm_configure_show(const char *val)
        if (!dev->mt)
                return 0;
 
-       dev->mt->free_blocks_print(dev);
+       dev->mt->lun_info_print(dev);
 
        return 0;
 }
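Taken together, the core.c hunks make nvm_register() publish the device on nvm_devices only after every resource (such as the ppalist DMA pool) is in place, and make nvm_unregister() take it off the list before nvm_exit() and the final kfree(), so list walkers never see a half-built or half-torn-down device; the init error path also stops freeing dev twice. The ordering pattern in isolation, with hypothetical names throughout:

int register_obj(struct obj *o)
{
        int ret = obj_alloc_resources(o);

        if (ret)
                return ret;

        down_write(&obj_lock);
        list_add(&o->list, &obj_list);  /* visible only once fully set up */
        up_write(&obj_lock);
        return 0;
}

void unregister_obj(struct obj *o)
{
        down_write(&obj_lock);
        list_del(&o->list);             /* hide before tearing down */
        up_write(&obj_lock);
        obj_free_resources(o);
        kfree(o);
}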
index ae1fb2b..e20e74e 100644 (file)
@@ -60,23 +60,28 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
                lun->vlun.lun_id = i % dev->luns_per_chnl;
                lun->vlun.chnl_id = i / dev->luns_per_chnl;
                lun->vlun.nr_free_blocks = dev->blks_per_lun;
+               lun->vlun.nr_inuse_blocks = 0;
+               lun->vlun.nr_bad_blocks = 0;
        }
        return 0;
 }
 
-static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks,
+static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
                                                                void *private)
 {
        struct gen_nvm *gn = private;
-       struct gen_lun *lun = &gn->luns[lun_id];
+       struct nvm_dev *dev = gn->dev;
+       struct gen_lun *lun;
        struct nvm_block *blk;
        int i;
 
-       if (unlikely(bitmap_empty(bb_bitmap, nr_blocks)))
-               return 0;
+       ppa = dev_to_generic_addr(gn->dev, ppa);
+       lun = &gn->luns[(dev->nr_luns * ppa.g.ch) + ppa.g.lun];
+
+       for (i = 0; i < nr_blocks; i++) {
+               if (blks[i] == 0)
+                       continue;
 
-       i = -1;
-       while ((i = find_next_bit(bb_bitmap, nr_blocks, i + 1)) < nr_blocks) {
                blk = &lun->vlun.blocks[i];
                if (!blk) {
                        pr_err("gennvm: BB data is out of bounds.\n");
@@ -84,6 +89,7 @@ static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks,
                }
 
                list_move_tail(&blk->list, &lun->bb_list);
+               lun->vlun.nr_bad_blocks++;
        }
 
        return 0;
@@ -136,6 +142,7 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
                        list_move_tail(&blk->list, &lun->used_list);
                        blk->type = 1;
                        lun->vlun.nr_free_blocks--;
+                       lun->vlun.nr_inuse_blocks++;
                }
        }
 
@@ -164,15 +171,25 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
                        block->id = cur_block_id++;
 
                        /* First block is reserved for device */
-                       if (unlikely(lun_iter == 0 && blk_iter == 0))
+                       if (unlikely(lun_iter == 0 && blk_iter == 0)) {
+                               lun->vlun.nr_free_blocks--;
                                continue;
+                       }
 
                        list_add_tail(&block->list, &lun->free_list);
                }
 
                if (dev->ops->get_bb_tbl) {
-                       ret = dev->ops->get_bb_tbl(dev->q, lun->vlun.id,
-                                       dev->blks_per_lun, gennvm_block_bb, gn);
+                       struct ppa_addr ppa;
+
+                       ppa.ppa = 0;
+                       ppa.g.ch = lun->vlun.chnl_id;
+                       ppa.g.lun = lun->vlun.id;
+                       ppa = generic_to_dev_addr(dev, ppa);
+
+                       ret = dev->ops->get_bb_tbl(dev->q, ppa,
+                                               dev->blks_per_lun,
+                                               gennvm_block_bb, gn);
                        if (ret)
                                pr_err("gennvm: could not read BB table\n");
                }
@@ -199,6 +216,7 @@ static int gennvm_register(struct nvm_dev *dev)
        if (!gn)
                return -ENOMEM;
 
+       gn->dev = dev;
        gn->nr_luns = dev->nr_luns;
        dev->mp = gn;
 
@@ -254,6 +272,7 @@ static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
        blk->type = 1;
 
        lun->vlun.nr_free_blocks--;
+       lun->vlun.nr_inuse_blocks++;
 
        spin_unlock(&vlun->lock);
 out:
@@ -271,16 +290,21 @@ static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
        case 1:
                list_move_tail(&blk->list, &lun->free_list);
                lun->vlun.nr_free_blocks++;
+               lun->vlun.nr_inuse_blocks--;
                blk->type = 0;
                break;
        case 2:
                list_move_tail(&blk->list, &lun->bb_list);
+               lun->vlun.nr_bad_blocks++;
+               lun->vlun.nr_inuse_blocks--;
                break;
        default:
                WARN_ON_ONCE(1);
                pr_err("gennvm: erroneous block type (%lu -> %u)\n",
                                                        blk->id, blk->type);
                list_move_tail(&blk->list, &lun->bb_list);
+               lun->vlun.nr_bad_blocks++;
+               lun->vlun.nr_inuse_blocks--;
        }
 
        spin_unlock(&vlun->lock);
@@ -292,10 +316,10 @@ static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
 
        if (rqd->nr_pages > 1) {
                for (i = 0; i < rqd->nr_pages; i++)
-                       rqd->ppa_list[i] = addr_to_generic_mode(dev,
+                       rqd->ppa_list[i] = dev_to_generic_addr(dev,
                                                        rqd->ppa_list[i]);
        } else {
-               rqd->ppa_addr = addr_to_generic_mode(dev, rqd->ppa_addr);
+               rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
        }
 }
 
@@ -305,10 +329,10 @@ static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
 
        if (rqd->nr_pages > 1) {
                for (i = 0; i < rqd->nr_pages; i++)
-                       rqd->ppa_list[i] = generic_to_addr_mode(dev,
+                       rqd->ppa_list[i] = generic_to_dev_addr(dev,
                                                        rqd->ppa_list[i]);
        } else {
-               rqd->ppa_addr = generic_to_addr_mode(dev, rqd->ppa_addr);
+               rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
        }
 }
 
@@ -354,10 +378,10 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
 {
        int i;
 
-       if (!dev->ops->set_bb)
+       if (!dev->ops->set_bb_tbl)
                return;
 
-       if (dev->ops->set_bb(dev->q, rqd, 1))
+       if (dev->ops->set_bb_tbl(dev->q, rqd, 1))
                return;
 
        gennvm_addr_to_generic_mode(dev, rqd);
@@ -440,15 +464,24 @@ static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
        return &gn->luns[lunid].vlun;
 }
 
-static void gennvm_free_blocks_print(struct nvm_dev *dev)
+static void gennvm_lun_info_print(struct nvm_dev *dev)
 {
        struct gen_nvm *gn = dev->mp;
        struct gen_lun *lun;
        unsigned int i;
 
-       gennvm_for_each_lun(gn, lun, i)
-               pr_info("%s: lun%8u\t%u\n",
-                                       dev->name, i, lun->vlun.nr_free_blocks);
+
+       gennvm_for_each_lun(gn, lun, i) {
+               spin_lock(&lun->vlun.lock);
+
+               pr_info("%s: lun%8u\t%u\t%u\t%u\n",
+                               dev->name, i,
+                               lun->vlun.nr_free_blocks,
+                               lun->vlun.nr_inuse_blocks,
+                               lun->vlun.nr_bad_blocks);
+
+               spin_unlock(&lun->vlun.lock);
+       }
 }
 
 static struct nvmm_type gennvm = {
@@ -466,7 +499,7 @@ static struct nvmm_type gennvm = {
        .erase_blk      = gennvm_erase_blk,
 
        .get_lun        = gennvm_get_lun,
-       .free_blocks_print = gennvm_free_blocks_print,
+       .lun_info_print = gennvm_lun_info_print,
 };
 
 static int __init gennvm_module_init(void)
index d23bd35..9c24b5b 100644 (file)
@@ -35,6 +35,8 @@ struct gen_lun {
 };
 
 struct gen_nvm {
+       struct nvm_dev *dev;
+
        int nr_luns;
        struct gen_lun *luns;
 };
index 7ba64c8..75e59c3 100644 (file)
@@ -123,12 +123,42 @@ static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
        return blk->id * rrpc->dev->pgs_per_blk;
 }
 
+static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
+                                                       struct ppa_addr r)
+{
+       struct ppa_addr l;
+       int secs, pgs, blks, luns;
+       sector_t ppa = r.ppa;
+
+       l.ppa = 0;
+
+       div_u64_rem(ppa, dev->sec_per_pg, &secs);
+       l.g.sec = secs;
+
+       sector_div(ppa, dev->sec_per_pg);
+       div_u64_rem(ppa, dev->sec_per_blk, &pgs);
+       l.g.pg = pgs;
+
+       sector_div(ppa, dev->pgs_per_blk);
+       div_u64_rem(ppa, dev->blks_per_lun, &blks);
+       l.g.blk = blks;
+
+       sector_div(ppa, dev->blks_per_lun);
+       div_u64_rem(ppa, dev->luns_per_chnl, &luns);
+       l.g.lun = luns;
+
+       sector_div(ppa, dev->luns_per_chnl);
+       l.g.ch = ppa;
+
+       return l;
+}
+
 static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
 {
        struct ppa_addr paddr;
 
        paddr.ppa = addr;
-       return __linear_to_generic_addr(dev, paddr);
+       return linear_to_generic_addr(dev, paddr);
 }
 
 /* requires lun->lock taken */
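linear_to_generic_addr() peels one address field off per step: the remainder gives the field, the division moves on to the next level, and div_u64_rem()/sector_div() are used because the linear address is 64-bit. A simplified illustration of such a mixed-radix split with made-up geometry (not the exact function above):

static void split_linear(u64 addr, u32 *sec, u32 *pg, u32 *blk,
                         u32 *lun, u32 *ch)
{
        /* hypothetical geometry, for illustration only */
        u32 secs_per_pg = 4, pgs_per_blk = 256,
            blks_per_lun = 1024, luns_per_chnl = 8;

        *sec = do_div(addr, secs_per_pg);       /* remainder; addr /= divisor */
        *pg  = do_div(addr, pgs_per_blk);
        *blk = do_div(addr, blks_per_lun);
        *lun = do_div(addr, luns_per_chnl);
        *ch  = (u32)addr;
}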
index 917d47e..3147c8d 100644 (file)
@@ -112,7 +112,8 @@ struct iv_tcw_private {
  * and encrypts / decrypts at the same time.
  */
 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
-            DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
+            DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
+            DM_CRYPT_EXIT_THREAD};
 
 /*
  * The fields in here must be read only after initialization.
@@ -1203,20 +1204,18 @@ continue_locked:
                if (!RB_EMPTY_ROOT(&cc->write_tree))
                        goto pop_from_list;
 
+               if (unlikely(test_bit(DM_CRYPT_EXIT_THREAD, &cc->flags))) {
+                       spin_unlock_irq(&cc->write_thread_wait.lock);
+                       break;
+               }
+
                __set_current_state(TASK_INTERRUPTIBLE);
                __add_wait_queue(&cc->write_thread_wait, &wait);
 
                spin_unlock_irq(&cc->write_thread_wait.lock);
 
-               if (unlikely(kthread_should_stop())) {
-                       set_task_state(current, TASK_RUNNING);
-                       remove_wait_queue(&cc->write_thread_wait, &wait);
-                       break;
-               }
-
                schedule();
 
-               set_task_state(current, TASK_RUNNING);
                spin_lock_irq(&cc->write_thread_wait.lock);
                __remove_wait_queue(&cc->write_thread_wait, &wait);
                goto continue_locked;
@@ -1531,8 +1530,13 @@ static void crypt_dtr(struct dm_target *ti)
        if (!cc)
                return;
 
-       if (cc->write_thread)
+       if (cc->write_thread) {
+               spin_lock_irq(&cc->write_thread_wait.lock);
+               set_bit(DM_CRYPT_EXIT_THREAD, &cc->flags);
+               wake_up_locked(&cc->write_thread_wait);
+               spin_unlock_irq(&cc->write_thread_wait.lock);
                kthread_stop(cc->write_thread);
+       }
 
        if (cc->io_queue)
                destroy_workqueue(cc->io_queue);
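Instead of relying on kthread_should_stop(), which was tested outside the lock that guards the writer's wait queue, the teardown now raises DM_CRYPT_EXIT_THREAD and wakes the thread while holding write_thread_wait.lock, so the exit request and the decision to sleep are serialized by the same lock. The pattern in outline, with placeholder names:

        /* requester side */
        spin_lock_irq(&wq.lock);
        set_bit(EXIT_FLAG, &flags);             /* EXIT_FLAG is a placeholder */
        wake_up_locked(&wq);
        spin_unlock_irq(&wq.lock);
        kthread_stop(task);

        /* worker side, wq.lock held while deciding whether to sleep */
        if (test_bit(EXIT_FLAG, &flags)) {
                spin_unlock_irq(&wq.lock);
                return 0;                       /* kthread_stop() reaps the thread */
        }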
index aaa6caa..cfa29f5 100644 (file)
@@ -1537,32 +1537,34 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
                struct block_device **bdev, fmode_t *mode)
 {
        struct multipath *m = ti->private;
-       struct pgpath *pgpath;
        unsigned long flags;
        int r;
 
-       r = 0;
-
        spin_lock_irqsave(&m->lock, flags);
 
        if (!m->current_pgpath)
                __choose_pgpath(m, 0);
 
-       pgpath = m->current_pgpath;
-
-       if (pgpath) {
-               *bdev = pgpath->path.dev->bdev;
-               *mode = pgpath->path.dev->mode;
+       if (m->current_pgpath) {
+               if (!m->queue_io) {
+                       *bdev = m->current_pgpath->path.dev->bdev;
+                       *mode = m->current_pgpath->path.dev->mode;
+                       r = 0;
+               } else {
+                       /* pg_init has not started or completed */
+                       r = -ENOTCONN;
+               }
+       } else {
+               /* No path is available */
+               if (m->queue_if_no_path)
+                       r = -ENOTCONN;
+               else
+                       r = -EIO;
        }
 
-       if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
-               r = -ENOTCONN;
-       else if (!*bdev)
-               r = -EIO;
-
        spin_unlock_irqrestore(&m->lock, flags);
 
-       if (r == -ENOTCONN && !fatal_signal_pending(current)) {
+       if (r == -ENOTCONN) {
                spin_lock_irqsave(&m->lock, flags);
                if (!m->current_pg) {
                        /* Path status changed, redo selection */
index 3897b90..63903a5 100644 (file)
@@ -2432,6 +2432,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
        case PM_WRITE:
                if (old_mode != new_mode)
                        notify_of_pool_mode_change(pool, "write");
+               pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space;
                dm_pool_metadata_read_write(pool->pmd);
                pool->process_bio = process_bio;
                pool->process_discard = process_discard_bio;
@@ -4249,10 +4250,9 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
        struct thin_c *tc = ti->private;
        struct pool *pool = tc->pool;
-       struct queue_limits *pool_limits = dm_get_queue_limits(pool->pool_md);
 
-       if (!pool_limits->discard_granularity)
-               return; /* pool's discard support is disabled */
+       if (!pool->pf.discard_enabled)
+               return;
 
        limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
        limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */
index 6e15f35..5df4048 100644 (file)
@@ -591,7 +591,7 @@ retry:
 
 out:
        dm_put_live_table(md, *srcu_idx);
-       if (r == -ENOTCONN) {
+       if (r == -ENOTCONN && !fatal_signal_pending(current)) {
                msleep(10);
                goto retry;
        }
@@ -603,9 +603,10 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
 {
        struct mapped_device *md = bdev->bd_disk->private_data;
        struct dm_target *tgt;
+       struct block_device *tgt_bdev = NULL;
        int srcu_idx, r;
 
-       r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+       r = dm_get_live_table_for_ioctl(md, &tgt, &tgt_bdev, &mode, &srcu_idx);
        if (r < 0)
                return r;
 
@@ -620,7 +621,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
                        goto out;
        }
 
-       r =  __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+       r =  __blkdev_driver_ioctl(tgt_bdev, mode, cmd, arg);
 out:
        dm_put_live_table(md, srcu_idx);
        return r;
index 35759a9..e8f8472 100644 (file)
@@ -1992,9 +1992,9 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
                (unsigned long long)pci_resource_start(pci_dev, 0));
 
        pci_set_master(pci_dev);
-       if (!pci_set_dma_mask(pci_dev, 0xffffffff)) {
+       err = pci_set_dma_mask(pci_dev, 0xffffffff);
+       if (err) {
                printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
-               err = -EIO;
                goto fail_context;
        }
 
index dbc695f..0042803 100644 (file)
@@ -1319,7 +1319,8 @@ static int cx25821_initdev(struct pci_dev *pci_dev,
                dev->pci_lat, (unsigned long long)dev->base_io_addr);
 
        pci_set_master(pci_dev);
-       if (!pci_set_dma_mask(pci_dev, 0xffffffff)) {
+       err = pci_set_dma_mask(pci_dev, 0xffffffff);
+       if (err) {
                pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
                err = -EIO;
                goto fail_irq;
index 0ed1b65..1b5268f 100644 (file)
@@ -890,9 +890,9 @@ static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci,
                return err;
        }
 
-       if (!pci_set_dma_mask(pci,DMA_BIT_MASK(32))) {
+       err = pci_set_dma_mask(pci,DMA_BIT_MASK(32));
+       if (err) {
                dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n",core->name);
-               err = -EIO;
                cx88_core_put(core, pci);
                return err;
        }
index 9db7767..f34c229 100644 (file)
@@ -393,7 +393,8 @@ static int cx8802_init_common(struct cx8802_dev *dev)
        if (pci_enable_device(dev->pci))
                return -EIO;
        pci_set_master(dev->pci);
-       if (!pci_set_dma_mask(dev->pci,DMA_BIT_MASK(32))) {
+       err = pci_set_dma_mask(dev->pci,DMA_BIT_MASK(32));
+       if (err) {
                printk("%s/2: Oops: no 32bit PCI DMA ???\n",dev->core->name);
                return -EIO;
        }
index 0de1ad5..aef9acf 100644 (file)
@@ -1314,9 +1314,9 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
               dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0));
 
        pci_set_master(pci_dev);
-       if (!pci_set_dma_mask(pci_dev,DMA_BIT_MASK(32))) {
+       err = pci_set_dma_mask(pci_dev,DMA_BIT_MASK(32));
+       if (err) {
                printk("%s/0: Oops: no 32bit PCI DMA ???\n",core->name);
-               err = -EIO;
                goto fail_core;
        }
        dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
index 60b2d46..3fdbd81 100644 (file)
@@ -810,7 +810,7 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev,
                "%s(): board vendor 0x%x, revision 0x%x\n",
                __func__, board_vendor, board_revision);
        pci_set_master(pci_dev);
-       if (!pci_set_dma_mask(pci_dev, 0xffffffff)) {
+       if (pci_set_dma_mask(pci_dev, 0xffffffff) < 0) {
                dev_err(&pci_dev->dev,
                        "%s(): 32bit PCI DMA is not supported\n", __func__);
                goto pci_detect_err;
index e79d63e..f720cea 100644 (file)
@@ -951,9 +951,9 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
               pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
               dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0));
        pci_set_master(pci_dev);
-       if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
+       err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+       if (err) {
                pr_warn("%s: Oops: no 32bit PCI DMA ???\n", dev->name);
-               err = -EIO;
                goto fail1;
        }
 
index 8f36b48..8bbd092 100644 (file)
@@ -1264,9 +1264,9 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
 
        pci_set_master(pci_dev);
        /* TODO */
-       if (!pci_set_dma_mask(pci_dev, 0xffffffff)) {
+       err = pci_set_dma_mask(pci_dev, 0xffffffff);
+       if (err) {
                printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
-               err = -EIO;
                goto fail_irq;
        }
 
index 8c5655d..4e77618 100644 (file)
@@ -257,9 +257,9 @@ static int tw68_initdev(struct pci_dev *pci_dev,
                dev->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
                dev->pci_lat, (u64)pci_resource_start(pci_dev, 0));
        pci_set_master(pci_dev);
-       if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
+       err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+       if (err) {
                pr_info("%s: Oops: no 32bit PCI DMA ???\n", dev->name);
-               err = -EIO;
                goto fail1;
        }
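All of the media-driver hunks above fix the same inverted test: pci_set_dma_mask() returns 0 on success and a negative errno on failure, so the old `if (!pci_set_dma_mask(...))` took the error path exactly when the 32-bit mask had been accepted and sailed past a real failure. The intended idiom is simply (the error label stands in for each driver's own):

        err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
        if (err) {
                dev_err(&pci_dev->dev, "no suitable DMA mask available\n");
                goto fail;
        }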
 
index dc4e844..5a99a93 100644 (file)
@@ -25,6 +25,7 @@
 
 #include <linux/gpio.h>
 
+#include <asm/mach-jz4740/gpio.h>
 #include <asm/mach-jz4740/jz4740_nand.h>
 
 #define JZ_REG_NAND_CTRL       0x50
index cc74142..ece544e 100644 (file)
@@ -3110,7 +3110,7 @@ static void nand_resume(struct mtd_info *mtd)
  */
 static void nand_shutdown(struct mtd_info *mtd)
 {
-       nand_get_device(mtd, FL_SHUTDOWN);
+       nand_get_device(mtd, FL_PM_SUSPENDED);
 }
 
 /* Set default functions */
index e2afabf..7ccebae 100644 (file)
@@ -1500,10 +1500,11 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
                return -ENODEV;
        }
 
-       if (!pci_set_dma_mask(pdev, PCNET32_DMA_MASK)) {
+       err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK);
+       if (err) {
                if (pcnet32_debug & NETIF_MSG_PROBE)
                        pr_err("architecture does not support 32bit PCI busmaster DMA\n");
-               return -ENODEV;
+               return err;
        }
        if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
                if (pcnet32_debug & NETIF_MSG_PROBE)
index e0b7b95..9202d1a 100644 (file)
@@ -93,7 +93,7 @@ struct nvme_nvm_l2ptbl {
        __le16                  cdw14[6];
 };
 
-struct nvme_nvm_bbtbl {
+struct nvme_nvm_getbbtbl {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
@@ -101,10 +101,23 @@ struct nvme_nvm_bbtbl {
        __u64                   rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
-       __le32                  prp1_len;
-       __le32                  prp2_len;
-       __le32                  lbb;
-       __u32                   rsvd11[3];
+       __le64                  spba;
+       __u32                   rsvd4[4];
+};
+
+struct nvme_nvm_setbbtbl {
+       __u8                    opcode;
+       __u8                    flags;
+       __u16                   command_id;
+       __le32                  nsid;
+       __le64                  rsvd[2];
+       __le64                  prp1;
+       __le64                  prp2;
+       __le64                  spba;
+       __le16                  nlb;
+       __u8                    value;
+       __u8                    rsvd3;
+       __u32                   rsvd4[3];
 };
 
 struct nvme_nvm_erase_blk {
@@ -129,8 +142,8 @@ struct nvme_nvm_command {
                struct nvme_nvm_hb_rw hb_rw;
                struct nvme_nvm_ph_rw ph_rw;
                struct nvme_nvm_l2ptbl l2p;
-               struct nvme_nvm_bbtbl get_bb;
-               struct nvme_nvm_bbtbl set_bb;
+               struct nvme_nvm_getbbtbl get_bb;
+               struct nvme_nvm_setbbtbl set_bb;
                struct nvme_nvm_erase_blk erase;
        };
 };
@@ -142,11 +155,13 @@ struct nvme_nvm_id_group {
        __u8                    num_ch;
        __u8                    num_lun;
        __u8                    num_pln;
+       __u8                    rsvd1;
        __le16                  num_blk;
        __le16                  num_pg;
        __le16                  fpg_sz;
        __le16                  csecs;
        __le16                  sos;
+       __le16                  rsvd2;
        __le32                  trdt;
        __le32                  trdm;
        __le32                  tprt;
@@ -154,8 +169,9 @@ struct nvme_nvm_id_group {
        __le32                  tbet;
        __le32                  tbem;
        __le32                  mpos;
+       __le32                  mccap;
        __le16                  cpar;
-       __u8                    reserved[913];
+       __u8                    reserved[906];
 } __packed;
 
 struct nvme_nvm_addr_format {
@@ -178,15 +194,28 @@ struct nvme_nvm_id {
        __u8                    ver_id;
        __u8                    vmnt;
        __u8                    cgrps;
-       __u8                    res[5];
+       __u8                    res;
        __le32                  cap;
        __le32                  dom;
        struct nvme_nvm_addr_format ppaf;
-       __u8                    ppat;
-       __u8                    resv[223];
+       __u8                    resv[228];
        struct nvme_nvm_id_group groups[4];
 } __packed;
 
+struct nvme_nvm_bb_tbl {
+       __u8    tblid[4];
+       __le16  verid;
+       __le16  revid;
+       __le32  rvsd1;
+       __le32  tblks;
+       __le32  tfact;
+       __le32  tgrown;
+       __le32  tdresv;
+       __le32  thresv;
+       __le32  rsvd2[8];
+       __u8    blk[0];
+};
+
 /*
  * Check we didn't inadvertently grow the command struct
  */
@@ -195,12 +224,14 @@ static inline void _nvme_nvm_check_size(void)
        BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
-       BUILD_BUG_ON(sizeof(struct nvme_nvm_bbtbl) != 64);
+       BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
+       BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096);
+       BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 512);
 }
 
 static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
@@ -234,6 +265,7 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
                dst->tbet = le32_to_cpu(src->tbet);
                dst->tbem = le32_to_cpu(src->tbem);
                dst->mpos = le32_to_cpu(src->mpos);
+               dst->mccap = le32_to_cpu(src->mccap);
 
                dst->cpar = le16_to_cpu(src->cpar);
        }
@@ -244,6 +276,7 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
 static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id)
 {
        struct nvme_ns *ns = q->queuedata;
+       struct nvme_dev *dev = ns->dev;
        struct nvme_nvm_id *nvme_nvm_id;
        struct nvme_nvm_command c = {};
        int ret;
@@ -256,8 +289,8 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id)
        if (!nvme_nvm_id)
                return -ENOMEM;
 
-       ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, nvme_nvm_id,
-                                               sizeof(struct nvme_nvm_id));
+       ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
+                               nvme_nvm_id, sizeof(struct nvme_nvm_id));
        if (ret) {
                ret = -EIO;
                goto out;
@@ -268,6 +301,8 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id)
        nvm_id->cgrps = nvme_nvm_id->cgrps;
        nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
        nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
+       memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf,
+                                       sizeof(struct nvme_nvm_addr_format));
 
        ret = init_grps(nvm_id, nvme_nvm_id);
 out:
@@ -281,7 +316,7 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb,
        struct nvme_ns *ns = q->queuedata;
        struct nvme_dev *dev = ns->dev;
        struct nvme_nvm_command c = {};
-       u32 len = queue_max_hw_sectors(q) << 9;
+       u32 len = queue_max_hw_sectors(dev->admin_q) << 9;
        u32 nlb_pr_rq = len / sizeof(u64);
        u64 cmd_slba = slba;
        void *entries;
@@ -299,8 +334,8 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb,
                c.l2p.slba = cpu_to_le64(cmd_slba);
                c.l2p.nlb = cpu_to_le32(cmd_nlb);
 
-               ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c,
-                                                               entries, len);
+               ret = nvme_submit_sync_cmd(dev->admin_q,
+                               (struct nvme_command *)&c, entries, len);
                if (ret) {
                        dev_err(dev->dev, "L2P table transfer failed (%d)\n",
                                                                        ret);
@@ -322,43 +357,82 @@ out:
        return ret;
 }
 
-static int nvme_nvm_get_bb_tbl(struct request_queue *q, int lunid,
-                               unsigned int nr_blocks,
-                               nvm_bb_update_fn *update_bbtbl, void *priv)
+static int nvme_nvm_get_bb_tbl(struct request_queue *q, struct ppa_addr ppa,
+                               int nr_blocks, nvm_bb_update_fn *update_bbtbl,
+                               void *priv)
 {
        struct nvme_ns *ns = q->queuedata;
        struct nvme_dev *dev = ns->dev;
        struct nvme_nvm_command c = {};
-       void *bb_bitmap;
-       u16 bb_bitmap_size;
+       struct nvme_nvm_bb_tbl *bb_tbl;
+       int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks;
        int ret = 0;
 
        c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
        c.get_bb.nsid = cpu_to_le32(ns->ns_id);
-       c.get_bb.lbb = cpu_to_le32(lunid);
-       bb_bitmap_size = ((nr_blocks >> 15) + 1) * PAGE_SIZE;
-       bb_bitmap = kmalloc(bb_bitmap_size, GFP_KERNEL);
-       if (!bb_bitmap)
-               return -ENOMEM;
+       c.get_bb.spba = cpu_to_le64(ppa.ppa);
 
-       bitmap_zero(bb_bitmap, nr_blocks);
+       bb_tbl = kzalloc(tblsz, GFP_KERNEL);
+       if (!bb_tbl)
+               return -ENOMEM;
 
-       ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, bb_bitmap,
-                                                               bb_bitmap_size);
+       ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
+                                                               bb_tbl, tblsz);
        if (ret) {
                dev_err(dev->dev, "get bad block table failed (%d)\n", ret);
                ret = -EIO;
                goto out;
        }
 
-       ret = update_bbtbl(lunid, bb_bitmap, nr_blocks, priv);
+       if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
+               bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
+               dev_err(dev->dev, "bbt format mismatch\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (le16_to_cpu(bb_tbl->verid) != 1) {
+               ret = -EINVAL;
+               dev_err(dev->dev, "bbt version not supported\n");
+               goto out;
+       }
+
+       if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) {
+               ret = -EINVAL;
+               dev_err(dev->dev, "bbt unexpected blocks returned (%u!=%u)",
+                                       le32_to_cpu(bb_tbl->tblks), nr_blocks);
+               goto out;
+       }
+
+       ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv);
        if (ret) {
                ret = -EINTR;
                goto out;
        }
 
 out:
-       kfree(bb_bitmap);
+       kfree(bb_tbl);
+       return ret;
+}
+
+static int nvme_nvm_set_bb_tbl(struct request_queue *q, struct nvm_rq *rqd,
+                                                               int type)
+{
+       struct nvme_ns *ns = q->queuedata;
+       struct nvme_dev *dev = ns->dev;
+       struct nvme_nvm_command c = {};
+       int ret = 0;
+
+       c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
+       c.set_bb.nsid = cpu_to_le32(ns->ns_id);
+       c.set_bb.spba = cpu_to_le64(rqd->ppa_addr.ppa);
+       c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1);
+       c.set_bb.value = type;
+
+       ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
+                                                               NULL, 0);
+       if (ret)
+               dev_err(dev->dev, "set bad block table failed (%d)\n", ret);
        return ret;
 }
 
@@ -474,6 +548,7 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
        .get_l2p_tbl            = nvme_nvm_get_l2p_tbl,
 
        .get_bb_tbl             = nvme_nvm_get_bb_tbl,
+       .set_bb_tbl             = nvme_nvm_set_bb_tbl,
 
        .submit_io              = nvme_nvm_submit_io,
        .erase_block            = nvme_nvm_erase_block,
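With the reworked layout, get_bb_tbl returns an nvme_nvm_bb_tbl header (the "BBLT" magic, a version and block counts) followed by one status byte per block, and the driver checks magic, version and tblks before handing blk[] to the media manager's callback. A sketch of a callback consuming that array, matching the nvm_bb_update_fn signature used above (status semantics assumed: 0 is good, anything else factory or grown bad):

static int count_bad_blocks(struct ppa_addr ppa, int nr_blocks, u8 *blks,
                            void *priv)
{
        int i, *bad = priv;

        for (i = 0; i < nr_blocks; i++)
                if (blks[i])
                        (*bad)++;
        return 0;
}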
index 8187df2..f3b53af 100644 (file)
@@ -896,19 +896,28 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
                        goto retry_cmd;
                }
                if (blk_integrity_rq(req)) {
-                       if (blk_rq_count_integrity_sg(req->q, req->bio) != 1)
+                       if (blk_rq_count_integrity_sg(req->q, req->bio) != 1) {
+                               dma_unmap_sg(dev->dev, iod->sg, iod->nents,
+                                               dma_dir);
                                goto error_cmd;
+                       }
 
                        sg_init_table(iod->meta_sg, 1);
                        if (blk_rq_map_integrity_sg(
-                                       req->q, req->bio, iod->meta_sg) != 1)
+                                       req->q, req->bio, iod->meta_sg) != 1) {
+                               dma_unmap_sg(dev->dev, iod->sg, iod->nents,
+                                               dma_dir);
                                goto error_cmd;
+                       }
 
                        if (rq_data_dir(req))
                                nvme_dif_remap(req, nvme_dif_prep);
 
-                       if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir))
+                       if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) {
+                               dma_unmap_sg(dev->dev, iod->sg, iod->nents,
+                                               dma_dir);
                                goto error_cmd;
+                       }
                }
        }
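The added dma_unmap_sg() calls make every early exit taken after the scatterlist was mapped unwind that mapping instead of leaking it. Reduced to the usual goto-unwind shape (labels and the integrity helper are placeholders for the checks above):

        if (!dma_map_sg(dev->dev, iod->sg, iod->nents, dma_dir))
                goto out;
        if (setup_integrity(req, iod))          /* placeholder helper */
                goto unmap;
        return 0;
unmap:
        dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
out:
        return -EIO;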
 
@@ -968,7 +977,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
        if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
                return;
 
-       writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
+       if (likely(nvmeq->cq_vector >= 0))
+               writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
        nvmeq->cq_head = head;
        nvmeq->cq_phase = phase;
 
@@ -1727,9 +1737,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
        u32 aqa;
        u64 cap = lo_hi_readq(&dev->bar->cap);
        struct nvme_queue *nvmeq;
-       unsigned page_shift = PAGE_SHIFT;
+       /*
+        * default to a 4K page size, with the intention to update this
+        * path in the future to accomodate architectures with differing
+        * path in the future to accommodate architectures with differing
+        */
+       unsigned page_shift = 12;
        unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
-       unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;
 
        if (page_shift < dev_page_min) {
                dev_err(dev->dev,
@@ -1738,13 +1752,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
                                1 << page_shift);
                return -ENODEV;
        }
-       if (page_shift > dev_page_max) {
-               dev_info(dev->dev,
-                               "Device maximum page size (%u) smaller than "
-                               "host (%u); enabling work-around\n",
-                               1 << dev_page_max, 1 << page_shift);
-               page_shift = dev_page_max;
-       }
 
        dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ?
                                                NVME_CAP_NSSRC(cap) : 0;
@@ -2268,7 +2275,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
        if (dev->max_hw_sectors) {
                blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
                blk_queue_max_segments(ns->queue,
-                       ((dev->max_hw_sectors << 9) / dev->page_size) + 1);
+                       (dev->max_hw_sectors / (dev->page_size >> 9)) + 1);
        }
        if (dev->stripe_size)
                blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9);
@@ -2787,6 +2794,10 @@ static void nvme_del_queue_end(struct nvme_queue *nvmeq)
 {
        struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
        nvme_put_dq(dq);
+
+       spin_lock_irq(&nvmeq->q_lock);
+       nvme_process_cq(nvmeq);
+       spin_unlock_irq(&nvmeq->q_lock);
 }
 
 static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode,
index 540f077..02a7452 100644 (file)
@@ -440,7 +440,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
                                         ret, pp->io);
                                continue;
                        }
-                       pp->io_base = pp->io->start;
                        break;
                case IORESOURCE_MEM:
                        pp->mem = win->res;
index 35457ec..163671a 100644 (file)
@@ -111,7 +111,7 @@ static struct pcie_host_ops hisi_pcie_host_ops = {
        .link_up = hisi_pcie_link_up,
 };
 
-static int __init hisi_add_pcie_port(struct pcie_port *pp,
+static int hisi_add_pcie_port(struct pcie_port *pp,
                                     struct platform_device *pdev)
 {
        int ret;
@@ -139,7 +139,7 @@ static int __init hisi_add_pcie_port(struct pcie_port *pp,
        return 0;
 }
 
-static int __init hisi_pcie_probe(struct platform_device *pdev)
+static int hisi_pcie_probe(struct platform_device *pdev)
 {
        struct hisi_pcie *hisi_pcie;
        struct pcie_port *pp;
index 9261868..eead54c 100644 (file)
@@ -216,7 +216,10 @@ static ssize_t numa_node_store(struct device *dev,
        if (ret)
                return ret;
 
-       if (node >= MAX_NUMNODES || !node_online(node))
+       if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES)
+               return -EINVAL;
+
+       if (node != NUMA_NO_NODE && !node_online(node))
                return -EINVAL;
 
        add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
index fd2f03f..d390fc1 100644 (file)
@@ -337,6 +337,4 @@ static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
 }
 #endif
 
-struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
-
 #endif /* DRIVERS_PCI_H */
index e735c72..edb1984 100644 (file)
@@ -1685,8 +1685,8 @@ static void pci_dma_configure(struct pci_dev *dev)
 {
        struct device *bridge = pci_get_host_bridge_device(dev);
 
-       if (IS_ENABLED(CONFIG_OF) && dev->dev.of_node) {
-               if (bridge->parent)
+       if (IS_ENABLED(CONFIG_OF) &&
+               bridge->parent && bridge->parent->of_node) {
                        of_dma_configure(&dev->dev, bridge->parent->of_node);
        } else if (has_acpi_companion(bridge)) {
                struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
index b422e4e..312c78b 100644 (file)
@@ -5,8 +5,6 @@
 config PINCTRL
        bool
 
-if PINCTRL
-
 menu "Pin controllers"
        depends on PINCTRL
 
@@ -274,5 +272,3 @@ config PINCTRL_TB10X
        select GPIOLIB
 
 endmenu
-
-endif
index 88a7fac..acaf84c 100644 (file)
@@ -538,8 +538,10 @@ static int imx1_pinctrl_parse_functions(struct device_node *np,
                func->groups[i] = child->name;
                grp = &info->groups[grp_index++];
                ret = imx1_pinctrl_parse_groups(child, grp, info, i++);
-               if (ret == -ENOMEM)
+               if (ret == -ENOMEM) {
+                       of_node_put(child);
                        return ret;
+               }
        }
 
        return 0;
@@ -582,8 +584,10 @@ static int imx1_pinctrl_parse_dt(struct platform_device *pdev,
 
        for_each_child_of_node(np, child) {
                ret = imx1_pinctrl_parse_functions(child, info, ifunc++);
-               if (ret == -ENOMEM)
+               if (ret == -ENOMEM) {
+                       of_node_put(child);
                        return -ENOMEM;
+               }
        }
 
        return 0;
index f307f1d..5c71727 100644 (file)
@@ -747,7 +747,7 @@ static int mtk_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
        reg_addr =  mtk_get_port(pctl, offset) + pctl->devdata->dir_offset;
        bit = BIT(offset & 0xf);
        regmap_read(pctl->regmap1, reg_addr, &read_val);
-       return !!(read_val & bit);
+       return !(read_val & bit);
 }
 
 static int mtk_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -757,12 +757,8 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned offset)
        unsigned int read_val = 0;
        struct mtk_pinctrl *pctl = dev_get_drvdata(chip->dev);
 
-       if (mtk_gpio_get_direction(chip, offset))
-               reg_addr = mtk_get_port(pctl, offset) +
-                       pctl->devdata->dout_offset;
-       else
-               reg_addr = mtk_get_port(pctl, offset) +
-                       pctl->devdata->din_offset;
+       reg_addr = mtk_get_port(pctl, offset) +
+               pctl->devdata->din_offset;
 
        bit = BIT(offset & 0xf);
        regmap_read(pctl->regmap1, reg_addr, &read_val);
@@ -997,6 +993,7 @@ static struct gpio_chip mtk_gpio_chip = {
        .owner                  = THIS_MODULE,
        .request                = gpiochip_generic_request,
        .free                   = gpiochip_generic_free,
+       .get_direction          = mtk_gpio_get_direction,
        .direction_input        = mtk_gpio_direction_input,
        .direction_output       = mtk_gpio_direction_output,
        .get                    = mtk_gpio_get,
index d809c9e..19a3c3b 100644 (file)
@@ -672,7 +672,7 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        pctrl->dev = &pdev->dev;
-       pctrl->npins = (unsigned)of_device_get_match_data(&pdev->dev);
+       pctrl->npins = (unsigned long)of_device_get_match_data(&pdev->dev);
 
        pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL);
        if (!pctrl->regmap) {
index 8982027..b868ef1 100644 (file)
@@ -763,7 +763,7 @@ static int pm8xxx_mpp_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        pctrl->dev = &pdev->dev;
-       pctrl->npins = (unsigned)of_device_get_match_data(&pdev->dev);
+       pctrl->npins = (unsigned long)of_device_get_match_data(&pdev->dev);
 
        pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL);
        if (!pctrl->regmap) {
index e7deb51..9842bb1 100644 (file)
        PORT_GP_12(5, fn, sfx)
 
 #undef _GP_DATA
-#define _GP_DATA(bank, pin, name, sfx)                                 \
+#define _GP_DATA(bank, pin, name, sfx, cfg)                            \
        PINMUX_DATA(name##_DATA, name##_FN, name##_IN, name##_OUT)
 
-#define _GP_INOUTSEL(bank, pin, name, sfx)     name##_IN, name##_OUT
-#define _GP_INDT(bank, pin, name, sfx)         name##_DATA
+#define _GP_INOUTSEL(bank, pin, name, sfx, cfg)        name##_IN, name##_OUT
+#define _GP_INDT(bank, pin, name, sfx, cfg)    name##_DATA
 #define GP_INOUTSEL(bank)      PORT_GP_32_REV(bank, _GP_INOUTSEL, unused)
 #define GP_INDT(bank)          PORT_GP_32_REV(bank, _GP_INDT, unused)
 
index 8b3130f..9e03d15 100644 (file)
@@ -1478,6 +1478,8 @@ module_init(remoteproc_init);
 
 static void __exit remoteproc_exit(void)
 {
+       ida_destroy(&rproc_dev_index);
+
        rproc_exit_debugfs();
 }
 module_exit(remoteproc_exit);
index 9d30809..916af50 100644 (file)
@@ -156,7 +156,7 @@ rproc_recovery_write(struct file *filp, const char __user *user_buf,
        char buf[10];
        int ret;
 
-       if (count > sizeof(buf))
+       if (count < 1 || count > sizeof(buf))
                return count;
 
        ret = copy_from_user(buf, user_buf, count);
index 188006c..aa705bb 100644 (file)
@@ -15,9 +15,6 @@
 #include <linux/i2c.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
-#include <linux/pm_wakeirq.h>
 #include <linux/rtc/ds1307.h>
 #include <linux/rtc.h>
 #include <linux/slab.h>
@@ -117,7 +114,6 @@ struct ds1307 {
 #define HAS_ALARM      1               /* bit 1 == irq claimed */
        struct i2c_client       *client;
        struct rtc_device       *rtc;
-       int                     wakeirq;
        s32 (*read_block_data)(const struct i2c_client *client, u8 command,
                               u8 length, u8 *values);
        s32 (*write_block_data)(const struct i2c_client *client, u8 command,
@@ -1138,7 +1134,10 @@ read_rtc:
                                bin2bcd(tmp));
        }
 
-       device_set_wakeup_capable(&client->dev, want_irq);
+       if (want_irq) {
+               device_set_wakeup_capable(&client->dev, true);
+               set_bit(HAS_ALARM, &ds1307->flags);
+       }
        ds1307->rtc = devm_rtc_device_register(&client->dev, client->name,
                                rtc_ops, THIS_MODULE);
        if (IS_ERR(ds1307->rtc)) {
@@ -1146,43 +1145,19 @@ read_rtc:
        }
 
        if (want_irq) {
-               struct device_node *node = client->dev.of_node;
-
                err = devm_request_threaded_irq(&client->dev,
                                                client->irq, NULL, irq_handler,
                                                IRQF_SHARED | IRQF_ONESHOT,
                                                ds1307->rtc->name, client);
                if (err) {
                        client->irq = 0;
+                       device_set_wakeup_capable(&client->dev, false);
+                       clear_bit(HAS_ALARM, &ds1307->flags);
                        dev_err(&client->dev, "unable to request IRQ!\n");
-                       goto no_irq;
-               }
-
-               set_bit(HAS_ALARM, &ds1307->flags);
-               dev_dbg(&client->dev, "got IRQ %d\n", client->irq);
-
-               /* Currently supported by OF code only! */
-               if (!node)
-                       goto no_irq;
-
-               err = of_irq_get(node, 1);
-               if (err <= 0) {
-                       if (err == -EPROBE_DEFER)
-                               goto exit;
-                       goto no_irq;
-               }
-               ds1307->wakeirq = err;
-
-               err = dev_pm_set_dedicated_wake_irq(&client->dev,
-                                                   ds1307->wakeirq);
-               if (err) {
-                       dev_err(&client->dev, "unable to setup wakeIRQ %d!\n",
-                               err);
-                       goto exit;
-               }
+               } else
+                       dev_dbg(&client->dev, "got IRQ %d\n", client->irq);
        }
 
-no_irq:
        if (chip->nvram_size) {
 
                ds1307->nvram = devm_kzalloc(&client->dev,
@@ -1226,9 +1201,6 @@ static int ds1307_remove(struct i2c_client *client)
 {
        struct ds1307 *ds1307 = i2c_get_clientdata(client);
 
-       if (ds1307->wakeirq)
-               dev_pm_clear_wake_irq(&client->dev);
-
        if (test_and_clear_bit(HAS_NVRAM, &ds1307->flags))
                sysfs_remove_bin_file(&client->dev.kobj, ds1307->nvram);
 
index 3ba2e95..81af294 100644 (file)
@@ -902,7 +902,7 @@ static ssize_t tcm_qla2xxx_tpg_fabric_prot_type_show(struct config_item *item,
        return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type);
 }
 
-CONFIGFS_ATTR_WO(tcm_qla2xxx_tpg_, enable);
+CONFIGFS_ATTR(tcm_qla2xxx_tpg_, enable);
 CONFIGFS_ATTR_RO(tcm_qla2xxx_tpg_, dynamic_sessions);
 CONFIGFS_ATTR(tcm_qla2xxx_tpg_, fabric_prot_type);
 
index 25abd4e..91a0030 100644 (file)
@@ -34,7 +34,7 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
 
 static int __init sh_pm_runtime_init(void)
 {
-       if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) {
+       if (IS_ENABLED(CONFIG_ARCH_SHMOBILE)) {
                if (!of_find_compatible_node(NULL, NULL,
                                             "renesas,cpg-mstp-clocks"))
                        return 0;
index 9d50682..0a4ea80 100644 (file)
@@ -23,6 +23,7 @@ config MTK_PMIC_WRAP
 config MTK_SCPSYS
        bool "MediaTek SCPSYS Support"
        depends on ARCH_MEDIATEK || COMPILE_TEST
+       default ARM64 && ARCH_MEDIATEK
        select REGMAP
        select MTK_INFRACFG
        select PM_GENERIC_DOMAINS if PM
index f3a0b6a..8c03a80 100644 (file)
@@ -1179,7 +1179,7 @@ static int knav_queue_setup_link_ram(struct knav_device *kdev)
 
                block++;
                if (!block->size)
-                       return 0;
+                       continue;
 
                dev_dbg(kdev->dev, "linkram1: phys:%x, virt:%p, size:%x\n",
                        block->phys, block->virt, block->size);
@@ -1519,9 +1519,9 @@ static int knav_queue_load_pdsp(struct knav_device *kdev,
 
        for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) {
                if (knav_acc_firmwares[i]) {
-                       ret = request_firmware(&fw,
-                                              knav_acc_firmwares[i],
-                                              kdev->dev);
+                       ret = request_firmware_direct(&fw,
+                                                     knav_acc_firmwares[i],
+                                                     kdev->dev);
                        if (!ret) {
                                found = true;
                                break;
index 6d5b38d..9d7f000 100644 (file)
@@ -18,7 +18,8 @@ source "drivers/staging/iio/resolver/Kconfig"
 source "drivers/staging/iio/trigger/Kconfig"
 
 config IIO_DUMMY_EVGEN
-       tristate
+       tristate
+       select IRQ_WORK
 
 config IIO_SIMPLE_DUMMY
        tristate "An example driver with no hardware requirements"
index d11c54b..b51f237 100644 (file)
@@ -76,7 +76,7 @@ static int lpc32xx_read_raw(struct iio_dev *indio_dev,
 
        if (mask == IIO_CHAN_INFO_RAW) {
                mutex_lock(&indio_dev->mlock);
-               clk_enable(info->clk);
+               clk_prepare_enable(info->clk);
                /* Measurement setup */
                __raw_writel(AD_INTERNAL | (chan->address) | AD_REFp | AD_REFm,
                             LPC32XX_ADC_SELECT(info->adc_base));
@@ -84,7 +84,7 @@ static int lpc32xx_read_raw(struct iio_dev *indio_dev,
                __raw_writel(AD_PDN_CTRL | AD_STROBE,
                             LPC32XX_ADC_CTRL(info->adc_base));
                wait_for_completion(&info->completion); /* set by ISR */
-               clk_disable(info->clk);
+               clk_disable_unprepare(info->clk);
                *val = info->value;
                mutex_unlock(&indio_dev->mlock);
 
index e10c6ff..9568bdb 100644 (file)
 #include "wilc_wlan.h"
 #include <linux/errno.h>
 #include <linux/slab.h>
-#include <linux/etherdevice.h>
 #define TAG_PARAM_OFFSET       (MAC_HDR_LEN + TIME_STAMP_LEN + \
                                                        BEACON_INTERVAL_LEN + CAP_INFO_LEN)
-#define ADDR1 4
-#define ADDR2 10
-#define ADDR3 16
 
 /* Basic Frame Type Codes (2-bit) */
 enum basic_frame_type {
@@ -175,32 +171,38 @@ static inline u8 get_from_ds(u8 *header)
        return ((header[1] & 0x02) >> 1);
 }
 
+/* This function extracts the MAC Address in 'address1' field of the MAC     */
+/* header and updates the MAC Address in the allocated 'addr' variable.      */
+static inline void get_address1(u8 *pu8msa, u8 *addr)
+{
+       memcpy(addr, pu8msa + 4, 6);
+}
+
+/* This function extracts the MAC Address in 'address2' field of the MAC     */
+/* header and updates the MAC Address in the allocated 'addr' variable.      */
+static inline void get_address2(u8 *pu8msa, u8 *addr)
+{
+       memcpy(addr, pu8msa + 10, 6);
+}
+
+/* This function extracts the MAC Address in 'address3' field of the MAC     */
+/* header and updates the MAC Address in the allocated 'addr' variable.      */
+static inline void get_address3(u8 *pu8msa, u8 *addr)
+{
+       memcpy(addr, pu8msa + 16, 6);
+}
+
 /* This function extracts the BSSID from the incoming WLAN packet based on   */
-/* the 'from ds' bit, and updates the MAC Address in the allocated 'data'    */
+/* the 'from ds' bit, and updates the MAC Address in the allocated 'addr'    */
 /* variable.                                                                 */
 static inline void get_BSSID(u8 *data, u8 *bssid)
 {
        if (get_from_ds(data) == 1)
-               /*
-                * Extract the MAC Address in 'address2' field of the MAC
-                * header and update the MAC Address in the allocated 'data'
-                *  variable.
-                */
-               ether_addr_copy(data, bssid + ADDR2);
+               get_address2(data, bssid);
        else if (get_to_ds(data) == 1)
-               /*
-                * Extract the MAC Address in 'address1' field of the MAC
-                * header and update the MAC Address in the allocated 'data'
-                * variable.
-                */
-               ether_addr_copy(data, bssid + ADDR1);
+               get_address1(data, bssid);
        else
-               /*
-                * Extract the MAC Address in 'address3' field of the MAC
-                * header and update the MAC Address in the allocated 'data'
-                * variable.
-                */
-               ether_addr_copy(data, bssid + ADDR3);
+               get_address3(data, bssid);
 }
 
 /* This function extracts the SSID from a beacon/probe response frame        */
index 342a07c..72204fb 100644 (file)
@@ -4074,6 +4074,17 @@ reject:
        return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
 }
 
+static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
+{
+       bool ret;
+
+       spin_lock_bh(&conn->state_lock);
+       ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN);
+       spin_unlock_bh(&conn->state_lock);
+
+       return ret;
+}
+
 int iscsi_target_rx_thread(void *arg)
 {
        int ret, rc;
@@ -4091,7 +4102,7 @@ int iscsi_target_rx_thread(void *arg)
         * incoming iscsi/tcp socket I/O, and/or failing the connection.
         */
        rc = wait_for_completion_interruptible(&conn->rx_login_comp);
-       if (rc < 0)
+       if (rc < 0 || iscsi_target_check_conn_state(conn))
                return 0;
 
        if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
index 5c964c0..9fc9117 100644 (file)
@@ -388,6 +388,7 @@ err:
        if (login->login_complete) {
                if (conn->rx_thread && conn->rx_thread_active) {
                        send_sig(SIGINT, conn->rx_thread, 1);
+                       complete(&conn->rx_login_comp);
                        kthread_stop(conn->rx_thread);
                }
                if (conn->tx_thread && conn->tx_thread_active) {
index 51d1734..2cbea2a 100644 (file)
@@ -208,7 +208,7 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
        if (!pl) {
                pr_err("Unable to allocate memory for"
                                " struct iscsi_param_list.\n");
-               return -1;
+               return -ENOMEM;
        }
        INIT_LIST_HEAD(&pl->param_list);
        INIT_LIST_HEAD(&pl->extra_response_list);
@@ -578,7 +578,7 @@ int iscsi_copy_param_list(
        param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
        if (!param_list) {
                pr_err("Unable to allocate memory for struct iscsi_param_list.\n");
-               return -1;
+               return -ENOMEM;
        }
        INIT_LIST_HEAD(&param_list->param_list);
        INIT_LIST_HEAD(&param_list->extra_response_list);
@@ -629,7 +629,7 @@ int iscsi_copy_param_list(
 
 err_out:
        iscsi_release_param_list(param_list);
-       return -1;
+       return -ENOMEM;
 }
 
 static void iscsi_release_extra_responses(struct iscsi_param_list *param_list)
@@ -729,7 +729,7 @@ static int iscsi_add_notunderstood_response(
        if (!extra_response) {
                pr_err("Unable to allocate memory for"
                        " struct iscsi_extra_response.\n");
-               return -1;
+               return -ENOMEM;
        }
        INIT_LIST_HEAD(&extra_response->er_list);
 
@@ -1370,7 +1370,7 @@ int iscsi_decode_text_input(
        tmpbuf = kzalloc(length + 1, GFP_KERNEL);
        if (!tmpbuf) {
                pr_err("Unable to allocate %u + 1 bytes for tmpbuf.\n", length);
-               return -1;
+               return -ENOMEM;
        }
 
        memcpy(tmpbuf, textbuf, length);
index 0b4b2a6..98698d8 100644 (file)
@@ -371,7 +371,8 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
        return 0;
 }
 
-static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success)
+static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
+                                          int *post_ret)
 {
        unsigned char *buf, *addr;
        struct scatterlist *sg;
@@ -437,7 +438,8 @@ sbc_execute_rw(struct se_cmd *cmd)
                               cmd->data_direction);
 }
 
-static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
+static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
+                                            int *post_ret)
 {
        struct se_device *dev = cmd->se_dev;
 
@@ -447,8 +449,10 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
         * sent to the backend driver.
         */
        spin_lock_irq(&cmd->t_state_lock);
-       if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
+       if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
                cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
+               *post_ret = 1;
+       }
        spin_unlock_irq(&cmd->t_state_lock);
 
        /*
@@ -460,7 +464,8 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
        return TCM_NO_SENSE;
 }
 
-static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success)
+static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
+                                                int *post_ret)
 {
        struct se_device *dev = cmd->se_dev;
        struct scatterlist *write_sg = NULL, *sg;
@@ -556,11 +561,11 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
 
                if (block_size < PAGE_SIZE) {
                        sg_set_page(&write_sg[i], m.page, block_size,
-                                   block_size);
+                                   m.piter.sg->offset + block_size);
                } else {
                        sg_miter_next(&m);
                        sg_set_page(&write_sg[i], m.page, block_size,
-                                   0);
+                                   m.piter.sg->offset);
                }
                len -= block_size;
                i++;
index 273c72b..81a6b3e 100644 (file)
@@ -246,7 +246,7 @@ static ssize_t target_stat_lu_prod_show(struct config_item *item, char *page)
        char str[sizeof(dev->t10_wwn.model)+1];
 
        /* scsiLuProductId */
-       for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++)
+       for (i = 0; i < sizeof(dev->t10_wwn.model); i++)
                str[i] = ISPRINT(dev->t10_wwn.model[i]) ?
                        dev->t10_wwn.model[i] : ' ';
        str[i] = '\0';
index 5b28203..28fb301 100644 (file)
@@ -130,6 +130,9 @@ void core_tmr_abort_task(
                if (tmr->ref_task_tag != ref_tag)
                        continue;
 
+               if (!kref_get_unless_zero(&se_cmd->cmd_kref))
+                       continue;
+
                printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
                        se_cmd->se_tfo->get_fabric_name(), ref_tag);
 
@@ -139,13 +142,15 @@ void core_tmr_abort_task(
                               " skipping\n", ref_tag);
                        spin_unlock(&se_cmd->t_state_lock);
                        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+
+                       target_put_sess_cmd(se_cmd);
+
                        goto out;
                }
                se_cmd->transport_state |= CMD_T_ABORTED;
                spin_unlock(&se_cmd->t_state_lock);
 
                list_del_init(&se_cmd->se_cmd_list);
-               kref_get(&se_cmd->cmd_kref);
                spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 
                cancel_work_sync(&se_cmd->work);
index 5bacc7b..4fdcee2 100644 (file)
@@ -1658,7 +1658,7 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
 void transport_generic_request_failure(struct se_cmd *cmd,
                sense_reason_t sense_reason)
 {
-       int ret = 0;
+       int ret = 0, post_ret = 0;
 
        pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx"
                " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]);
@@ -1680,7 +1680,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
         */
        if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
             cmd->transport_complete_callback)
-               cmd->transport_complete_callback(cmd, false);
+               cmd->transport_complete_callback(cmd, false, &post_ret);
 
        switch (sense_reason) {
        case TCM_NON_EXISTENT_LUN:
@@ -2068,11 +2068,13 @@ static void target_complete_ok_work(struct work_struct *work)
         */
        if (cmd->transport_complete_callback) {
                sense_reason_t rc;
+               bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
+               bool zero_dl = !(cmd->data_length);
+               int post_ret = 0;
 
-               rc = cmd->transport_complete_callback(cmd, true);
-               if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
-                       if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
-                           !cmd->data_length)
+               rc = cmd->transport_complete_callback(cmd, true, &post_ret);
+               if (!rc && !post_ret) {
+                       if (caw && zero_dl)
                                goto queue_rsp;
 
                        return;
@@ -2507,23 +2509,24 @@ out:
 EXPORT_SYMBOL(target_get_sess_cmd);
 
 static void target_release_cmd_kref(struct kref *kref)
-               __releases(&se_cmd->se_sess->sess_cmd_lock)
 {
        struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
        struct se_session *se_sess = se_cmd->se_sess;
+       unsigned long flags;
 
+       spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
        if (list_empty(&se_cmd->se_cmd_list)) {
-               spin_unlock(&se_sess->sess_cmd_lock);
+               spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
                se_cmd->se_tfo->release_cmd(se_cmd);
                return;
        }
        if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
-               spin_unlock(&se_sess->sess_cmd_lock);
+               spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
                complete(&se_cmd->cmd_wait_comp);
                return;
        }
        list_del(&se_cmd->se_cmd_list);
-       spin_unlock(&se_sess->sess_cmd_lock);
+       spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 
        se_cmd->se_tfo->release_cmd(se_cmd);
 }
@@ -2539,8 +2542,7 @@ int target_put_sess_cmd(struct se_cmd *se_cmd)
                se_cmd->se_tfo->release_cmd(se_cmd);
                return 1;
        }
-       return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
-                       &se_sess->sess_cmd_lock);
+       return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
 }
 EXPORT_SYMBOL(target_put_sess_cmd);
 
index 937cebf..5e6d6cb 100644 (file)
@@ -638,7 +638,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
        if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
                return 0;
 
-       if (!time_after(cmd->deadline, jiffies))
+       if (!time_after(jiffies, cmd->deadline))
                return 0;
 
        set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
@@ -1101,8 +1101,6 @@ tcmu_parse_cdb(struct se_cmd *cmd)
 
 static const struct target_backend_ops tcmu_ops = {
        .name                   = "user",
-       .inquiry_prod           = "USER",
-       .inquiry_rev            = TCMU_VERSION,
        .owner                  = THIS_MODULE,
        .transport_flags        = TRANSPORT_FLAG_PASSTHROUGH,
        .attach_hba             = tcmu_attach_hba,
index c463c89..8cc4ac6 100644 (file)
@@ -382,7 +382,7 @@ endmenu
 
 config QCOM_SPMI_TEMP_ALARM
        tristate "Qualcomm SPMI PMIC Temperature Alarm"
-       depends on OF && (SPMI || COMPILE_TEST) && IIO
+       depends on OF && SPMI && IIO
        select REGMAP_SPMI
        help
          This enables a thermal sysfs driver for Qualcomm plug-and-play (QPNP)
index c8fe3ca..c5547bd 100644 (file)
@@ -55,6 +55,7 @@
 #define TEMPSENSE2_PANIC_VALUE_SHIFT   16
 #define TEMPSENSE2_PANIC_VALUE_MASK    0xfff0000
 
+#define OCOTP_MEM0                     0x0480
 #define OCOTP_ANA1                     0x04e0
 
 /* The driver supports 1 passive trip point and 1 critical trip point */
@@ -64,12 +65,6 @@ enum imx_thermal_trip {
        IMX_TRIP_NUM,
 };
 
-/*
- * It defines the temperature in millicelsius for passive trip point
- * that will trigger cooling action when crossed.
- */
-#define IMX_TEMP_PASSIVE               85000
-
 #define IMX_POLLING_DELAY              2000 /* millisecond */
 #define IMX_PASSIVE_DELAY              1000
 
@@ -100,12 +95,14 @@ struct imx_thermal_data {
        u32 c1, c2; /* See formula in imx_get_sensor_data() */
        int temp_passive;
        int temp_critical;
+       int temp_max;
        int alarm_temp;
        int last_temp;
        bool irq_enabled;
        int irq;
        struct clk *thermal_clk;
        const struct thermal_soc_data *socdata;
+       const char *temp_grade;
 };
 
 static void imx_set_panic_temp(struct imx_thermal_data *data,
@@ -285,10 +282,12 @@ static int imx_set_trip_temp(struct thermal_zone_device *tz, int trip,
 {
        struct imx_thermal_data *data = tz->devdata;
 
+       /* do not allow changing critical threshold */
        if (trip == IMX_TRIP_CRITICAL)
                return -EPERM;
 
-       if (temp < 0 || temp > IMX_TEMP_PASSIVE)
+       /* do not allow passive to be set higher than critical */
+       if (temp < 0 || temp > data->temp_critical)
                return -EINVAL;
 
        data->temp_passive = temp;
@@ -404,17 +403,39 @@ static int imx_get_sensor_data(struct platform_device *pdev)
        data->c1 = temp64;
        data->c2 = n1 * data->c1 + 1000 * t1;
 
-       /*
-        * Set the default passive cooling trip point,
-        * can be changed from userspace.
-        */
-       data->temp_passive = IMX_TEMP_PASSIVE;
+       /* use OTP for thermal grade */
+       ret = regmap_read(map, OCOTP_MEM0, &val);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to read temp grade: %d\n", ret);
+               return ret;
+       }
+
+       /* The maximum die temp is specified by the Temperature Grade */
+       switch ((val >> 6) & 0x3) {
+       case 0: /* Commercial (0 to 95C) */
+               data->temp_grade = "Commercial";
+               data->temp_max = 95000;
+               break;
+       case 1: /* Extended Commercial (-20 to 105C) */
+               data->temp_grade = "Extended Commercial";
+               data->temp_max = 105000;
+               break;
+       case 2: /* Industrial (-40 to 105C) */
+               data->temp_grade = "Industrial";
+               data->temp_max = 105000;
+               break;
+       case 3: /* Automotive (-40 to 125C) */
+               data->temp_grade = "Automotive";
+               data->temp_max = 125000;
+               break;
+       }
 
        /*
-        * The maximum die temperature set to 20 C higher than
-        * IMX_TEMP_PASSIVE.
+        * Set the critical trip point at 5C under max
+        * Set the passive trip point at 10C under max (can change via sysfs)
         */
-       data->temp_critical = 1000 * 20 + data->temp_passive;
+       data->temp_critical = data->temp_max - (1000 * 5);
+       data->temp_passive = data->temp_max - (1000 * 10);
 
        return 0;
 }
@@ -551,6 +572,11 @@ static int imx_thermal_probe(struct platform_device *pdev)
                return ret;
        }
 
+       dev_info(&pdev->dev, "%s CPU temperature grade - max:%dC"
+                " critical:%dC passive:%dC\n", data->temp_grade,
+                data->temp_max / 1000, data->temp_critical / 1000,
+                data->temp_passive / 1000);
+
        /* Enable measurements at ~ 10 Hz */
        regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ);
        measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */
index 42b7d42..be4eedc 100644 (file)
@@ -964,7 +964,7 @@ void of_thermal_destroy_zones(void)
 
        np = of_find_node_by_name(NULL, "thermal-zones");
        if (!np) {
-               pr_err("unable to find thermal zones\n");
+               pr_debug("unable to find thermal zones\n");
                return;
        }
 
index f0fbea3..1246aa6 100644 (file)
@@ -174,7 +174,6 @@ static void estimate_pid_constants(struct thermal_zone_device *tz,
 /**
  * pid_controller() - PID controller
  * @tz:        thermal zone we are operating in
- * @current_temp:      the current temperature in millicelsius
  * @control_temp:      the target temperature in millicelsius
  * @max_allocatable_power:     maximum allocatable power for this thermal zone
  *
@@ -191,7 +190,6 @@ static void estimate_pid_constants(struct thermal_zone_device *tz,
  * Return: The power budget for the next period.
  */
 static u32 pid_controller(struct thermal_zone_device *tz,
-                         int current_temp,
                          int control_temp,
                          u32 max_allocatable_power)
 {
@@ -211,7 +209,7 @@ static u32 pid_controller(struct thermal_zone_device *tz,
                                       true);
        }
 
-       err = control_temp - current_temp;
+       err = control_temp - tz->temperature;
        err = int_to_frac(err);
 
        /* Calculate the proportional term */
@@ -332,7 +330,6 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
 }
 
 static int allocate_power(struct thermal_zone_device *tz,
-                         int current_temp,
                          int control_temp)
 {
        struct thermal_instance *instance;
@@ -418,8 +415,7 @@ static int allocate_power(struct thermal_zone_device *tz,
                i++;
        }
 
-       power_range = pid_controller(tz, current_temp, control_temp,
-                                    max_allocatable_power);
+       power_range = pid_controller(tz, control_temp, max_allocatable_power);
 
        divvy_up_power(weighted_req_power, max_power, num_actors,
                       total_weighted_req_power, power_range, granted_power,
@@ -444,8 +440,8 @@ static int allocate_power(struct thermal_zone_device *tz,
        trace_thermal_power_allocator(tz, req_power, total_req_power,
                                      granted_power, total_granted_power,
                                      num_actors, power_range,
-                                     max_allocatable_power, current_temp,
-                                     control_temp - current_temp);
+                                     max_allocatable_power, tz->temperature,
+                                     control_temp - tz->temperature);
 
        kfree(req_power);
 unlock:
@@ -612,7 +608,7 @@ static void power_allocator_unbind(struct thermal_zone_device *tz)
 static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
 {
        int ret;
-       int switch_on_temp, control_temp, current_temp;
+       int switch_on_temp, control_temp;
        struct power_allocator_params *params = tz->governor_data;
 
        /*
@@ -622,15 +618,9 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
        if (trip != params->trip_max_desired_temperature)
                return 0;
 
-       ret = thermal_zone_get_temp(tz, &current_temp);
-       if (ret) {
-               dev_warn(&tz->device, "Failed to get temperature: %d\n", ret);
-               return ret;
-       }
-
        ret = tz->ops->get_trip_temp(tz, params->trip_switch_on,
                                     &switch_on_temp);
-       if (!ret && (current_temp < switch_on_temp)) {
+       if (!ret && (tz->temperature < switch_on_temp)) {
                tz->passive = 0;
                reset_pid_controller(params);
                allow_maximum_power(tz);
@@ -648,7 +638,7 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
                return ret;
        }
 
-       return allocate_power(tz, current_temp, control_temp);
+       return allocate_power(tz, control_temp);
 }
 
 static struct thermal_governor thermal_gov_power_allocator = {
index 5d4ae7d..13d01ed 100644 (file)
@@ -361,6 +361,24 @@ static irqreturn_t rcar_thermal_irq(int irq, void *data)
 /*
  *             platform functions
  */
+static int rcar_thermal_remove(struct platform_device *pdev)
+{
+       struct rcar_thermal_common *common = platform_get_drvdata(pdev);
+       struct device *dev = &pdev->dev;
+       struct rcar_thermal_priv *priv;
+
+       rcar_thermal_for_each_priv(priv, common) {
+               if (rcar_has_irq_support(priv))
+                       rcar_thermal_irq_disable(priv);
+               thermal_zone_device_unregister(priv->zone);
+       }
+
+       pm_runtime_put(dev);
+       pm_runtime_disable(dev);
+
+       return 0;
+}
+
 static int rcar_thermal_probe(struct platform_device *pdev)
 {
        struct rcar_thermal_common *common;
@@ -377,6 +395,8 @@ static int rcar_thermal_probe(struct platform_device *pdev)
        if (!common)
                return -ENOMEM;
 
+       platform_set_drvdata(pdev, common);
+
        INIT_LIST_HEAD(&common->head);
        spin_lock_init(&common->lock);
        common->dev = dev;
@@ -454,43 +474,16 @@ static int rcar_thermal_probe(struct platform_device *pdev)
                rcar_thermal_common_write(common, ENR, enr_bits);
        }
 
-       platform_set_drvdata(pdev, common);
-
        dev_info(dev, "%d sensor probed\n", i);
 
        return 0;
 
 error_unregister:
-       rcar_thermal_for_each_priv(priv, common) {
-               if (rcar_has_irq_support(priv))
-                       rcar_thermal_irq_disable(priv);
-               thermal_zone_device_unregister(priv->zone);
-       }
-
-       pm_runtime_put(dev);
-       pm_runtime_disable(dev);
+       rcar_thermal_remove(pdev);
 
        return ret;
 }
 
-static int rcar_thermal_remove(struct platform_device *pdev)
-{
-       struct rcar_thermal_common *common = platform_get_drvdata(pdev);
-       struct device *dev = &pdev->dev;
-       struct rcar_thermal_priv *priv;
-
-       rcar_thermal_for_each_priv(priv, common) {
-               if (rcar_has_irq_support(priv))
-                       rcar_thermal_irq_disable(priv);
-               thermal_zone_device_unregister(priv->zone);
-       }
-
-       pm_runtime_put(dev);
-       pm_runtime_disable(dev);
-
-       return 0;
-}
-
 static const struct of_device_id rcar_thermal_dt_ids[] = {
        { .compatible = "renesas,rcar-thermal", },
        {},
index 9787e8a..e845841 100644 (file)
@@ -1,6 +1,9 @@
 /*
  * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
  *
+ * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
+ * Caesar Wang <wxt@rock-chips.com>
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  * version 2, as published by the Free Software Foundation.
@@ -45,17 +48,50 @@ enum tshut_polarity {
 };
 
 /**
- * The system has three Temperature Sensors.  channel 0 is reserved,
- * channel 1 is for CPU, and channel 2 is for GPU.
+ * The system has two Temperature Sensors.
+ * sensor0 is for CPU, and sensor1 is for GPU.
  */
 enum sensor_id {
-       SENSOR_CPU = 1,
+       SENSOR_CPU = 0,
        SENSOR_GPU,
 };
 
+/**
+* The conversion table has the adc value and temperature.
+* ADC_DECREMENT is the adc value decrement. (e.g. v2_code_table)
+* ADC_INCREMENT is the adc value increment. (e.g. v3_code_table)
+*/
+enum adc_sort_mode {
+       ADC_DECREMENT = 0,
+       ADC_INCREMENT,
+};
+
+/**
+ * The max sensors is two in rockchip SoCs.
+ * Two sensors: CPU and GPU sensor.
+ */
+#define SOC_MAX_SENSORS        2
+
+struct chip_tsadc_table {
+       const struct tsadc_table *id;
+
+       /* the array table size*/
+       unsigned int length;
+
+       /* that analogic mask data */
+       u32 data_mask;
+
+       /* the sort mode is adc value that increment or decrement in table */
+       enum adc_sort_mode mode;
+};
+
 struct rockchip_tsadc_chip {
+       /* The sensor id of chip correspond to the ADC channel */
+       int chn_id[SOC_MAX_SENSORS];
+       int chn_num;
+
        /* The hardware-controlled tshut property */
-       long tshut_temp;
+       int tshut_temp;
        enum tshut_mode tshut_mode;
        enum tshut_polarity tshut_polarity;
 
@@ -65,37 +101,40 @@ struct rockchip_tsadc_chip {
        void (*control)(void __iomem *reg, bool on);
 
        /* Per-sensor methods */
-       int (*get_temp)(int chn, void __iomem *reg, int *temp);
-       void (*set_tshut_temp)(int chn, void __iomem *reg, long temp);
+       int (*get_temp)(struct chip_tsadc_table table,
+                       int chn, void __iomem *reg, int *temp);
+       void (*set_tshut_temp)(struct chip_tsadc_table table,
+                              int chn, void __iomem *reg, int temp);
        void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m);
+
+       /* Per-table methods */
+       struct chip_tsadc_table table;
 };
 
 struct rockchip_thermal_sensor {
        struct rockchip_thermal_data *thermal;
        struct thermal_zone_device *tzd;
-       enum sensor_id id;
+       int id;
 };
 
-#define NUM_SENSORS    2 /* Ignore unused sensor 0 */
-
 struct rockchip_thermal_data {
        const struct rockchip_tsadc_chip *chip;
        struct platform_device *pdev;
        struct reset_control *reset;
 
-       struct rockchip_thermal_sensor sensors[NUM_SENSORS];
+       struct rockchip_thermal_sensor sensors[SOC_MAX_SENSORS];
 
        struct clk *clk;
        struct clk *pclk;
 
        void __iomem *regs;
 
-       long tshut_temp;
+       int tshut_temp;
        enum tshut_mode tshut_mode;
        enum tshut_polarity tshut_polarity;
 };
 
-/* TSADC V2 Sensor info define: */
+/* TSADC Sensor info define: */
 #define TSADCV2_AUTO_CON                       0x04
 #define TSADCV2_INT_EN                         0x08
 #define TSADCV2_INT_PD                         0x0c
@@ -117,6 +156,8 @@ struct rockchip_thermal_data {
 #define TSADCV2_INT_PD_CLEAR_MASK              ~BIT(8)
 
 #define TSADCV2_DATA_MASK                      0xfff
+#define TSADCV3_DATA_MASK                      0x3ff
+
 #define TSADCV2_HIGHT_INT_DEBOUNCE_COUNT       4
 #define TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT     4
 #define TSADCV2_AUTO_PERIOD_TIME               250 /* msec */
@@ -124,7 +165,7 @@ struct rockchip_thermal_data {
 
 struct tsadc_table {
        u32 code;
-       long temp;
+       int temp;
 };
 
 static const struct tsadc_table v2_code_table[] = {
@@ -165,21 +206,61 @@ static const struct tsadc_table v2_code_table[] = {
        {3421, 125000},
 };
 
-static u32 rk_tsadcv2_temp_to_code(long temp)
+static const struct tsadc_table v3_code_table[] = {
+       {0, -40000},
+       {106, -40000},
+       {108, -35000},
+       {110, -30000},
+       {112, -25000},
+       {114, -20000},
+       {116, -15000},
+       {118, -10000},
+       {120, -5000},
+       {122, 0},
+       {124, 5000},
+       {126, 10000},
+       {128, 15000},
+       {130, 20000},
+       {132, 25000},
+       {134, 30000},
+       {136, 35000},
+       {138, 40000},
+       {140, 45000},
+       {142, 50000},
+       {144, 55000},
+       {146, 60000},
+       {148, 65000},
+       {150, 70000},
+       {152, 75000},
+       {154, 80000},
+       {156, 85000},
+       {158, 90000},
+       {160, 95000},
+       {162, 100000},
+       {163, 105000},
+       {165, 110000},
+       {167, 115000},
+       {169, 120000},
+       {171, 125000},
+       {TSADCV3_DATA_MASK, 125000},
+};
+
+static u32 rk_tsadcv2_temp_to_code(struct chip_tsadc_table table,
+                                  int temp)
 {
        int high, low, mid;
 
        low = 0;
-       high = ARRAY_SIZE(v2_code_table) - 1;
+       high = table.length - 1;
        mid = (high + low) / 2;
 
-       if (temp < v2_code_table[low].temp || temp > v2_code_table[high].temp)
+       if (temp < table.id[low].temp || temp > table.id[high].temp)
                return 0;
 
        while (low <= high) {
-               if (temp == v2_code_table[mid].temp)
-                       return v2_code_table[mid].code;
-               else if (temp < v2_code_table[mid].temp)
+               if (temp == table.id[mid].temp)
+                       return table.id[mid].code;
+               else if (temp < table.id[mid].temp)
                        high = mid - 1;
                else
                        low = mid + 1;
@@ -189,29 +270,54 @@ static u32 rk_tsadcv2_temp_to_code(long temp)
        return 0;
 }
 
-static int rk_tsadcv2_code_to_temp(u32 code, int *temp)
+static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
+                                  int *temp)
 {
        unsigned int low = 1;
-       unsigned int high = ARRAY_SIZE(v2_code_table) - 1;
+       unsigned int high = table.length - 1;
        unsigned int mid = (low + high) / 2;
        unsigned int num;
        unsigned long denom;
 
-       BUILD_BUG_ON(ARRAY_SIZE(v2_code_table) < 2);
-
-       code &= TSADCV2_DATA_MASK;
-       if (code < v2_code_table[high].code)
-               return -EAGAIN;         /* Incorrect reading */
-
-       while (low <= high) {
-               if (code >= v2_code_table[mid].code &&
-                   code < v2_code_table[mid - 1].code)
-                       break;
-               else if (code < v2_code_table[mid].code)
-                       low = mid + 1;
-               else
-                       high = mid - 1;
-               mid = (low + high) / 2;
+       WARN_ON(table.length < 2);
+
+       switch (table.mode) {
+       case ADC_DECREMENT:
+               code &= table.data_mask;
+               if (code < table.id[high].code)
+                       return -EAGAIN;         /* Incorrect reading */
+
+               while (low <= high) {
+                       if (code >= table.id[mid].code &&
+                           code < table.id[mid - 1].code)
+                               break;
+                       else if (code < table.id[mid].code)
+                               low = mid + 1;
+                       else
+                               high = mid - 1;
+
+                       mid = (low + high) / 2;
+               }
+               break;
+       case ADC_INCREMENT:
+               code &= table.data_mask;
+               if (code < table.id[low].code)
+                       return -EAGAIN;         /* Incorrect reading */
+
+               while (low <= high) {
+                       if (code >= table.id[mid - 1].code &&
+                           code < table.id[mid].code)
+                               break;
+                       else if (code > table.id[mid].code)
+                               low = mid + 1;
+                       else
+                               high = mid - 1;
+
+                       mid = (low + high) / 2;
+               }
+               break;
+       default:
+               pr_err("Invalid the conversion table\n");
        }
 
        /*
@@ -220,24 +326,28 @@ static int rk_tsadcv2_code_to_temp(u32 code, int *temp)
         * temperature between 2 table entries is linear and interpolate
         * to produce less granular result.
         */
-       num = v2_code_table[mid].temp - v2_code_table[mid - 1].temp;
-       num *= v2_code_table[mid - 1].code - code;
-       denom = v2_code_table[mid - 1].code - v2_code_table[mid].code;
-       *temp = v2_code_table[mid - 1].temp + (num / denom);
+       num = table.id[mid].temp - v2_code_table[mid - 1].temp;
+       num *= abs(table.id[mid - 1].code - code);
+       denom = abs(table.id[mid - 1].code - table.id[mid].code);
+       *temp = table.id[mid - 1].temp + (num / denom);
 
        return 0;
 }
 
 /**
- * rk_tsadcv2_initialize - initialize TASDC Controller
- * (1) Set TSADCV2_AUTO_PERIOD, configure the interleave between
- * every two accessing of TSADC in normal operation.
- * (2) Set TSADCV2_AUTO_PERIOD_HT, configure the interleave between
- * every two accessing of TSADC after the temperature is higher
- * than COM_SHUT or COM_INT.
- * (3) Set TSADCV2_HIGH_INT_DEBOUNCE and TSADC_HIGHT_TSHUT_DEBOUNCE,
- * if the temperature is higher than COMP_INT or COMP_SHUT for
- * "debounce" times, TSADC controller will generate interrupt or TSHUT.
+ * rk_tsadcv2_initialize - initialize TASDC Controller.
+ *
+ * (1) Set TSADC_V2_AUTO_PERIOD:
+ *     Configure the interleave between every two accessing of
+ *     TSADC in normal operation.
+ *
+ * (2) Set TSADCV2_AUTO_PERIOD_HT:
+ *     Configure the interleave between every two accessing of
+ *     TSADC after the temperature is higher than COM_SHUT or COM_INT.
+ *
+ * (3) Set TSADCV2_HIGH_INT_DEBOUNCE and TSADC_HIGHT_TSHUT_DEBOUNCE:
+ *     If the temperature is higher than COMP_INT or COMP_SHUT for
+ *     "debounce" times, TSADC controller will generate interrupt or TSHUT.
  */
 static void rk_tsadcv2_initialize(void __iomem *regs,
                                  enum tshut_polarity tshut_polarity)
@@ -279,20 +389,22 @@ static void rk_tsadcv2_control(void __iomem *regs, bool enable)
        writel_relaxed(val, regs + TSADCV2_AUTO_CON);
 }
 
-static int rk_tsadcv2_get_temp(int chn, void __iomem *regs, int *temp)
+static int rk_tsadcv2_get_temp(struct chip_tsadc_table table,
+                              int chn, void __iomem *regs, int *temp)
 {
        u32 val;
 
        val = readl_relaxed(regs + TSADCV2_DATA(chn));
 
-       return rk_tsadcv2_code_to_temp(val, temp);
+       return rk_tsadcv2_code_to_temp(table, val, temp);
 }
 
-static void rk_tsadcv2_tshut_temp(int chn, void __iomem *regs, long temp)
+static void rk_tsadcv2_tshut_temp(struct chip_tsadc_table table,
+                                 int chn, void __iomem *regs, int temp)
 {
        u32 tshut_value, val;
 
-       tshut_value = rk_tsadcv2_temp_to_code(temp);
+       tshut_value = rk_tsadcv2_temp_to_code(table, temp);
        writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn));
 
        /* TSHUT will be valid */
@@ -318,6 +430,10 @@ static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs,
 }
 
 static const struct rockchip_tsadc_chip rk3288_tsadc_data = {
+       .chn_id[SENSOR_CPU] = 1, /* cpu sensor is channel 1 */
+       .chn_id[SENSOR_GPU] = 2, /* gpu sensor is channel 2 */
+       .chn_num = 2, /* two channels for tsadc */
+
        .tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
        .tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
        .tshut_temp = 95000,
@@ -328,6 +444,37 @@ static const struct rockchip_tsadc_chip rk3288_tsadc_data = {
        .get_temp = rk_tsadcv2_get_temp,
        .set_tshut_temp = rk_tsadcv2_tshut_temp,
        .set_tshut_mode = rk_tsadcv2_tshut_mode,
+
+       .table = {
+               .id = v2_code_table,
+               .length = ARRAY_SIZE(v2_code_table),
+               .data_mask = TSADCV2_DATA_MASK,
+               .mode = ADC_DECREMENT,
+       },
+};
+
+static const struct rockchip_tsadc_chip rk3368_tsadc_data = {
+       .chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
+       .chn_id[SENSOR_GPU] = 1, /* gpu sensor is channel 1 */
+       .chn_num = 2, /* two channels for tsadc */
+
+       .tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
+       .tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
+       .tshut_temp = 95000,
+
+       .initialize = rk_tsadcv2_initialize,
+       .irq_ack = rk_tsadcv2_irq_ack,
+       .control = rk_tsadcv2_control,
+       .get_temp = rk_tsadcv2_get_temp,
+       .set_tshut_temp = rk_tsadcv2_tshut_temp,
+       .set_tshut_mode = rk_tsadcv2_tshut_mode,
+
+       .table = {
+               .id = v3_code_table,
+               .length = ARRAY_SIZE(v3_code_table),
+               .data_mask = TSADCV3_DATA_MASK,
+               .mode = ADC_INCREMENT,
+       },
 };
 
 static const struct of_device_id of_rockchip_thermal_match[] = {
@@ -335,6 +482,10 @@ static const struct of_device_id of_rockchip_thermal_match[] = {
                .compatible = "rockchip,rk3288-tsadc",
                .data = (void *)&rk3288_tsadc_data,
        },
+       {
+               .compatible = "rockchip,rk3368-tsadc",
+               .data = (void *)&rk3368_tsadc_data,
+       },
        { /* end */ },
 };
 MODULE_DEVICE_TABLE(of, of_rockchip_thermal_match);
@@ -357,7 +508,7 @@ static irqreturn_t rockchip_thermal_alarm_irq_thread(int irq, void *dev)
 
        thermal->chip->irq_ack(thermal->regs);
 
-       for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++)
+       for (i = 0; i < thermal->chip->chn_num; i++)
                thermal_zone_device_update(thermal->sensors[i].tzd);
 
        return IRQ_HANDLED;
@@ -370,7 +521,8 @@ static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
        const struct rockchip_tsadc_chip *tsadc = sensor->thermal->chip;
        int retval;
 
-       retval = tsadc->get_temp(sensor->id, thermal->regs, out_temp);
+       retval = tsadc->get_temp(tsadc->table,
+                                sensor->id, thermal->regs, out_temp);
        dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n",
                sensor->id, *out_temp, retval);
 
@@ -389,7 +541,7 @@ static int rockchip_configure_from_dt(struct device *dev,
 
        if (of_property_read_u32(np, "rockchip,hw-tshut-temp", &shut_temp)) {
                dev_warn(dev,
-                        "Missing tshut temp property, using default %ld\n",
+                        "Missing tshut temp property, using default %d\n",
                         thermal->chip->tshut_temp);
                thermal->tshut_temp = thermal->chip->tshut_temp;
        } else {
@@ -397,7 +549,7 @@ static int rockchip_configure_from_dt(struct device *dev,
        }
 
        if (thermal->tshut_temp > INT_MAX) {
-               dev_err(dev, "Invalid tshut temperature specified: %ld\n",
+               dev_err(dev, "Invalid tshut temperature specified: %d\n",
                        thermal->tshut_temp);
                return -ERANGE;
        }
@@ -442,13 +594,14 @@ static int
 rockchip_thermal_register_sensor(struct platform_device *pdev,
                                 struct rockchip_thermal_data *thermal,
                                 struct rockchip_thermal_sensor *sensor,
-                                enum sensor_id id)
+                                int id)
 {
        const struct rockchip_tsadc_chip *tsadc = thermal->chip;
        int error;
 
        tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode);
-       tsadc->set_tshut_temp(id, thermal->regs, thermal->tshut_temp);
+       tsadc->set_tshut_temp(tsadc->table, id, thermal->regs,
+                             thermal->tshut_temp);
 
        sensor->thermal = thermal;
        sensor->id = id;
@@ -481,7 +634,7 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
        const struct of_device_id *match;
        struct resource *res;
        int irq;
-       int i;
+       int i, j;
        int error;
 
        match = of_match_node(of_rockchip_thermal_match, np);
@@ -556,22 +709,19 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
 
        thermal->chip->initialize(thermal->regs, thermal->tshut_polarity);
 
-       error = rockchip_thermal_register_sensor(pdev, thermal,
-                                                &thermal->sensors[0],
-                                                SENSOR_CPU);
-       if (error) {
-               dev_err(&pdev->dev,
-                       "failed to register CPU thermal sensor: %d\n", error);
-               goto err_disable_pclk;
-       }
-
-       error = rockchip_thermal_register_sensor(pdev, thermal,
-                                                &thermal->sensors[1],
-                                                SENSOR_GPU);
-       if (error) {
-               dev_err(&pdev->dev,
-                       "failed to register GPU thermal sensor: %d\n", error);
-               goto err_unregister_cpu_sensor;
+       for (i = 0; i < thermal->chip->chn_num; i++) {
+               error = rockchip_thermal_register_sensor(pdev, thermal,
+                                               &thermal->sensors[i],
+                                               thermal->chip->chn_id[i]);
+               if (error) {
+                       dev_err(&pdev->dev,
+                               "failed to register sensor[%d] : error = %d\n",
+                               i, error);
+                       for (j = 0; j < i; j++)
+                               thermal_zone_of_sensor_unregister(&pdev->dev,
+                                               thermal->sensors[j].tzd);
+                       goto err_disable_pclk;
+               }
        }
 
        error = devm_request_threaded_irq(&pdev->dev, irq, NULL,
@@ -581,22 +731,23 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
        if (error) {
                dev_err(&pdev->dev,
                        "failed to request tsadc irq: %d\n", error);
-               goto err_unregister_gpu_sensor;
+               goto err_unregister_sensor;
        }
 
        thermal->chip->control(thermal->regs, true);
 
-       for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++)
+       for (i = 0; i < thermal->chip->chn_num; i++)
                rockchip_thermal_toggle_sensor(&thermal->sensors[i], true);
 
        platform_set_drvdata(pdev, thermal);
 
        return 0;
 
-err_unregister_gpu_sensor:
-       thermal_zone_of_sensor_unregister(&pdev->dev, thermal->sensors[1].tzd);
-err_unregister_cpu_sensor:
-       thermal_zone_of_sensor_unregister(&pdev->dev, thermal->sensors[0].tzd);
+err_unregister_sensor:
+       while (i--)
+               thermal_zone_of_sensor_unregister(&pdev->dev,
+                                                 thermal->sensors[i].tzd);
+
 err_disable_pclk:
        clk_disable_unprepare(thermal->pclk);
 err_disable_clk:
@@ -610,7 +761,7 @@ static int rockchip_thermal_remove(struct platform_device *pdev)
        struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev);
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) {
+       for (i = 0; i < thermal->chip->chn_num; i++) {
                struct rockchip_thermal_sensor *sensor = &thermal->sensors[i];
 
                rockchip_thermal_toggle_sensor(sensor, false);
@@ -631,7 +782,7 @@ static int __maybe_unused rockchip_thermal_suspend(struct device *dev)
        struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev);
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++)
+       for (i = 0; i < thermal->chip->chn_num; i++)
                rockchip_thermal_toggle_sensor(&thermal->sensors[i], false);
 
        thermal->chip->control(thermal->regs, false);
@@ -663,18 +814,19 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev)
 
        thermal->chip->initialize(thermal->regs, thermal->tshut_polarity);
 
-       for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) {
-               enum sensor_id id = thermal->sensors[i].id;
+       for (i = 0; i < thermal->chip->chn_num; i++) {
+               int id = thermal->sensors[i].id;
 
                thermal->chip->set_tshut_mode(id, thermal->regs,
                                              thermal->tshut_mode);
-               thermal->chip->set_tshut_temp(id, thermal->regs,
+               thermal->chip->set_tshut_temp(thermal->chip->table,
+                                             id, thermal->regs,
                                              thermal->tshut_temp);
        }
 
        thermal->chip->control(thermal->regs, true);
 
-       for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++)
+       for (i = 0; i < thermal->chip->chn_num; i++)
                rockchip_thermal_toggle_sensor(&thermal->sensors[i], true);
 
        pinctrl_pm_select_default_state(dev);
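
The probe rework above registers the sensors in a loop and, on a partial failure, unregisters the ones already registered before bailing out (the while (i--) unwind on the irq error path does the same). A minimal standalone sketch of that register-then-unwind-in-reverse pattern, with stub functions standing in for the real register/unregister calls (names are illustrative only):

#include <stdio.h>

#define NUM_CHANNELS 4

/* Illustrative stubs standing in for the real register/unregister calls. */
static int register_sensor(int id)
{
        if (id == 2)            /* simulate a failure on the third channel */
                return -1;
        printf("registered sensor %d\n", id);
        return 0;
}

static void unregister_sensor(int id)
{
        printf("unregistered sensor %d\n", id);
}

int main(void)
{
        int i;

        for (i = 0; i < NUM_CHANNELS; i++) {
                if (register_sensor(i)) {
                        /* Unwind everything registered so far, newest first. */
                        while (i--)
                                unregister_sensor(i);
                        return 1;
                }
        }
        return 0;
}
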
index 1384426..ed77614 100644 (file)
@@ -169,7 +169,7 @@ static inline int tty_copy_to_user(struct tty_struct *tty,
 {
        struct n_tty_data *ldata = tty->disc_data;
 
-       tty_audit_add_data(tty, to, n, ldata->icanon);
+       tty_audit_add_data(tty, from, n, ldata->icanon);
        return copy_to_user(to, from, n);
 }
 
index c0533a5..910bfee 100644 (file)
@@ -60,3 +60,4 @@ int fsl8250_handle_irq(struct uart_port *port)
        spin_unlock_irqrestore(&up->port.lock, flags);
        return 1;
 }
+EXPORT_SYMBOL_GPL(fsl8250_handle_irq);
index e6f5e12..6412f14 100644 (file)
@@ -373,6 +373,7 @@ config SERIAL_8250_MID
        depends on SERIAL_8250 && PCI
        select HSU_DMA if SERIAL_8250_DMA
        select HSU_DMA_PCI if X86_INTEL_MID
+       select RATIONAL
        help
          Selecting this option will enable handling of the extra features
          present on the UART found on Intel Medfield SOC and various other
index 1aec440..f38beb2 100644 (file)
@@ -1539,7 +1539,6 @@ config SERIAL_FSL_LPUART
        tristate "Freescale lpuart serial port support"
        depends on HAS_DMA
        select SERIAL_CORE
-       select SERIAL_EARLYCON
        help
          Support for the on-chip lpuart on some Freescale SOCs.
 
@@ -1547,6 +1546,7 @@ config SERIAL_FSL_LPUART_CONSOLE
        bool "Console on Freescale lpuart serial port"
        depends on SERIAL_FSL_LPUART=y
        select SERIAL_CORE_CONSOLE
+       select SERIAL_EARLYCON
        help
          If you have enabled the lpuart serial port on the Freescale SoCs,
          you can make it the console by answering Y to this option.
index 681e0f3..a1c0a89 100644 (file)
@@ -474,7 +474,7 @@ static int bcm_uart_startup(struct uart_port *port)
 
        /* register irq and enable rx interrupts */
        ret = request_irq(port->irq, bcm_uart_interrupt, 0,
-                         bcm_uart_type(port), port);
+                         dev_name(port->dev), port);
        if (ret)
                return ret;
        bcm_uart_writel(port, UART_RX_INT_MASK, UART_IR_REG);
index 6813e31..2f80bc7 100644 (file)
@@ -894,7 +894,7 @@ static int etraxfs_uart_probe(struct platform_device *pdev)
        up->regi_ser = of_iomap(np, 0);
        up->port.dev = &pdev->dev;
 
-       up->gpios = mctrl_gpio_init(&pdev->dev, 0);
+       up->gpios = mctrl_gpio_init_noauto(&pdev->dev, 0);
        if (IS_ERR(up->gpios))
                return PTR_ERR(up->gpios);
 
index 90ca082..3d245cd 100644 (file)
@@ -265,7 +265,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
  *
  *     Audit @data of @size from @tty, if necessary.
  */
-void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
+void tty_audit_add_data(struct tty_struct *tty, const void *data,
                        size_t size, unsigned icanon)
 {
        struct tty_audit_buf *buf;
index 0c41dbc..bcc8e1e 100644 (file)
@@ -1282,18 +1282,22 @@ int tty_send_xchar(struct tty_struct *tty, char ch)
        int     was_stopped = tty->stopped;
 
        if (tty->ops->send_xchar) {
+               down_read(&tty->termios_rwsem);
                tty->ops->send_xchar(tty, ch);
+               up_read(&tty->termios_rwsem);
                return 0;
        }
 
        if (tty_write_lock(tty, 0) < 0)
                return -ERESTARTSYS;
 
+       down_read(&tty->termios_rwsem);
        if (was_stopped)
                start_tty(tty);
        tty->ops->write(tty, &ch, 1);
        if (was_stopped)
                stop_tty(tty);
+       up_read(&tty->termios_rwsem);
        tty_write_unlock(tty);
        return 0;
 }
index 9c5aebf..1445dd3 100644 (file)
@@ -1147,16 +1147,12 @@ int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
                        spin_unlock_irq(&tty->flow_lock);
                        break;
                case TCIOFF:
-                       down_read(&tty->termios_rwsem);
                        if (STOP_CHAR(tty) != __DISABLED_CHAR)
                                retval = tty_send_xchar(tty, STOP_CHAR(tty));
-                       up_read(&tty->termios_rwsem);
                        break;
                case TCION:
-                       down_read(&tty->termios_rwsem);
                        if (START_CHAR(tty) != __DISABLED_CHAR)
                                retval = tty_send_xchar(tty, START_CHAR(tty));
-                       up_read(&tty->termios_rwsem);
                        break;
                default:
                        return -EINVAL;
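
The two hunks above move the termios_rwsem read lock out of the TCIOFF/TCION callers and into tty_send_xchar() itself, so the lock is taken once, inside the helper that touches the shared state. A loosely analogous userspace sketch of that push-down, using a pthread read-write lock as a stand-in (all names are made up for illustration):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t settings_lock = PTHREAD_RWLOCK_INITIALIZER;
static char special_char = 'S';

/* The read lock is taken inside the helper that uses the shared settings,
 * so every call site is covered without duplicating the locking. */
static void send_special_char(void)
{
        pthread_rwlock_rdlock(&settings_lock);
        printf("sending %c\n", special_char);
        pthread_rwlock_unlock(&settings_lock);
}

int main(void)
{
        send_special_char();    /* callers no longer take the lock themselves */
        return 0;
}
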
index 5af8f18..629e3c8 100644 (file)
@@ -592,7 +592,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
 
        /* Restart the work queue in case no characters kick it off. Safe if
           already running */
-       schedule_work(&tty->port->buf.work);
+       tty_buffer_restart_work(tty->port);
 
        tty_unlock(tty);
        return retval;
index 6ccbf60..5a048b7 100644 (file)
@@ -84,6 +84,12 @@ struct ci_hdrc_imx_data {
        struct imx_usbmisc_data *usbmisc_data;
        bool supports_runtime_pm;
        bool in_lpm;
+       /* SoCs before i.MX6 (except imx23/imx28) need three clocks */
+       bool need_three_clks;
+       struct clk *clk_ipg;
+       struct clk *clk_ahb;
+       struct clk *clk_per;
+       /* --------------------------------- */
 };
 
 /* Common functions shared by usbmisc drivers */
@@ -135,6 +141,102 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
 }
 
 /* End of common functions shared by usbmisc drivers*/
+static int imx_get_clks(struct device *dev)
+{
+       struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
+       int ret = 0;
+
+       data->clk_ipg = devm_clk_get(dev, "ipg");
+       if (IS_ERR(data->clk_ipg)) {
+               /* If the platform only needs one clock */
+               data->clk = devm_clk_get(dev, NULL);
+               if (IS_ERR(data->clk)) {
+                       ret = PTR_ERR(data->clk);
+                       dev_err(dev,
+                               "Failed to get clks, err=%ld,%ld\n",
+                               PTR_ERR(data->clk), PTR_ERR(data->clk_ipg));
+                       return ret;
+               }
+               return ret;
+       }
+
+       data->clk_ahb = devm_clk_get(dev, "ahb");
+       if (IS_ERR(data->clk_ahb)) {
+               ret = PTR_ERR(data->clk_ahb);
+               dev_err(dev,
+                       "Failed to get ahb clock, err=%d\n", ret);
+               return ret;
+       }
+
+       data->clk_per = devm_clk_get(dev, "per");
+       if (IS_ERR(data->clk_per)) {
+               ret = PTR_ERR(data->clk_per);
+               dev_err(dev,
+                       "Failed to get per clock, err=%d\n", ret);
+               return ret;
+       }
+
+       data->need_three_clks = true;
+       return ret;
+}
+
+static int imx_prepare_enable_clks(struct device *dev)
+{
+       struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
+       int ret = 0;
+
+       if (data->need_three_clks) {
+               ret = clk_prepare_enable(data->clk_ipg);
+               if (ret) {
+                       dev_err(dev,
+                               "Failed to prepare/enable ipg clk, err=%d\n",
+                               ret);
+                       return ret;
+               }
+
+               ret = clk_prepare_enable(data->clk_ahb);
+               if (ret) {
+                       dev_err(dev,
+                               "Failed to prepare/enable ahb clk, err=%d\n",
+                               ret);
+                       clk_disable_unprepare(data->clk_ipg);
+                       return ret;
+               }
+
+               ret = clk_prepare_enable(data->clk_per);
+               if (ret) {
+                       dev_err(dev,
+                               "Failed to prepare/enable per clk, err=%d\n",
+                               ret);
+                       clk_disable_unprepare(data->clk_ahb);
+                       clk_disable_unprepare(data->clk_ipg);
+                       return ret;
+               }
+       } else {
+               ret = clk_prepare_enable(data->clk);
+               if (ret) {
+                       dev_err(dev,
+                               "Failed to prepare/enable clk, err=%d\n",
+                               ret);
+                       return ret;
+               }
+       }
+
+       return ret;
+}
+
+static void imx_disable_unprepare_clks(struct device *dev)
+{
+       struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
+
+       if (data->need_three_clks) {
+               clk_disable_unprepare(data->clk_per);
+               clk_disable_unprepare(data->clk_ahb);
+               clk_disable_unprepare(data->clk_ipg);
+       } else {
+               clk_disable_unprepare(data->clk);
+       }
+}
 
 static int ci_hdrc_imx_probe(struct platform_device *pdev)
 {
@@ -145,31 +247,31 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
                .flags          = CI_HDRC_SET_NON_ZERO_TTHA,
        };
        int ret;
-       const struct of_device_id *of_id =
-                       of_match_device(ci_hdrc_imx_dt_ids, &pdev->dev);
-       const struct ci_hdrc_imx_platform_flag *imx_platform_flag = of_id->data;
+       const struct of_device_id *of_id;
+       const struct ci_hdrc_imx_platform_flag *imx_platform_flag;
+
+       of_id = of_match_device(ci_hdrc_imx_dt_ids, &pdev->dev);
+       if (!of_id)
+               return -ENODEV;
+
+       imx_platform_flag = of_id->data;
 
        data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
+       platform_set_drvdata(pdev, data);
        data->usbmisc_data = usbmisc_get_init_data(&pdev->dev);
        if (IS_ERR(data->usbmisc_data))
                return PTR_ERR(data->usbmisc_data);
 
-       data->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(data->clk)) {
-               dev_err(&pdev->dev,
-                       "Failed to get clock, err=%ld\n", PTR_ERR(data->clk));
-               return PTR_ERR(data->clk);
-       }
+       ret = imx_get_clks(&pdev->dev);
+       if (ret)
+               return ret;
 
-       ret = clk_prepare_enable(data->clk);
-       if (ret) {
-               dev_err(&pdev->dev,
-                       "Failed to prepare or enable clock, err=%d\n", ret);
+       ret = imx_prepare_enable_clks(&pdev->dev);
+       if (ret)
                return ret;
-       }
 
        data->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "fsl,usbphy", 0);
        if (IS_ERR(data->phy)) {
@@ -212,8 +314,6 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
                goto disable_device;
        }
 
-       platform_set_drvdata(pdev, data);
-
        if (data->supports_runtime_pm) {
                pm_runtime_set_active(&pdev->dev);
                pm_runtime_enable(&pdev->dev);
@@ -226,7 +326,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
 disable_device:
        ci_hdrc_remove_device(data->ci_pdev);
 err_clk:
-       clk_disable_unprepare(data->clk);
+       imx_disable_unprepare_clks(&pdev->dev);
        return ret;
 }
 
@@ -240,7 +340,7 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev)
                pm_runtime_put_noidle(&pdev->dev);
        }
        ci_hdrc_remove_device(data->ci_pdev);
-       clk_disable_unprepare(data->clk);
+       imx_disable_unprepare_clks(&pdev->dev);
 
        return 0;
 }
@@ -252,7 +352,7 @@ static int imx_controller_suspend(struct device *dev)
 
        dev_dbg(dev, "at %s\n", __func__);
 
-       clk_disable_unprepare(data->clk);
+       imx_disable_unprepare_clks(dev);
        data->in_lpm = true;
 
        return 0;
@@ -270,7 +370,7 @@ static int imx_controller_resume(struct device *dev)
                return 0;
        }
 
-       ret = clk_prepare_enable(data->clk);
+       ret = imx_prepare_enable_clks(dev);
        if (ret)
                return ret;
 
@@ -285,7 +385,7 @@ static int imx_controller_resume(struct device *dev)
        return 0;
 
 clk_disable:
-       clk_disable_unprepare(data->clk);
+       imx_disable_unprepare_clks(dev);
        return ret;
 }
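
imx_get_clks() above tries the named "ipg", "ahb" and "per" clocks first and falls back to a single unnamed clock, recording which case applies in need_three_clks so that enable/disable stays balanced. A standalone sketch of the acquire-with-fallback part of that pattern (the lookup stub and names are illustrative only, not the clock framework API):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stub: only three named "clocks" exist on this platform. */
static int lookup_clk(const char *name)
{
        if (name == NULL)
                return 0;               /* unnamed default clock */
        if (!strcmp(name, "ipg") || !strcmp(name, "ahb") || !strcmp(name, "per"))
                return 0;
        return -1;
}

static bool need_three_clks;

static int get_clks(void)
{
        if (lookup_clk("ipg")) {
                /* Simpler platform: fall back to the single default clock. */
                return lookup_clk(NULL);
        }
        if (lookup_clk("ahb") || lookup_clk("per"))
                return -1;
        need_three_clks = true;
        return 0;
}

int main(void)
{
        if (get_clks())
                return 1;
        printf("using %s\n", need_three_clks ? "three clocks" : "one clock");
        return 0;
}
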
 
index 080b7be..58c8485 100644 (file)
@@ -322,8 +322,10 @@ static ssize_t ci_role_write(struct file *file, const char __user *ubuf,
                return -EINVAL;
 
        pm_runtime_get_sync(ci->dev);
+       disable_irq(ci->irq);
        ci_role_stop(ci);
        ret = ci_role_start(ci, role);
+       enable_irq(ci->irq);
        pm_runtime_put_sync(ci->dev);
 
        return ret ? ret : count;
index 8223fe7..391a122 100644 (file)
@@ -1751,6 +1751,22 @@ static int ci_udc_start(struct usb_gadget *gadget,
        return retval;
 }
 
+static void ci_udc_stop_for_otg_fsm(struct ci_hdrc *ci)
+{
+       if (!ci_otg_is_fsm_mode(ci))
+               return;
+
+       mutex_lock(&ci->fsm.lock);
+       if (ci->fsm.otg->state == OTG_STATE_A_PERIPHERAL) {
+               ci->fsm.a_bidl_adis_tmout = 1;
+               ci_hdrc_otg_fsm_start(ci);
+       } else if (ci->fsm.otg->state == OTG_STATE_B_PERIPHERAL) {
+               ci->fsm.protocol = PROTO_UNDEF;
+               ci->fsm.otg->state = OTG_STATE_UNDEFINED;
+       }
+       mutex_unlock(&ci->fsm.lock);
+}
+
 /**
  * ci_udc_stop: unregister a gadget driver
  */
@@ -1775,6 +1791,7 @@ static int ci_udc_stop(struct usb_gadget *gadget)
        ci->driver = NULL;
        spin_unlock_irqrestore(&ci->lock, flags);
 
+       ci_udc_stop_for_otg_fsm(ci);
        return 0;
 }
 
index fcea4eb..ab8b027 100644 (file)
@@ -500,7 +500,11 @@ static int usbmisc_imx_probe(struct platform_device *pdev)
 {
        struct resource *res;
        struct imx_usbmisc *data;
-       struct of_device_id *tmp_dev;
+       const struct of_device_id *of_id;
+
+       of_id = of_match_device(usbmisc_imx_dt_ids, &pdev->dev);
+       if (!of_id)
+               return -ENODEV;
 
        data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
@@ -513,9 +517,7 @@ static int usbmisc_imx_probe(struct platform_device *pdev)
        if (IS_ERR(data->base))
                return PTR_ERR(data->base);
 
-       tmp_dev = (struct of_device_id *)
-               of_match_device(usbmisc_imx_dt_ids, &pdev->dev);
-       data->ops = (const struct usbmisc_ops *)tmp_dev->data;
+       data->ops = (const struct usbmisc_ops *)of_id->data;
        platform_set_drvdata(pdev, data);
 
        return 0;
index 433bbc3..071964c 100644 (file)
@@ -884,11 +884,11 @@ static int usblp_wwait(struct usblp *usblp, int nonblock)
 
        add_wait_queue(&usblp->wwait, &waita);
        for (;;) {
-               set_current_state(TASK_INTERRUPTIBLE);
                if (mutex_lock_interruptible(&usblp->mut)) {
                        rc = -EINTR;
                        break;
                }
+               set_current_state(TASK_INTERRUPTIBLE);
                rc = usblp_wtest(usblp, nonblock);
                mutex_unlock(&usblp->mut);
                if (rc <= 0)
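
The usblp hunk above takes the mutex before calling set_current_state(): mutex_lock_interruptible() may itself sleep, which would leave the task back in TASK_RUNNING and defeat the wait state that had just been armed. The userspace counterpart of "take the lock, then check the condition, then sleep" is the standard condition-variable loop; a minimal sketch (all names illustrative):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool ready;

/* Waiter: acquire the lock first, then check and wait; the wait is only
 * "armed" while the lock is held, so a wakeup cannot be lost. */
static void wait_until_ready(void)
{
        pthread_mutex_lock(&lock);
        while (!ready)
                pthread_cond_wait(&cond, &lock);   /* atomically unlocks and sleeps */
        pthread_mutex_unlock(&lock);
}

static void mark_ready(void)
{
        pthread_mutex_lock(&lock);
        ready = true;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        mark_ready();
        wait_until_ready();     /* returns immediately because ready is set */
        return 0;
}
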
index a99c89e..dd28010 100644 (file)
@@ -77,8 +77,7 @@ config USB_OTG_BLACKLIST_HUB
 
 config USB_OTG_FSM
        tristate "USB 2.0 OTG FSM implementation"
-       depends on USB
-       select USB_OTG
+       depends on USB && USB_OTG
        select USB_PHY
        help
          Implements OTG Finite State Machine as specified in On-The-Go
index e79baf7..571c217 100644 (file)
@@ -324,12 +324,13 @@ void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg)
  */
 static void dwc2_hcd_rem_wakeup(struct dwc2_hsotg *hsotg)
 {
-       if (hsotg->lx_state == DWC2_L2) {
+       if (hsotg->bus_suspended) {
                hsotg->flags.b.port_suspend_change = 1;
                usb_hcd_resume_root_hub(hsotg->priv);
-       } else {
-               hsotg->flags.b.port_l1_change = 1;
        }
+
+       if (hsotg->lx_state == DWC2_L1)
+               hsotg->flags.b.port_l1_change = 1;
 }
 
 /**
@@ -1428,8 +1429,8 @@ static void dwc2_wakeup_detected(unsigned long data)
        dev_dbg(hsotg->dev, "Clear Resume: HPRT0=%0x\n",
                dwc2_readl(hsotg->regs + HPRT0));
 
-       hsotg->bus_suspended = 0;
        dwc2_hcd_rem_wakeup(hsotg);
+       hsotg->bus_suspended = 0;
 
        /* Change to L0 state */
        hsotg->lx_state = DWC2_L0;
index 5859b0f..e61d773 100644 (file)
@@ -108,7 +108,8 @@ static const struct dwc2_core_params params_rk3066 = {
        .host_ls_low_power_phy_clk      = -1,
        .ts_dline                       = -1,
        .reload_ctl                     = -1,
-       .ahbcfg                         = 0x7, /* INCR16 */
+       .ahbcfg                         = GAHBCFG_HBSTLEN_INCR16 <<
+                                         GAHBCFG_HBSTLEN_SHIFT,
        .uframe_sched                   = -1,
        .external_id_pin_ctl            = -1,
        .hibernation                    = -1,
index 77a622c..009d830 100644 (file)
@@ -34,6 +34,8 @@
 #define PCI_DEVICE_ID_INTEL_BSW                        0x22b7
 #define PCI_DEVICE_ID_INTEL_SPTLP              0x9d30
 #define PCI_DEVICE_ID_INTEL_SPTH               0xa130
+#define PCI_DEVICE_ID_INTEL_BXT                        0x0aaa
+#define PCI_DEVICE_ID_INTEL_APL                        0x5aaa
 
 static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
 static const struct acpi_gpio_params cs_gpios = { 1, 0, false };
@@ -210,6 +212,8 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
        {  }    /* Terminating Entry */
 };
index 55ba447..e24a01c 100644 (file)
@@ -2744,11 +2744,33 @@ int dwc3_gadget_init(struct dwc3 *dwc)
        }
 
        dwc->gadget.ops                 = &dwc3_gadget_ops;
-       dwc->gadget.max_speed           = USB_SPEED_SUPER;
        dwc->gadget.speed               = USB_SPEED_UNKNOWN;
        dwc->gadget.sg_supported        = true;
        dwc->gadget.name                = "dwc3-gadget";
 
+       /*
+        * FIXME: We might be setting max_speed to <SUPER; however, versions
+        * <2.20a of dwc3 have an issue with metastability (documented
+        * elsewhere in this driver) which means we can't set max_speed to
+        * anything lower than SUPER.
+        *
+        * Because gadget.max_speed is only used by composite.c and function
+        * drivers (i.e. it won't go into dwc3's registers) we are allowing this
+        * to happen so we avoid sending the SuperSpeed Capability descriptor
+        * together with our BOS descriptor, as that could confuse the host into
+        * thinking we can handle SuperSpeed.
+        *
+        * Note that, in fact, we won't even support GetBOS requests when speed
+        * is less than SuperSpeed, because we don't yet have a means to tell
+        * composite.c that we are USB 2.0 + LPM ECN.
+        */
+       if (dwc->revision < DWC3_REVISION_220A)
+               dwc3_trace(trace_dwc3_gadget,
+                               "Changing max_speed on rev %08x\n",
+                               dwc->revision);
+
+       dwc->gadget.max_speed           = dwc->maximum_speed;
+
        /*
         * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
         * on ep out.
index 23933bd..ddc3aad 100644 (file)
@@ -329,7 +329,7 @@ static int alloc_requests(struct usb_composite_dev *cdev,
        for (i = 0; i < loop->qlen && result == 0; i++) {
                result = -ENOMEM;
 
-               in_req = usb_ep_alloc_request(loop->in_ep, GFP_KERNEL);
+               in_req = usb_ep_alloc_request(loop->in_ep, GFP_ATOMIC);
                if (!in_req)
                        goto fail;
 
index f0f2b06..f92f5af 100644 (file)
@@ -1633,7 +1633,7 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
        spin_lock(&udc->lock);
 
        int_enb = usba_int_enb_get(udc);
-       status = usba_readl(udc, INT_STA) & int_enb;
+       status = usba_readl(udc, INT_STA) & (int_enb | USBA_HIGH_SPEED);
        DBG(DBG_INT, "irq, status=%#08x\n", status);
 
        if (status & USBA_DET_SUSPEND) {
index 5d2d7e9..0230965 100644 (file)
@@ -782,12 +782,15 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
                        status |= USB_PORT_STAT_SUSPEND;
                }
        }
-       if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0
-                       && (raw_port_status & PORT_POWER)
-                       && (bus_state->suspended_ports & (1 << wIndex))) {
-               bus_state->suspended_ports &= ~(1 << wIndex);
-               if (hcd->speed < HCD_USB3)
-                       bus_state->port_c_suspend |= 1 << wIndex;
+       if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0 &&
+           (raw_port_status & PORT_POWER)) {
+               if (bus_state->suspended_ports & (1 << wIndex)) {
+                       bus_state->suspended_ports &= ~(1 << wIndex);
+                       if (hcd->speed < HCD_USB3)
+                               bus_state->port_c_suspend |= 1 << wIndex;
+               }
+               bus_state->resume_done[wIndex] = 0;
+               clear_bit(wIndex, &bus_state->resuming_ports);
        }
        if (raw_port_status & PORT_CONNECT) {
                status |= USB_PORT_STAT_CONNECTION;
index fa83625..6c5e813 100644 (file)
@@ -3896,28 +3896,6 @@ cleanup:
        return ret;
 }
 
-static int ep_ring_is_processing(struct xhci_hcd *xhci,
-               int slot_id, unsigned int ep_index)
-{
-       struct xhci_virt_device *xdev;
-       struct xhci_ring *ep_ring;
-       struct xhci_ep_ctx *ep_ctx;
-       struct xhci_virt_ep *xep;
-       dma_addr_t hw_deq;
-
-       xdev = xhci->devs[slot_id];
-       xep = &xhci->devs[slot_id]->eps[ep_index];
-       ep_ring = xep->ring;
-       ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
-
-       if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) != EP_STATE_RUNNING)
-               return 0;
-
-       hw_deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
-       return (hw_deq !=
-               xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue));
-}
-
 /*
  * Check transfer ring to guarantee there is enough room for the urb.
  * Update ISO URB start_frame and interval.
@@ -3983,10 +3961,12 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
        }
 
        /* Calculate the start frame and put it in urb->start_frame. */
-       if (HCC_CFC(xhci->hcc_params) &&
-                       ep_ring_is_processing(xhci, slot_id, ep_index)) {
-               urb->start_frame = xep->next_frame_id;
-               goto skip_start_over;
+       if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
+               if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
+                               EP_STATE_RUNNING) {
+                       urb->start_frame = xep->next_frame_id;
+                       goto skip_start_over;
+               }
        }
 
        start_frame = readl(&xhci->run_regs->microframe_index);
index 6e7dc6f..dfa44d3 100644 (file)
@@ -175,6 +175,16 @@ int xhci_reset(struct xhci_hcd *xhci)
        command |= CMD_RESET;
        writel(command, &xhci->op_regs->command);
 
+       /* Existing Intel xHCI controllers require a delay of 1 ms
+        * after setting the CMD_RESET bit, and before accessing any
+        * HC registers. This allows the HC to complete the
+        * reset operation and be ready for HC register access.
+        * Without this delay, the subsequent HC register access
+        * may very rarely result in a system hang.
+        */
+       if (xhci->quirks & XHCI_INTEL_HOST)
+               udelay(1000);
+
        ret = xhci_handshake(&xhci->op_regs->command,
                        CMD_RESET, 0, 10 * 1000 * 1000);
        if (ret)
index ba13529..18cfc0a 100644 (file)
@@ -132,7 +132,7 @@ static inline struct musb *dev_to_musb(struct device *dev)
 /*-------------------------------------------------------------------------*/
 
 #ifndef CONFIG_BLACKFIN
-static int musb_ulpi_read(struct usb_phy *phy, u32 offset)
+static int musb_ulpi_read(struct usb_phy *phy, u32 reg)
 {
        void __iomem *addr = phy->io_priv;
        int     i = 0;
@@ -151,7 +151,7 @@ static int musb_ulpi_read(struct usb_phy *phy, u32 offset)
         * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM.
         */
 
-       musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset);
+       musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
        musb_writeb(addr, MUSB_ULPI_REG_CONTROL,
                        MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR);
 
@@ -176,7 +176,7 @@ out:
        return ret;
 }
 
-static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data)
+static int musb_ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
 {
        void __iomem *addr = phy->io_priv;
        int     i = 0;
@@ -191,8 +191,8 @@ static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data)
        power &= ~MUSB_POWER_SUSPENDM;
        musb_writeb(addr, MUSB_POWER, power);
 
-       musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset);
-       musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)data);
+       musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
+       musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)val);
        musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ);
 
        while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
@@ -1668,7 +1668,7 @@ EXPORT_SYMBOL_GPL(musb_interrupt);
 static bool use_dma = 1;
 
 /* "modprobe ... use_dma=0" etc */
-module_param(use_dma, bool, 0);
+module_param(use_dma, bool, 0644);
 MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
 
 void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
index 26c65e6..795a45b 100644 (file)
@@ -112,22 +112,32 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
        struct musb     *musb = ep->musb;
        void __iomem    *epio = ep->regs;
        u16             csr;
-       u16             lastcsr = 0;
        int             retries = 1000;
 
        csr = musb_readw(epio, MUSB_TXCSR);
        while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
-               if (csr != lastcsr)
-                       dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
-               lastcsr = csr;
                csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY;
                musb_writew(epio, MUSB_TXCSR, csr);
                csr = musb_readw(epio, MUSB_TXCSR);
-               if (WARN(retries-- < 1,
+
+               /*
+                * FIXME: sometimes the TX FIFO flush fails; this has been
+                * observed during device disconnect on AM335x.
+                *
+                * To reproduce the issue, ensure TX urb(s) are queued when
+                * unplugging the USB device that is connected to the AM335x
+                * USB host port.
+                *
+                * I found that using a USB-ethernet device and running iperf
+                * (client on AM335x) has a very high chance of triggering it.
+                *
+                * Better to turn on dev_dbg() in musb_cleanup_urb() with
+                * CPPI enabled to see the issue when aborting the tx channel.
+                */
+               if (dev_WARN_ONCE(musb->controller, retries-- < 1,
                                "Could not flush host TX%d fifo: csr: %04x\n",
                                ep->epnum, csr))
                        return;
-               mdelay(1);
        }
 }
 
index 1731324..22e8ecb 100644 (file)
@@ -21,7 +21,6 @@ config AB8500_USB
 config FSL_USB2_OTG
        bool "Freescale USB OTG Transceiver Driver"
        depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM
-       select USB_OTG
        select USB_PHY
        help
          Enable this to support Freescale USB OTG transceiver.
@@ -168,8 +167,7 @@ config USB_QCOM_8X16_PHY
 
 config USB_MV_OTG
        tristate "Marvell USB OTG support"
-       depends on USB_EHCI_MV && USB_MV_UDC && PM
-       select USB_OTG
+       depends on USB_EHCI_MV && USB_MV_UDC && PM && USB_OTG
        select USB_PHY
        help
          Say Y here if you want to build Marvell USB OTG transceiver
index 4d863eb..b7536af 100644 (file)
@@ -452,10 +452,13 @@ static int mxs_phy_probe(struct platform_device *pdev)
        struct clk *clk;
        struct mxs_phy *mxs_phy;
        int ret;
-       const struct of_device_id *of_id =
-                       of_match_device(mxs_phy_dt_ids, &pdev->dev);
+       const struct of_device_id *of_id;
        struct device_node *np = pdev->dev.of_node;
 
+       of_id = of_match_device(mxs_phy_dt_ids, &pdev->dev);
+       if (!of_id)
+               return -ENODEV;
+
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
index 1270906..c4bf2de 100644 (file)
@@ -105,7 +105,6 @@ static int omap_otg_probe(struct platform_device *pdev)
        extcon = extcon_get_extcon_dev(config->extcon);
        if (!extcon)
                return -EPROBE_DEFER;
-       otg_dev->extcon = extcon;
 
        otg_dev = devm_kzalloc(&pdev->dev, sizeof(*otg_dev), GFP_KERNEL);
        if (!otg_dev)
@@ -115,6 +114,7 @@ static int omap_otg_probe(struct platform_device *pdev)
        if (IS_ERR(otg_dev->base))
                return PTR_ERR(otg_dev->base);
 
+       otg_dev->extcon = extcon;
        otg_dev->id_nb.notifier_call = omap_otg_id_notifier;
        otg_dev->vbus_nb.notifier_call = omap_otg_vbus_notifier;
 
index 685fef7..f228060 100644 (file)
@@ -161,6 +161,7 @@ static void option_instat_callback(struct urb *urb);
 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED        0x9001
 #define NOVATELWIRELESS_PRODUCT_E362           0x9010
 #define NOVATELWIRELESS_PRODUCT_E371           0x9011
+#define NOVATELWIRELESS_PRODUCT_U620L          0x9022
 #define NOVATELWIRELESS_PRODUCT_G2             0xA010
 #define NOVATELWIRELESS_PRODUCT_MC551          0xB001
 
@@ -354,6 +355,7 @@ static void option_instat_callback(struct urb *urb);
 /* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick *
  * It seems to contain a Qualcomm QSC6240/6290 chipset            */
 #define FOUR_G_SYSTEMS_PRODUCT_W14             0x9603
+#define FOUR_G_SYSTEMS_PRODUCT_W100            0x9b01
 
 /* iBall 3.5G connect wireless modem */
 #define IBALL_3_5G_CONNECT                     0x9605
@@ -519,6 +521,11 @@ static const struct option_blacklist_info four_g_w14_blacklist = {
        .sendsetup = BIT(0) | BIT(1),
 };
 
+static const struct option_blacklist_info four_g_w100_blacklist = {
+       .sendsetup = BIT(1) | BIT(2),
+       .reserved = BIT(3),
+};
+
 static const struct option_blacklist_info alcatel_x200_blacklist = {
        .sendsetup = BIT(0) | BIT(1),
        .reserved = BIT(4),
@@ -1052,6 +1059,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U620L, 0xff, 0x00, 0x00) },
 
        { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
        { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
@@ -1641,6 +1649,9 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
          .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
        },
+       { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
+         .driver_info = (kernel_ulong_t)&four_g_w100_blacklist
+       },
        { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
        { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
        { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
index 5022fcf..9919d2a 100644 (file)
@@ -22,6 +22,8 @@
 #define DRIVER_AUTHOR "Qualcomm Inc"
 #define DRIVER_DESC "Qualcomm USB Serial driver"
 
+#define QUECTEL_EC20_PID       0x9215
+
 /* standard device layouts supported by this driver */
 enum qcserial_layouts {
        QCSERIAL_G2K = 0,       /* Gobi 2000 */
@@ -171,6 +173,38 @@ static const struct usb_device_id id_table[] = {
 };
 MODULE_DEVICE_TABLE(usb, id_table);
 
+static int handle_quectel_ec20(struct device *dev, int ifnum)
+{
+       int altsetting = 0;
+
+       /*
+        * Quectel EC20 Mini PCIe LTE module layout:
+        * 0: DM/DIAG (use libqcdm from ModemManager for communication)
+        * 1: NMEA
+        * 2: AT-capable modem port
+        * 3: Modem interface
+        * 4: NDIS
+        */
+       switch (ifnum) {
+       case 0:
+               dev_dbg(dev, "Quectel EC20 DM/DIAG interface found\n");
+               break;
+       case 1:
+               dev_dbg(dev, "Quectel EC20 NMEA GPS interface found\n");
+               break;
+       case 2:
+       case 3:
+               dev_dbg(dev, "Quectel EC20 Modem port found\n");
+               break;
+       case 4:
+               /* Don't claim the QMI/net interface */
+               altsetting = -1;
+               break;
+       }
+
+       return altsetting;
+}
+
 static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
 {
        struct usb_host_interface *intf = serial->interface->cur_altsetting;
@@ -181,6 +215,10 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
        int altsetting = -1;
        bool sendsetup = false;
 
+       /* we only support vendor specific functions */
+       if (intf->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
+               goto done;
+
        nintf = serial->dev->actconfig->desc.bNumInterfaces;
        dev_dbg(dev, "Num Interfaces = %d\n", nintf);
        ifnum = intf->desc.bInterfaceNumber;
@@ -240,6 +278,12 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
                        altsetting = -1;
                break;
        case QCSERIAL_G2K:
+               /* handle non-standard layouts */
+               if (nintf == 5 && id->idProduct == QUECTEL_EC20_PID) {
+                       altsetting = handle_quectel_ec20(dev, ifnum);
+                       goto done;
+               }
+
                /*
                 * Gobi 2K+ USB layout:
                 * 0: QMI/net
@@ -301,29 +345,39 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
                break;
        case QCSERIAL_HWI:
                /*
-                * Huawei layout:
-                * 0: AT-capable modem port
-                * 1: DM/DIAG
-                * 2: AT-capable modem port
-                * 3: CCID-compatible PCSC interface
-                * 4: QMI/net
-                * 5: NMEA
+                * Huawei devices map functions by subclass + protocol
+                * instead of interface numbers. The protocol identifies
+                * a specific function, while the subclass indicates a
+                * specific firmware source.
+                *
+                * This is a blacklist of functions known to be
+                * non-serial.  The rest are assumed to be serial and
+                * will be handled by this driver
                 */
-               switch (ifnum) {
-               case 0:
-               case 2:
-                       dev_dbg(dev, "Modem port found\n");
-                       break;
-               case 1:
-                       dev_dbg(dev, "DM/DIAG interface found\n");
-                       break;
-               case 5:
-                       dev_dbg(dev, "NMEA GPS interface found\n");
-                       break;
-               default:
-                       /* don't claim any unsupported interface */
+               switch (intf->desc.bInterfaceProtocol) {
+                       /* QMI combined (qmi_wwan) */
+               case 0x07:
+               case 0x37:
+               case 0x67:
+                       /* QMI data (qmi_wwan) */
+               case 0x08:
+               case 0x38:
+               case 0x68:
+                       /* QMI control (qmi_wwan) */
+               case 0x09:
+               case 0x39:
+               case 0x69:
+                       /* NCM like (huawei_cdc_ncm) */
+               case 0x16:
+               case 0x46:
+               case 0x76:
                        altsetting = -1;
                        break;
+               default:
+                       dev_dbg(dev, "Huawei type serial port found (%02x/%02x/%02x)\n",
+                               intf->desc.bInterfaceClass,
+                               intf->desc.bInterfaceSubClass,
+                               intf->desc.bInterfaceProtocol);
                }
                break;
        default:
index e9da41d..2694df2 100644 (file)
@@ -159,6 +159,7 @@ static const struct usb_device_id ti_id_table_3410[] = {
        { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) },
        { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
        { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
+       { USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) },
        { }     /* terminator */
 };
 
@@ -191,6 +192,7 @@ static const struct usb_device_id ti_id_table_combined[] = {
        { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
        { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
        { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
+       { USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) },
        { }     /* terminator */
 };
 
index 4a2423e..98f35c6 100644 (file)
 #define ABBOTT_PRODUCT_ID              ABBOTT_STEREO_PLUG_ID
 #define ABBOTT_STRIP_PORT_ID           0x3420
 
+/* Honeywell vendor and product IDs */
+#define HONEYWELL_VENDOR_ID            0x10ac
+#define HONEYWELL_HGI80_PRODUCT_ID     0x0102  /* Honeywell HGI80 */
+
 /* Commands */
 #define TI_GET_VERSION                 0x01
 #define TI_GET_PORT_STATUS             0x02
index 7a8a6c6..1c427be 100644 (file)
@@ -446,7 +446,7 @@ config MAX63XX_WATCHDOG
 
 config IMX2_WDT
        tristate "IMX2+ Watchdog"
-       depends on ARCH_MXC
+       depends on ARCH_MXC || ARCH_LAYERSCAPE
        select REGMAP_MMIO
        select WATCHDOG_CORE
        help
index 6ad9df9..b751f43 100644 (file)
@@ -123,6 +123,7 @@ static int mtk_wdt_stop(struct watchdog_device *wdt_dev)
 
        reg = readl(wdt_base + WDT_MODE);
        reg &= ~WDT_MODE_EN;
+       reg |= WDT_MODE_KEY;
        iowrite32(reg, wdt_base + WDT_MODE);
 
        return 0;
index d96bee0..6f17c93 100644 (file)
@@ -205,7 +205,7 @@ static int omap_wdt_set_timeout(struct watchdog_device *wdog,
 
 static unsigned int omap_wdt_get_timeleft(struct watchdog_device *wdog)
 {
-       struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog);
+       struct omap_wdt_dev *wdev = to_omap_wdt_dev(wdog);
        void __iomem *base = wdev->base;
        u32 value;
 
index 4224b3e..313cd1c 100644 (file)
@@ -80,7 +80,7 @@ static unsigned int heartbeat = DEFAULT_HEARTBEAT;
 
 static DEFINE_SPINLOCK(io_lock);
 static void __iomem    *wdt_base;
-struct clk             *wdt_clk;
+static struct clk      *wdt_clk;
 
 static int pnx4008_wdt_start(struct watchdog_device *wdd)
 {
@@ -161,7 +161,7 @@ static int pnx4008_wdt_probe(struct platform_device *pdev)
        if (IS_ERR(wdt_clk))
                return PTR_ERR(wdt_clk);
 
-       ret = clk_enable(wdt_clk);
+       ret = clk_prepare_enable(wdt_clk);
        if (ret)
                return ret;
 
@@ -184,7 +184,7 @@ static int pnx4008_wdt_probe(struct platform_device *pdev)
        return 0;
 
 disable_clk:
-       clk_disable(wdt_clk);
+       clk_disable_unprepare(wdt_clk);
        return ret;
 }
 
@@ -192,7 +192,7 @@ static int pnx4008_wdt_remove(struct platform_device *pdev)
 {
        watchdog_unregister_device(&pnx4008_wdd);
 
-       clk_disable(wdt_clk);
+       clk_disable_unprepare(wdt_clk);
 
        return 0;
 }
index 7f97cdd..9ec5760 100644 (file)
@@ -140,8 +140,10 @@ static int tegra_wdt_set_timeout(struct watchdog_device *wdd,
 {
        wdd->timeout = timeout;
 
-       if (watchdog_active(wdd))
+       if (watchdog_active(wdd)) {
+               tegra_wdt_stop(wdd);
                return tegra_wdt_start(wdd);
+       }
 
        return 0;
 }
index 91bf55a..20e2bba 100644 (file)
@@ -224,7 +224,7 @@ static int wdt_keepalive(void)
 
 static int wdt_set_timeout(int t)
 {
-       int tmrval;
+       unsigned int tmrval;
 
        /*
         * Convert seconds to watchdog counter time units, rounding up.
index 849500e..524c221 100644 (file)
@@ -39,6 +39,7 @@
 #include <asm/irq.h>
 #include <asm/idle.h>
 #include <asm/io_apic.h>
+#include <asm/i8259.h>
 #include <asm/xen/pci.h>
 #endif
 #include <asm/sync_bitops.h>
@@ -420,7 +421,7 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
                return xen_allocate_irq_dynamic();
 
        /* Legacy IRQ descriptors are already allocated by the arch. */
-       if (gsi < NR_IRQS_LEGACY)
+       if (gsi < nr_legacy_irqs())
                irq = gsi;
        else
                irq = irq_alloc_desc_at(gsi, -1);
@@ -446,7 +447,7 @@ static void xen_free_irq(unsigned irq)
        kfree(info);
 
        /* Legacy IRQ descriptors are managed by the arch. */
-       if (irq < NR_IRQS_LEGACY)
+       if (irq < nr_legacy_irqs())
                return;
 
        irq_free_desc(irq);
index 00f40f0..38272ad 100644 (file)
@@ -49,6 +49,8 @@
 #include <linux/init.h>
 #include <linux/mutex.h>
 #include <linux/cpu.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
 
 #include <xen/xen.h>
 #include <xen/events.h>
 struct per_user_data {
        struct mutex bind_mutex; /* serialize bind/unbind operations */
        struct rb_root evtchns;
+       unsigned int nr_evtchns;
 
        /* Notification ring, accessed via /dev/xen/evtchn. */
-#define EVTCHN_RING_SIZE     (PAGE_SIZE / sizeof(evtchn_port_t))
-#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
+       unsigned int ring_size;
        evtchn_port_t *ring;
        unsigned int ring_cons, ring_prod, ring_overflow;
        struct mutex ring_cons_mutex; /* protect against concurrent readers */
@@ -80,10 +82,41 @@ struct user_evtchn {
        bool enabled;
 };
 
+static evtchn_port_t *evtchn_alloc_ring(unsigned int size)
+{
+       evtchn_port_t *ring;
+       size_t s = size * sizeof(*ring);
+
+       ring = kmalloc(s, GFP_KERNEL);
+       if (!ring)
+               ring = vmalloc(s);
+
+       return ring;
+}
+
+static void evtchn_free_ring(evtchn_port_t *ring)
+{
+       kvfree(ring);
+}
+
+static unsigned int evtchn_ring_offset(struct per_user_data *u,
+                                      unsigned int idx)
+{
+       return idx & (u->ring_size - 1);
+}
+
+static evtchn_port_t *evtchn_ring_entry(struct per_user_data *u,
+                                       unsigned int idx)
+{
+       return u->ring + evtchn_ring_offset(u, idx);
+}
+
 static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
 {
        struct rb_node **new = &(u->evtchns.rb_node), *parent = NULL;
 
+       u->nr_evtchns++;
+
        while (*new) {
                struct user_evtchn *this;
 
@@ -107,6 +140,7 @@ static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
 
 static void del_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
 {
+       u->nr_evtchns--;
        rb_erase(&evtchn->node, &u->evtchns);
        kfree(evtchn);
 }
@@ -144,8 +178,8 @@ static irqreturn_t evtchn_interrupt(int irq, void *data)
 
        spin_lock(&u->ring_prod_lock);
 
-       if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
-               u->ring[EVTCHN_RING_MASK(u->ring_prod)] = evtchn->port;
+       if ((u->ring_prod - u->ring_cons) < u->ring_size) {
+               *evtchn_ring_entry(u, u->ring_prod) = evtchn->port;
                wmb(); /* Ensure ring contents visible */
                if (u->ring_cons == u->ring_prod++) {
                        wake_up_interruptible(&u->evtchn_wait);
@@ -200,10 +234,10 @@ static ssize_t evtchn_read(struct file *file, char __user *buf,
        }
 
        /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
-       if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
-               bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
+       if (((c ^ p) & u->ring_size) != 0) {
+               bytes1 = (u->ring_size - evtchn_ring_offset(u, c)) *
                        sizeof(evtchn_port_t);
-               bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t);
+               bytes2 = evtchn_ring_offset(u, p) * sizeof(evtchn_port_t);
        } else {
                bytes1 = (p - c) * sizeof(evtchn_port_t);
                bytes2 = 0;
@@ -219,7 +253,7 @@ static ssize_t evtchn_read(struct file *file, char __user *buf,
 
        rc = -EFAULT;
        rmb(); /* Ensure that we see the port before we copy it. */
-       if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
+       if (copy_to_user(buf, evtchn_ring_entry(u, c), bytes1) ||
            ((bytes2 != 0) &&
             copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
                goto unlock_out;
@@ -278,6 +312,66 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf,
        return rc;
 }
 
+static int evtchn_resize_ring(struct per_user_data *u)
+{
+       unsigned int new_size;
+       evtchn_port_t *new_ring, *old_ring;
+       unsigned int p, c;
+
+       /*
+        * Ensure the ring is large enough to capture all possible
+        * events, i.e. one free slot for each bound event.
+        */
+       if (u->nr_evtchns <= u->ring_size)
+               return 0;
+
+       if (u->ring_size == 0)
+               new_size = 64;
+       else
+               new_size = 2 * u->ring_size;
+
+       new_ring = evtchn_alloc_ring(new_size);
+       if (!new_ring)
+               return -ENOMEM;
+
+       old_ring = u->ring;
+
+       /*
+        * Access to the ring contents is serialized by either the
+        * prod /or/ cons lock so take both when resizing.
+        */
+       mutex_lock(&u->ring_cons_mutex);
+       spin_lock_irq(&u->ring_prod_lock);
+
+       /*
+        * Copy the old ring contents to the new ring.
+        *
+        * If the ring contents cross the end of the current ring,
+        * they need to be copied in two chunks.
+        *
+        * +---------+    +------------------+
+        * |34567  12| -> |       1234567    |
+        * +-----p-c-+    +------------------+
+        */
+       p = evtchn_ring_offset(u, u->ring_prod);
+       c = evtchn_ring_offset(u, u->ring_cons);
+       if (p < c) {
+               memcpy(new_ring + c, u->ring + c, (u->ring_size - c) * sizeof(*u->ring));
+               memcpy(new_ring + u->ring_size, u->ring, p * sizeof(*u->ring));
+       } else
+               memcpy(new_ring + c, u->ring + c, (p - c) * sizeof(*u->ring));
+
+       u->ring = new_ring;
+       u->ring_size = new_size;
+
+       spin_unlock_irq(&u->ring_prod_lock);
+       mutex_unlock(&u->ring_cons_mutex);
+
+       evtchn_free_ring(old_ring);
+
+       return 0;
+}
+
 static int evtchn_bind_to_user(struct per_user_data *u, int port)
 {
        struct user_evtchn *evtchn;
@@ -305,6 +399,10 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
        if (rc < 0)
                goto err;
 
+       rc = evtchn_resize_ring(u);
+       if (rc < 0)
+               goto err;
+
        rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, 0,
                                       u->name, evtchn);
        if (rc < 0)
@@ -503,13 +601,6 @@ static int evtchn_open(struct inode *inode, struct file *filp)
 
        init_waitqueue_head(&u->evtchn_wait);
 
-       u->ring = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
-       if (u->ring == NULL) {
-               kfree(u->name);
-               kfree(u);
-               return -ENOMEM;
-       }
-
        mutex_init(&u->bind_mutex);
        mutex_init(&u->ring_cons_mutex);
        spin_lock_init(&u->ring_prod_lock);
@@ -532,7 +623,7 @@ static int evtchn_release(struct inode *inode, struct file *filp)
                evtchn_unbind_from_user(u, evtchn);
        }
 
-       free_page((unsigned long)u->ring);
+       evtchn_free_ring(u->ring);
        kfree(u->name);
        kfree(u);
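
evtchn_resize_ring() above doubles a power-of-two ring while preserving its live contents. A standalone sketch of one simple way to do such a doubling: copy the old contents twice, so every element stays readable at its new masked offset whether or not the data wrapped. This is a simplification for illustration, not the driver's exact copy logic, and it leaves out all locking:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ring {
        unsigned int *slots;
        unsigned int size;              /* a power of two, or 0 when empty */
        unsigned int prod, cons;        /* free-running counters */
};

/* Double the ring. Duplicating the old contents keeps every live element
 * at its new offset (idx & (new_size - 1)), wrapped or not. */
static int ring_grow(struct ring *r)
{
        unsigned int new_size = r->size ? 2 * r->size : 64;
        unsigned int *new_slots = malloc(new_size * sizeof(*new_slots));

        if (!new_slots)
                return -1;

        if (r->size) {
                memcpy(new_slots, r->slots, r->size * sizeof(*r->slots));
                memcpy(new_slots + r->size, r->slots,
                       r->size * sizeof(*r->slots));
        }

        free(r->slots);
        r->slots = new_slots;
        r->size = new_size;
        return 0;
}

int main(void)
{
        struct ring r = { 0 };
        unsigned int i;

        if (ring_grow(&r))                      /* 0 -> 64 slots */
                return 1;
        for (i = 0; i < 60; i++)
                r.slots[r.prod++ & (r.size - 1)] = i;
        if (ring_grow(&r)) {                    /* 64 -> 128, contents kept */
                free(r.slots);
                return 1;
        }
        printf("first element: %u\n", r.slots[r.cons & (r.size - 1)]);
        free(r.slots);
        return 0;
}
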
 
index 2ea0b3b..1be5dd0 100644 (file)
@@ -804,7 +804,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 
        vma->vm_ops = &gntdev_vmops;
 
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
 
        if (use_ptemod)
                vma->vm_flags |= VM_DONTCOPY;
index da3f32f..6ce72d8 100644 (file)
@@ -46,6 +46,12 @@ config FS_DAX
          or if unsure, say N.  Saying Y will increase the size of the kernel
          by about 5kB.
 
+config FS_DAX_PMD
+       bool
+       default FS_DAX
+       depends on FS_DAX
+       depends on BROKEN
+
 endif # BLOCK
 
 # Posix ACL utility routines
index bb0dfb1..c25639e 100644 (file)
@@ -390,9 +390,17 @@ int bdev_read_page(struct block_device *bdev, sector_t sector,
                        struct page *page)
 {
        const struct block_device_operations *ops = bdev->bd_disk->fops;
+       int result = -EOPNOTSUPP;
+
        if (!ops->rw_page || bdev_get_integrity(bdev))
-               return -EOPNOTSUPP;
-       return ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ);
+               return result;
+
+       result = blk_queue_enter(bdev->bd_queue, GFP_KERNEL);
+       if (result)
+               return result;
+       result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ);
+       blk_queue_exit(bdev->bd_queue);
+       return result;
 }
 EXPORT_SYMBOL_GPL(bdev_read_page);
 
@@ -421,14 +429,20 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
        int result;
        int rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE;
        const struct block_device_operations *ops = bdev->bd_disk->fops;
+
        if (!ops->rw_page || bdev_get_integrity(bdev))
                return -EOPNOTSUPP;
+       result = blk_queue_enter(bdev->bd_queue, GFP_KERNEL);
+       if (result)
+               return result;
+
        set_page_writeback(page);
        result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, rw);
        if (result)
                end_page_writeback(page);
        else
                unlock_page(page);
+       blk_queue_exit(bdev->bd_queue);
        return result;
 }
 EXPORT_SYMBOL_GPL(bdev_write_page);
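
bdev_read_page() and bdev_write_page() above now take a queue reference with blk_queue_enter() before calling ->rw_page() and drop it with blk_queue_exit() on every path. A minimal standalone sketch of that take-reference/do-work/always-release shape, with stubs standing in for the block-layer calls (names are illustrative only):

#include <stdio.h>

/* Illustrative stubs: pretend these pin and unpin a queue reference. */
static int queue_enter(void)
{
        printf("queue reference taken\n");
        return 0;               /* non-zero would mean the queue is going away */
}

static void queue_exit(void)
{
        printf("queue reference dropped\n");
}

static int do_page_io(void)
{
        return 0;               /* the actual ->rw_page work would go here */
}

static int rw_one_page(void)
{
        int ret = queue_enter();

        if (ret)
                return ret;     /* never touch the queue without a reference */
        ret = do_page_io();
        queue_exit();           /* released on success and failure alike */
        return ret;
}

int main(void)
{
        return rw_one_page();
}
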
index 6dcdb2e..d453d62 100644 (file)
@@ -355,7 +355,7 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
 
        index = srcu_read_lock(&fs_info->subvol_srcu);
 
-       root = btrfs_read_fs_root_no_name(fs_info, &root_key);
+       root = btrfs_get_fs_root(fs_info, &root_key, false);
        if (IS_ERR(root)) {
                srcu_read_unlock(&fs_info->subvol_srcu, index);
                ret = PTR_ERR(root);
index 8c58191..35489e7 100644 (file)
@@ -3416,6 +3416,7 @@ int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
 struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr);
+void btrfs_get_block_group(struct btrfs_block_group_cache *cache);
 void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
 int get_block_group_index(struct btrfs_block_group_cache *cache);
 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
@@ -3479,6 +3480,9 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root, u64 bytes_used,
                           u64 type, u64 chunk_objectid, u64 chunk_offset,
                           u64 size);
+struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
+                               struct btrfs_fs_info *fs_info,
+                               const u64 chunk_offset);
 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 group_start,
                             struct extent_map *em);
index acf3ed1..4b89680 100644 (file)
@@ -124,7 +124,7 @@ static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
        return (cache->flags & bits) == bits;
 }
 
-static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
+void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
 {
        atomic_inc(&cache->count);
 }
@@ -5915,19 +5915,6 @@ static int update_block_group(struct btrfs_trans_handle *trans,
                        set_extent_dirty(info->pinned_extents,
                                         bytenr, bytenr + num_bytes - 1,
                                         GFP_NOFS | __GFP_NOFAIL);
-                       /*
-                        * No longer have used bytes in this block group, queue
-                        * it for deletion.
-                        */
-                       if (old_val == 0) {
-                               spin_lock(&info->unused_bgs_lock);
-                               if (list_empty(&cache->bg_list)) {
-                                       btrfs_get_block_group(cache);
-                                       list_add_tail(&cache->bg_list,
-                                                     &info->unused_bgs);
-                               }
-                               spin_unlock(&info->unused_bgs_lock);
-                       }
                }
 
                spin_lock(&trans->transaction->dirty_bgs_lock);
@@ -5939,6 +5926,22 @@ static int update_block_group(struct btrfs_trans_handle *trans,
                }
                spin_unlock(&trans->transaction->dirty_bgs_lock);
 
+               /*
+                * No longer have used bytes in this block group, queue it for
+                * deletion. We do this after adding the block group to the
+                * dirty list to avoid races between cleaner kthread and space
+                * cache writeout.
+                */
+               if (!alloc && old_val == 0) {
+                       spin_lock(&info->unused_bgs_lock);
+                       if (list_empty(&cache->bg_list)) {
+                               btrfs_get_block_group(cache);
+                               list_add_tail(&cache->bg_list,
+                                             &info->unused_bgs);
+                       }
+                       spin_unlock(&info->unused_bgs_lock);
+               }
+
                btrfs_put_block_group(cache);
                total -= num_bytes;
                bytenr += num_bytes;
@@ -8105,21 +8108,47 @@ reada:
 }
 
 /*
- * TODO: Modify related function to add related node/leaf to dirty_extent_root,
- * for later qgroup accounting.
- *
- * Current, this function does nothing.
+ * These may not be seen by the usual inc/dec ref code so we have to
+ * add them here.
  */
+static int record_one_subtree_extent(struct btrfs_trans_handle *trans,
+                                    struct btrfs_root *root, u64 bytenr,
+                                    u64 num_bytes)
+{
+       struct btrfs_qgroup_extent_record *qrecord;
+       struct btrfs_delayed_ref_root *delayed_refs;
+
+       qrecord = kmalloc(sizeof(*qrecord), GFP_NOFS);
+       if (!qrecord)
+               return -ENOMEM;
+
+       qrecord->bytenr = bytenr;
+       qrecord->num_bytes = num_bytes;
+       qrecord->old_roots = NULL;
+
+       delayed_refs = &trans->transaction->delayed_refs;
+       spin_lock(&delayed_refs->lock);
+       if (btrfs_qgroup_insert_dirty_extent(delayed_refs, qrecord))
+               kfree(qrecord);
+       spin_unlock(&delayed_refs->lock);
+
+       return 0;
+}
+
 static int account_leaf_items(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              struct extent_buffer *eb)
 {
        int nr = btrfs_header_nritems(eb);
-       int i, extent_type;
+       int i, extent_type, ret;
        struct btrfs_key key;
        struct btrfs_file_extent_item *fi;
        u64 bytenr, num_bytes;
 
+       /* We can be called directly from walk_up_proc() */
+       if (!root->fs_info->quota_enabled)
+               return 0;
+
        for (i = 0; i < nr; i++) {
                btrfs_item_key_to_cpu(eb, &key, i);
 
@@ -8138,6 +8167,10 @@ static int account_leaf_items(struct btrfs_trans_handle *trans,
                        continue;
 
                num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
+
+               ret = record_one_subtree_extent(trans, root, bytenr, num_bytes);
+               if (ret)
+                       return ret;
        }
        return 0;
 }
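
The record_one_subtree_extent() helper added above follows a common pattern: allocate the record outside the lock, insert it under the delayed-refs spinlock, and free it on the spot if an equal entry is already queued. A minimal userspace sketch of that insert-or-discard shape, using a pthread mutex and a sorted singly linked list (all names here are illustrative, not btrfs API):

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

struct record {
        uint64_t bytenr;
        struct record *next;
};

static struct record *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns 0 on success; frees the record if a duplicate already exists. */
static int record_insert(uint64_t bytenr)
{
        struct record *rec = malloc(sizeof(*rec));
        struct record **p;

        if (!rec)
                return -1;              /* -ENOMEM in the kernel version */
        rec->bytenr = bytenr;

        pthread_mutex_lock(&lock);
        for (p = &head; *p && (*p)->bytenr < bytenr; p = &(*p)->next)
                ;
        if (*p && (*p)->bytenr == bytenr) {
                free(rec);              /* already recorded, drop the duplicate */
        } else {
                rec->next = *p;
                *p = rec;
        }
        pthread_mutex_unlock(&lock);
        return 0;
}
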
@@ -8206,8 +8239,6 @@ static int adjust_slots_upwards(struct btrfs_root *root,
 
 /*
  * root_eb is the subtree root and is locked before this function is called.
- * TODO: Modify this function to mark all (including complete shared node)
- * to dirty_extent_root to allow it get accounted in qgroup.
  */
 static int account_shared_subtree(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
@@ -8285,6 +8316,11 @@ walk_down:
                        btrfs_tree_read_lock(eb);
                        btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
                        path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
+
+                       ret = record_one_subtree_extent(trans, root, child_bytenr,
+                                                       root->nodesize);
+                       if (ret)
+                               goto out;
                }
 
                if (level == 0) {
@@ -10256,6 +10292,47 @@ out:
        return ret;
 }
 
+struct btrfs_trans_handle *
+btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info,
+                                    const u64 chunk_offset)
+{
+       struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
+       struct extent_map *em;
+       struct map_lookup *map;
+       unsigned int num_items;
+
+       read_lock(&em_tree->lock);
+       em = lookup_extent_mapping(em_tree, chunk_offset, 1);
+       read_unlock(&em_tree->lock);
+       ASSERT(em && em->start == chunk_offset);
+
+       /*
+        * We need to reserve 3 + N units from the metadata space info in order
+        * to remove a block group (done at btrfs_remove_chunk() and at
+        * btrfs_remove_block_group()), which are used for:
+        *
+        * 1 unit for adding the free space inode's orphan (located in the tree
+        * of tree roots).
+        * 1 unit for deleting the block group item (located in the extent
+        * tree).
+        * 1 unit for deleting the free space item (located in tree of tree
+        * roots).
+        * N units for deleting N device extent items corresponding to each
+        * stripe (located in the device tree).
+        *
+        * In order to remove a block group we also need to reserve units in the
+        * system space info in order to update the chunk tree (update one or
+        * more device items and remove one chunk item), but this is done at
+        * btrfs_remove_chunk() through a call to check_system_chunk().
+        */
+       map = (struct map_lookup *)em->bdev;
+       num_items = 3 + map->num_stripes;
+       free_extent_map(em);
+
+       return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
+                                                          num_items, 1);
+}
+
 /*
  * Process the unused_bgs list and remove any that don't have any allocated
  * space inside of them.
@@ -10322,8 +10399,8 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
                 * Want to do this before we do anything else so we can recover
                 * properly if we fail to join the transaction.
                 */
-               /* 1 for btrfs_orphan_reserve_metadata() */
-               trans = btrfs_start_transaction(root, 1);
+               trans = btrfs_start_trans_remove_block_group(fs_info,
+                                                    block_group->key.objectid);
                if (IS_ERR(trans)) {
                        btrfs_dec_block_group_ro(root, block_group);
                        ret = PTR_ERR(trans);
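
As a quick worked example of the reservation math in btrfs_start_trans_remove_block_group() above: a chunk striped over 4 devices needs 3 fixed metadata units (the free space inode's orphan, the block group item, the free space item) plus one device extent item per stripe, so num_items = 3 + 4 = 7. A trivial restatement in C:

/* Illustrative restatement of the num_items computation above. */
static unsigned int block_group_removal_items(unsigned int num_stripes)
{
        return 3 + num_stripes;         /* e.g. 7 units for a 4-stripe chunk */
}
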
index 977e715..72e7346 100644 (file)
@@ -1882,8 +1882,13 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        struct btrfs_log_ctx ctx;
        int ret = 0;
        bool full_sync = 0;
-       const u64 len = end - start + 1;
+       u64 len;
 
+       /*
+        * The range length can be represented by u64; do the typecasts to
+        * avoid signed overflow if it's [0, LLONG_MAX], e.g. from fsync().
+        */
+       len = (u64)end - (u64)start + 1;
        trace_btrfs_sync_file(file, datasync);
 
        /*
@@ -2071,8 +2076,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
                        }
                }
                if (!full_sync) {
-                       ret = btrfs_wait_ordered_range(inode, start,
-                                                      end - start + 1);
+                       ret = btrfs_wait_ordered_range(inode, start, len);
                        if (ret) {
                                btrfs_end_transaction(trans, root);
                                goto out;
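
The len change above matters because fsync() passes start = 0 and end = LLONG_MAX: with signed 64-bit arithmetic, end - start + 1 overflows (undefined behaviour), while doing the subtraction in u64 wraps to the intended 2^63-byte length. A small standalone illustration (plain C, not kernel code):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        long long start = 0, end = LLONG_MAX;   /* what fsync() passes down */

        /* end - start + 1 in signed math would overflow LLONG_MAX here. */
        uint64_t len = (uint64_t)end - (uint64_t)start + 1;

        printf("len = %llu (0x%llx)\n",
               (unsigned long long)len, (unsigned long long)len);
        return 0;
}
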
index 994490d..a70c579 100644 (file)
@@ -4046,9 +4046,7 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
  */
 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
 {
-       struct btrfs_trans_handle *trans;
        struct btrfs_root *root = BTRFS_I(dir)->root;
-       int ret;
 
        /*
         * 1 for the possible orphan item
@@ -4057,27 +4055,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
         * 1 for the inode ref
         * 1 for the inode
         */
-       trans = btrfs_start_transaction(root, 5);
-       if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
-               return trans;
-
-       if (PTR_ERR(trans) == -ENOSPC) {
-               u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
-
-               trans = btrfs_start_transaction(root, 0);
-               if (IS_ERR(trans))
-                       return trans;
-               ret = btrfs_cond_migrate_bytes(root->fs_info,
-                                              &root->fs_info->trans_block_rsv,
-                                              num_bytes, 5);
-               if (ret) {
-                       btrfs_end_transaction(trans, root);
-                       return ERR_PTR(ret);
-               }
-               trans->block_rsv = &root->fs_info->trans_block_rsv;
-               trans->bytes_reserved = num_bytes;
-       }
-       return trans;
+       return btrfs_start_transaction_fallback_global_rsv(root, 5, 5);
 }
 
 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
index 93e12c1..5279fda 100644 (file)
@@ -993,9 +993,10 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
        mutex_lock(&fs_info->qgroup_ioctl_lock);
        if (!fs_info->quota_root)
                goto out;
-       spin_lock(&fs_info->qgroup_lock);
        fs_info->quota_enabled = 0;
        fs_info->pending_quota_state = 0;
+       btrfs_qgroup_wait_for_completion(fs_info);
+       spin_lock(&fs_info->qgroup_lock);
        quota_root = fs_info->quota_root;
        fs_info->quota_root = NULL;
        fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
@@ -1461,6 +1462,8 @@ struct btrfs_qgroup_extent_record
        struct btrfs_qgroup_extent_record *entry;
        u64 bytenr = record->bytenr;
 
+       assert_spin_locked(&delayed_refs->lock);
+
        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
index 2907a77..b091d94 100644 (file)
@@ -3432,7 +3432,9 @@ out:
 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
                                          struct btrfs_device *scrub_dev,
                                          u64 chunk_offset, u64 length,
-                                         u64 dev_offset, int is_dev_replace)
+                                         u64 dev_offset,
+                                         struct btrfs_block_group_cache *cache,
+                                         int is_dev_replace)
 {
        struct btrfs_mapping_tree *map_tree =
                &sctx->dev_root->fs_info->mapping_tree;
@@ -3445,8 +3447,18 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
        em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
        read_unlock(&map_tree->map_tree.lock);
 
-       if (!em)
-               return -EINVAL;
+       if (!em) {
+               /*
+                * Might have been an unused block group deleted by the cleaner
+                * kthread or relocation.
+                */
+               spin_lock(&cache->lock);
+               if (!cache->removed)
+                       ret = -EINVAL;
+               spin_unlock(&cache->lock);
+
+               return ret;
+       }
 
        map = (struct map_lookup *)em->bdev;
        if (em->start != chunk_offset)
@@ -3483,6 +3495,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
        u64 length;
        u64 chunk_offset;
        int ret = 0;
+       int ro_set;
        int slot;
        struct extent_buffer *l;
        struct btrfs_key key;
@@ -3568,7 +3581,21 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
                scrub_pause_on(fs_info);
                ret = btrfs_inc_block_group_ro(root, cache);
                scrub_pause_off(fs_info);
-               if (ret) {
+
+               if (ret == 0) {
+                       ro_set = 1;
+               } else if (ret == -ENOSPC) {
+                       /*
+                        * btrfs_inc_block_group_ro returns -ENOSPC when it
+                        * fails to create a new chunk for metadata.
+                        * This is not a problem for scrub/replace, because
+                        * metadata is always COWed and our scrub pauses
+                        * transaction commits.
+                        */
+                       ro_set = 0;
+               } else {
+                       btrfs_warn(fs_info, "failed setting block group ro, ret=%d\n",
+                                  ret);
                        btrfs_put_block_group(cache);
                        break;
                }
@@ -3577,7 +3604,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
                dev_replace->cursor_left = found_key.offset;
                dev_replace->item_needs_writeback = 1;
                ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
-                                 found_key.offset, is_dev_replace);
+                                 found_key.offset, cache, is_dev_replace);
 
                /*
                 * flush, submit all pending read and write bios, afterwards
@@ -3611,7 +3638,30 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 
                scrub_pause_off(fs_info);
 
-               btrfs_dec_block_group_ro(root, cache);
+               if (ro_set)
+                       btrfs_dec_block_group_ro(root, cache);
+
+               /*
+                * We might have prevented the cleaner kthread from deleting
+                * this block group if it was already unused because we raced
+                * and set it to RO mode first. So add it back to the unused
+                * list, otherwise it might not ever be deleted unless a manual
+                * balance is triggered or it becomes used and unused again.
+                */
+               spin_lock(&cache->lock);
+               if (!cache->removed && !cache->ro && cache->reserved == 0 &&
+                   btrfs_block_group_used(&cache->item) == 0) {
+                       spin_unlock(&cache->lock);
+                       spin_lock(&fs_info->unused_bgs_lock);
+                       if (list_empty(&cache->bg_list)) {
+                               btrfs_get_block_group(cache);
+                               list_add_tail(&cache->bg_list,
+                                             &fs_info->unused_bgs);
+                       }
+                       spin_unlock(&fs_info->unused_bgs_lock);
+               } else {
+                       spin_unlock(&cache->lock);
+               }
 
                btrfs_put_block_group(cache);
                if (ret)
index c8c3d70..8b72b00 100644 (file)
@@ -898,8 +898,10 @@ int btrfs_test_free_space_cache(void)
        }
 
        root = btrfs_alloc_dummy_root();
-       if (!root)
+       if (IS_ERR(root)) {
+               ret = PTR_ERR(root);
                goto out;
+       }
 
        root->fs_info = btrfs_alloc_dummy_fs_info();
        if (!root->fs_info)
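
The test fix above switches from a NULL check to IS_ERR()/PTR_ERR() because btrfs_alloc_dummy_root() reports failure by encoding an errno in the returned pointer. The real helpers live in <linux/err.h>; a self-contained userspace approximation of the idea (a sketch, not the kernel implementation):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO       4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *alloc_root(int fail)
{
        if (fail)
                return ERR_PTR(-ENOMEM);        /* error encoded in the pointer */
        return malloc(64);
}

int main(void)
{
        void *root = alloc_root(1);

        if (IS_ERR(root)) {                     /* not a NULL check */
                printf("alloc failed: %ld\n", PTR_ERR(root));
                return 1;
        }
        free(root);
        return 0;
}
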
index 418c6a2..3367a3c 100644 (file)
@@ -592,6 +592,38 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
        return start_transaction(root, num_items, TRANS_START,
                                 BTRFS_RESERVE_FLUSH_ALL);
 }
+struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
+                                       struct btrfs_root *root,
+                                       unsigned int num_items,
+                                       int min_factor)
+{
+       struct btrfs_trans_handle *trans;
+       u64 num_bytes;
+       int ret;
+
+       trans = btrfs_start_transaction(root, num_items);
+       if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
+               return trans;
+
+       trans = btrfs_start_transaction(root, 0);
+       if (IS_ERR(trans))
+               return trans;
+
+       num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
+       ret = btrfs_cond_migrate_bytes(root->fs_info,
+                                      &root->fs_info->trans_block_rsv,
+                                      num_bytes,
+                                      min_factor);
+       if (ret) {
+               btrfs_end_transaction(trans, root);
+               return ERR_PTR(ret);
+       }
+
+       trans->block_rsv = &root->fs_info->trans_block_rsv;
+       trans->bytes_reserved = num_bytes;
+
+       return trans;
+}
 
 struct btrfs_trans_handle *btrfs_start_transaction_lflush(
                                        struct btrfs_root *root,
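
The new btrfs_start_transaction_fallback_global_rsv() helper centralizes a pattern: try the full per-item reservation first, and if that fails with -ENOSPC, open a zero-item transaction and conditionally migrate bytes from the global reserve (gated by min_factor). A stripped-down userspace sketch of the same try-then-fall-back shape, with illustrative names and a simple 'floor' standing in for the min_factor check:

#include <errno.h>
#include <stdio.h>

/* Hypothetical pools, in bytes; names are illustrative only. */
static long normal_rsv;                 /* per-transaction space: exhausted */
static long global_rsv = 1000;          /* shared emergency reserve */

static int take(long *pool, long bytes)
{
        if (*pool < bytes)
                return -ENOSPC;
        *pool -= bytes;
        return 0;
}

/*
 * Try the normal reservation first; on -ENOSPC fall back to the shared
 * reserve, but only if doing so leaves at least 'floor' bytes in it for
 * other critical users.
 */
static int reserve_with_fallback(long bytes, long floor)
{
        int ret = take(&normal_rsv, bytes);

        if (ret != -ENOSPC)
                return ret;
        if (global_rsv - bytes < floor)
                return -ENOSPC;
        return take(&global_rsv, bytes);
}

int main(void)
{
        printf("ret = %d, global left = %ld\n",
               reserve_with_fallback(100, 500), global_rsv);
        return 0;
}
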
index b05b2f6..0da21ca 100644 (file)
@@ -185,6 +185,10 @@ int btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
                                                   unsigned int num_items);
+struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
+                                       struct btrfs_root *root,
+                                       unsigned int num_items,
+                                       int min_factor);
 struct btrfs_trans_handle *btrfs_start_transaction_lflush(
                                        struct btrfs_root *root,
                                        unsigned int num_items);
index a6df8fd..4564522 100644 (file)
@@ -1973,8 +1973,7 @@ void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
        if (srcdev->writeable) {
                fs_devices->rw_devices--;
                /* zero out the old super if it is writable */
-               btrfs_scratch_superblocks(srcdev->bdev,
-                                       rcu_str_deref(srcdev->name));
+               btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
        }
 
        if (srcdev->bdev)
@@ -2024,8 +2023,7 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
        btrfs_sysfs_rm_device_link(fs_info->fs_devices, tgtdev);
 
        if (tgtdev->bdev) {
-               btrfs_scratch_superblocks(tgtdev->bdev,
-                                       rcu_str_deref(tgtdev->name));
+               btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
                fs_info->fs_devices->open_devices--;
        }
        fs_info->fs_devices->num_devices--;
@@ -2853,7 +2851,8 @@ static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset)
        if (ret)
                return ret;
 
-       trans = btrfs_start_transaction(root, 0);
+       trans = btrfs_start_trans_remove_block_group(root->fs_info,
+                                                    chunk_offset);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                btrfs_std_error(root->fs_info, ret, NULL);
@@ -3123,7 +3122,7 @@ static int chunk_profiles_filter(u64 chunk_type,
        return 1;
 }
 
-static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
+static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
                              struct btrfs_balance_args *bargs)
 {
        struct btrfs_block_group_cache *cache;
@@ -3156,7 +3155,7 @@ static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
        return ret;
 }
 
-static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info,
+static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
                u64 chunk_offset, struct btrfs_balance_args *bargs)
 {
        struct btrfs_block_group_cache *cache;
index ec57123..d5c84f6 100644 (file)
@@ -382,7 +382,7 @@ struct map_lookup {
 #define BTRFS_BALANCE_ARGS_LIMIT       (1ULL << 5)
 #define BTRFS_BALANCE_ARGS_LIMIT_RANGE (1ULL << 6)
 #define BTRFS_BALANCE_ARGS_STRIPES_RANGE (1ULL << 7)
-#define BTRFS_BALANCE_ARGS_USAGE_RANGE (1ULL << 8)
+#define BTRFS_BALANCE_ARGS_USAGE_RANGE (1ULL << 10)
 
 #define BTRFS_BALANCE_ARGS_MASK                        \
        (BTRFS_BALANCE_ARGS_PROFILES |          \
index c81ce7f..a7a1b21 100644 (file)
@@ -1636,6 +1636,116 @@ const struct file_operations configfs_dir_operations = {
        .iterate        = configfs_readdir,
 };
 
+/**
+ * configfs_register_group - creates a parent-child relation between two groups
+ * @parent_group:      parent group
+ * @group:             child group
+ *
+ * Links the two groups, creates a dentry for the child and attaches it to
+ * the parent dentry.
+ *
+ * Return: 0 on success, negative errno code on error
+ */
+int configfs_register_group(struct config_group *parent_group,
+                           struct config_group *group)
+{
+       struct configfs_subsystem *subsys = parent_group->cg_subsys;
+       struct dentry *parent;
+       int ret;
+
+       mutex_lock(&subsys->su_mutex);
+       link_group(parent_group, group);
+       mutex_unlock(&subsys->su_mutex);
+
+       parent = parent_group->cg_item.ci_dentry;
+
+       mutex_lock_nested(&d_inode(parent)->i_mutex, I_MUTEX_PARENT);
+       ret = create_default_group(parent_group, group);
+       if (!ret) {
+               spin_lock(&configfs_dirent_lock);
+               configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
+               spin_unlock(&configfs_dirent_lock);
+       }
+       mutex_unlock(&d_inode(parent)->i_mutex);
+       return ret;
+}
+EXPORT_SYMBOL(configfs_register_group);
+
+/**
+ * configfs_unregister_group() - unregisters a child group from its parent
+ * @group: parent group to be unregistered
+ *
+ * Undoes configfs_register_group()
+ */
+void configfs_unregister_group(struct config_group *group)
+{
+       struct configfs_subsystem *subsys = group->cg_subsys;
+       struct dentry *dentry = group->cg_item.ci_dentry;
+       struct dentry *parent = group->cg_item.ci_parent->ci_dentry;
+
+       mutex_lock_nested(&d_inode(parent)->i_mutex, I_MUTEX_PARENT);
+       spin_lock(&configfs_dirent_lock);
+       configfs_detach_prep(dentry, NULL);
+       spin_unlock(&configfs_dirent_lock);
+
+       configfs_detach_group(&group->cg_item);
+       d_inode(dentry)->i_flags |= S_DEAD;
+       dont_mount(dentry);
+       d_delete(dentry);
+       mutex_unlock(&d_inode(parent)->i_mutex);
+
+       dput(dentry);
+
+       mutex_lock(&subsys->su_mutex);
+       unlink_group(group);
+       mutex_unlock(&subsys->su_mutex);
+}
+EXPORT_SYMBOL(configfs_unregister_group);
+
+/**
+ * configfs_register_default_group() - allocates and registers a child group
+ * @parent_group:      parent group
+ * @name:              child group name
+ * @item_type:         child item type description
+ *
+ * Boilerplate to allocate and register a child group with its parent. We need
+ * kzalloc'ed memory because the child's default_groups list is initially empty.
+ *
+ * Return: allocated config group or ERR_PTR() on error
+ */
+struct config_group *
+configfs_register_default_group(struct config_group *parent_group,
+                               const char *name,
+                               struct config_item_type *item_type)
+{
+       int ret;
+       struct config_group *group;
+
+       group = kzalloc(sizeof(*group), GFP_KERNEL);
+       if (!group)
+               return ERR_PTR(-ENOMEM);
+       config_group_init_type_name(group, name, item_type);
+
+       ret = configfs_register_group(parent_group, group);
+       if (ret) {
+               kfree(group);
+               return ERR_PTR(ret);
+       }
+       return group;
+}
+EXPORT_SYMBOL(configfs_register_default_group);
+
+/**
+ * configfs_unregister_default_group() - unregisters and frees a child group
+ * @group:     the group to act on
+ */
+void configfs_unregister_default_group(struct config_group *group)
+{
+       configfs_unregister_group(group);
+       kfree(group);
+}
+EXPORT_SYMBOL(configfs_unregister_default_group);
+
 int configfs_register_subsystem(struct configfs_subsystem *subsys)
 {
        int err;
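
The new helpers above let a subsystem add and remove groups after the subsystem itself is registered. A minimal, hypothetical caller might look like the sketch below; my_parent_group and my_child_type are assumed to be defined elsewhere in the subsystem, and error handling is trimmed:

/* Sketch only: uses just the APIs declared in this patch plus <linux/err.h>. */
#include <linux/configfs.h>
#include <linux/err.h>

static struct config_group *my_child;

static int my_add_child(struct config_group *my_parent_group,
                        struct config_item_type *my_child_type)
{
        my_child = configfs_register_default_group(my_parent_group, "child",
                                                   my_child_type);
        if (IS_ERR(my_child))
                return PTR_ERR(my_child);
        return 0;
}

static void my_remove_child(void)
{
        configfs_unregister_default_group(my_child);    /* also frees it */
}
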
index d1e5cb7..43671b6 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -541,6 +541,10 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
        unsigned long pfn;
        int result = 0;
 
+       /* dax pmd mappings are broken wrt gup and fork */
+       if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
+               return VM_FAULT_FALLBACK;
+
        /* Fall back to PTEs if we're going to COW */
        if (write && !(vma->vm_flags & VM_SHARED))
                return VM_FAULT_FALLBACK;
index 3a71cea..748d35a 100644 (file)
@@ -569,6 +569,8 @@ static int parse_options(char *options, struct super_block *sb)
                        /* Fall through */
                case Opt_dax:
 #ifdef CONFIG_FS_DAX
+                       ext2_msg(sb, KERN_WARNING,
+               "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
                        set_opt(sbi->s_mount_opt, DAX);
 #else
                        ext2_msg(sb, KERN_INFO, "dax option not supported");
index 753f4e6..c9ab67d 100644 (file)
@@ -1664,8 +1664,12 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
                }
                sbi->s_jquota_fmt = m->mount_opt;
 #endif
-#ifndef CONFIG_FS_DAX
        } else if (token == Opt_dax) {
+#ifdef CONFIG_FS_DAX
+               ext4_msg(sb, KERN_WARNING,
+               "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
+                       sbi->s_mount_opt |= m->mount_opt;
+#else
                ext4_msg(sb, KERN_INFO, "dax option not supported");
                return -1;
 #endif
index 4afc4d9..8b2127f 100644 (file)
@@ -610,9 +610,9 @@ parse_record:
                int status = fat_parse_long(inode, &cpos, &bh, &de,
                                            &unicode, &nr_slots);
                if (status < 0) {
-                       ctx->pos = cpos;
+                       bh = NULL;
                        ret = status;
-                       goto out;
+                       goto end_of_dir;
                } else if (status == PARSE_INVALID)
                        goto record_end;
                else if (status == PARSE_NOT_LONGNAME)
@@ -654,8 +654,9 @@ parse_record:
        fill_len = short_len;
 
 start_filldir:
-       if (!fake_offset)
-               ctx->pos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry);
+       ctx->pos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry);
+       if (fake_offset && ctx->pos < 2)
+               ctx->pos = 2;
 
        if (!memcmp(de->name, MSDOS_DOT, MSDOS_NAME)) {
                if (!dir_emit_dot(file, ctx))
@@ -681,14 +682,19 @@ record_end:
        fake_offset = 0;
        ctx->pos = cpos;
        goto get_new;
+
 end_of_dir:
-       ctx->pos = cpos;
+       if (fake_offset && cpos < 2)
+               ctx->pos = 2;
+       else
+               ctx->pos = cpos;
 fill_failed:
        brelse(bh);
        if (unicode)
                __putname(unicode);
 out:
        mutex_unlock(&sbi->s_lock);
+
        return ret;
 }
 
index 316adb9..de4bdfa 100644 (file)
@@ -332,12 +332,17 @@ static void remove_huge_page(struct page *page)
  * truncation is indicated by end of range being LLONG_MAX
  *     In this case, we first scan the range and release found pages.
  *     After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
- *     maps and global counts.
+ *     maps and global counts.  Page faults can not race with truncation
+ *     in this routine.  hugetlb_no_page() prevents page faults in the
+ *     truncated range.  It checks i_size before allocation, and again after
+ *     with the page table lock for the page held.  The same lock must be
+ *     acquired to unmap a page.
  * hole punch is indicated if end is not LLONG_MAX
  *     In the hole punch case we scan the range and release found pages.
  *     Only when releasing a page is the associated region/reserv map
  *     deleted.  The region/reserv map for ranges without associated
- *     pages are not modified.
+ *     pages are not modified.  Page faults can race with hole punch.
+ *     This is indicated if we find a mapped page.
  * Note: If the passed end of range value is beyond the end of file, but
  * not LLONG_MAX this routine still performs a hole punch operation.
  */
@@ -361,46 +366,37 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
        next = start;
        while (next < end) {
                /*
-                * Make sure to never grab more pages that we
-                * might possibly need.
+                * Don't grab more pages than the number left in the range.
                 */
                if (end - next < lookup_nr)
                        lookup_nr = end - next;
 
                /*
-                * This pagevec_lookup() may return pages past 'end',
-                * so we must check for page->index > end.
+                * When no more pages are found, we are done.
                 */
-               if (!pagevec_lookup(&pvec, mapping, next, lookup_nr)) {
-                       if (next == start)
-                               break;
-                       next = start;
-                       continue;
-               }
+               if (!pagevec_lookup(&pvec, mapping, next, lookup_nr))
+                       break;
 
                for (i = 0; i < pagevec_count(&pvec); ++i) {
                        struct page *page = pvec.pages[i];
                        u32 hash;
 
+                       /*
+                        * The page (index) could be beyond end.  This is
+                        * only possible in the punch hole case as end is
+                        * max page offset in the truncate case.
+                        */
+                       next = page->index;
+                       if (next >= end)
+                               break;
+
                        hash = hugetlb_fault_mutex_hash(h, current->mm,
                                                        &pseudo_vma,
                                                        mapping, next, 0);
                        mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
                        lock_page(page);
-                       if (page->index >= end) {
-                               unlock_page(page);
-                               mutex_unlock(&hugetlb_fault_mutex_table[hash]);
-                               next = end;     /* we are done */
-                               break;
-                       }
-
-                       /*
-                        * If page is mapped, it was faulted in after being
-                        * unmapped.  Do nothing in this race case.  In the
-                        * normal case page is not mapped.
-                        */
-                       if (!page_mapped(page)) {
+                       if (likely(!page_mapped(page))) {
                                bool rsv_on_error = !PagePrivate(page);
                                /*
                                 * We must free the huge page and remove
@@ -421,17 +417,23 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
                                                hugetlb_fix_reserve_counts(
                                                        inode, rsv_on_error);
                                }
+                       } else {
+                               /*
+                                * If page is mapped, it was faulted in after
+                                * being unmapped.  It indicates a race between
+                                * hole punch and page fault.  Do nothing in
+                                * this case.  Getting here in a truncate
+                                * operation is a bug.
+                                */
+                               BUG_ON(truncate_op);
                        }
 
-                       if (page->index > next)
-                               next = page->index;
-
-                       ++next;
                        unlock_page(page);
-
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                }
+               ++next;
                huge_pagevec_release(&pvec);
+               cond_resched();
        }
 
        if (truncate_op)
@@ -647,9 +649,6 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
        if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
                i_size_write(inode, offset + len);
        inode->i_ctime = CURRENT_TIME;
-       spin_lock(&inode->i_lock);
-       inode->i_private = NULL;
-       spin_unlock(&inode->i_lock);
 out:
        mutex_unlock(&inode->i_mutex);
        return error;
index 79b1130..0a3f9b5 100644 (file)
@@ -525,6 +525,8 @@ static long __ncp_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg
                        switch (rqdata.cmd) {
                                case NCP_LOCK_EX:
                                case NCP_LOCK_SH:
+                                               if (rqdata.timeout < 0)
+                                                       return -EINVAL;
                                                if (rqdata.timeout == 0)
                                                        rqdata.timeout = NCP_LOCK_DEFAULT_TIMEOUT;
                                                else if (rqdata.timeout > NCP_LOCK_MAX_TIMEOUT)
index 646cdac..beac58b 100644 (file)
@@ -78,7 +78,8 @@ static __be32 *read_buf(struct xdr_stream *xdr, int nbytes)
 
        p = xdr_inline_decode(xdr, nbytes);
        if (unlikely(p == NULL))
-               printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed!\n");
+               printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed "
+                                                       "or truncated request.\n");
        return p;
 }
 
@@ -889,6 +890,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
        struct cb_compound_hdr_arg hdr_arg = { 0 };
        struct cb_compound_hdr_res hdr_res = { NULL };
        struct xdr_stream xdr_in, xdr_out;
+       struct xdr_buf *rq_arg = &rqstp->rq_arg;
        __be32 *p, status;
        struct cb_process_state cps = {
                .drc_status = 0,
@@ -900,7 +902,8 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
 
        dprintk("%s: start\n", __func__);
 
-       xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base);
+       rq_arg->len = rq_arg->head[0].iov_len + rq_arg->page_len;
+       xdr_init_decode(&xdr_in, rq_arg, rq_arg->head[0].iov_base);
 
        p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len);
        xdr_init_encode(&xdr_out, &rqstp->rq_res, p);
index 326d9e1..31b0a52 100644 (file)
@@ -618,7 +618,10 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
                nfs_inc_stats(inode, NFSIOS_SETATTRTRUNC);
                nfs_vmtruncate(inode, attr->ia_size);
        }
-       nfs_update_inode(inode, fattr);
+       if (fattr->valid)
+               nfs_update_inode(inode, fattr);
+       else
+               NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
        spin_unlock(&inode->i_lock);
 }
 EXPORT_SYMBOL_GPL(nfs_setattr_update_inode);
@@ -1824,7 +1827,11 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
                if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0)
                        nfsi->attr_gencount = fattr->gencount;
        }
-       invalid &= ~NFS_INO_INVALID_ATTR;
+
+       /* Don't declare attrcache up to date if there were no attrs! */
+       if (fattr->valid != 0)
+               invalid &= ~NFS_INO_INVALID_ATTR;
+
        /* Don't invalidate the data if we were to blame */
        if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
                                || S_ISLNK(inode->i_mode)))
index 3e92a3c..6b1ce98 100644 (file)
@@ -14,7 +14,7 @@
 #include "pnfs.h"
 #include "internal.h"
 
-#define NFSDBG_FACILITY NFSDBG_PNFS
+#define NFSDBG_FACILITY NFSDBG_PROC
 
 static int nfs42_set_rw_stateid(nfs4_stateid *dst, struct file *file,
                                fmode_t fmode)
@@ -284,6 +284,7 @@ static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
                .dst_fh = NFS_FH(dst_inode),
                .src_offset = src_offset,
                .dst_offset = dst_offset,
+               .count = count,
                .dst_bitmask = server->cache_consistency_bitmask,
        };
        struct nfs42_clone_res res = {
index 223bedd..10410e8 100644 (file)
@@ -33,7 +33,7 @@ static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
                return ret;
        idr_preload(GFP_KERNEL);
        spin_lock(&nn->nfs_client_lock);
-       ret = idr_alloc(&nn->cb_ident_idr, clp, 0, 0, GFP_NOWAIT);
+       ret = idr_alloc(&nn->cb_ident_idr, clp, 1, 0, GFP_NOWAIT);
        if (ret >= 0)
                clp->cl_cb_ident = ret;
        spin_unlock(&nn->nfs_client_lock);
index 4aa5719..db9b5fe 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/file.h>
 #include <linux/falloc.h>
 #include <linux/nfs_fs.h>
+#include <uapi/linux/btrfs.h>  /* BTRFS_IOC_CLONE/BTRFS_IOC_CLONE_RANGE */
 #include "delegation.h"
 #include "internal.h"
 #include "iostat.h"
@@ -203,6 +204,7 @@ nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd,
        struct fd src_file;
        struct inode *src_inode;
        unsigned int bs = server->clone_blksize;
+       bool same_inode = false;
        int ret;
 
        /* dst file must be opened for writing */
@@ -221,10 +223,8 @@ nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd,
 
        src_inode = file_inode(src_file.file);
 
-       /* src and dst must be different files */
-       ret = -EINVAL;
        if (src_inode == dst_inode)
-               goto out_fput;
+               same_inode = true;
 
        /* src file must be opened for reading */
        if (!(src_file.file->f_mode & FMODE_READ))
@@ -249,8 +249,16 @@ nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd,
                        goto out_fput;
        }
 
+       /* verify if ranges are overlapped within the same file */
+       if (same_inode) {
+               if (dst_off + count > src_off && dst_off < src_off + count)
+                       goto out_fput;
+       }
+
        /* XXX: do we lock at all? what if server needs CB_RECALL_LAYOUT? */
-       if (dst_inode < src_inode) {
+       if (same_inode) {
+               mutex_lock(&src_inode->i_mutex);
+       } else if (dst_inode < src_inode) {
                mutex_lock_nested(&dst_inode->i_mutex, I_MUTEX_PARENT);
                mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_CHILD);
        } else {
@@ -275,7 +283,9 @@ nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd,
                truncate_inode_pages_range(&dst_inode->i_data, dst_off, dst_off + count - 1);
 
 out_unlock:
-       if (dst_inode < src_inode) {
+       if (same_inode) {
+               mutex_unlock(&src_inode->i_mutex);
+       } else if (dst_inode < src_inode) {
                mutex_unlock(&src_inode->i_mutex);
                mutex_unlock(&dst_inode->i_mutex);
        } else {
@@ -291,46 +301,31 @@ out_drop_write:
 
 static long nfs42_ioctl_clone_range(struct file *dst_file, void __user *argp)
 {
-       struct nfs_ioctl_clone_range_args args;
+       struct btrfs_ioctl_clone_range_args args;
 
        if (copy_from_user(&args, argp, sizeof(args)))
                return -EFAULT;
 
-       return nfs42_ioctl_clone(dst_file, args.src_fd, args.src_off, args.dst_off, args.count);
-}
-#else
-static long nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd,
-               u64 src_off, u64 dst_off, u64 count)
-{
-       return -ENOTTY;
-}
-
-static long nfs42_ioctl_clone_range(struct file *dst_file, void __user *argp)
-{
-       return -ENOTTY;
+       return nfs42_ioctl_clone(dst_file, args.src_fd, args.src_offset,
+                                args.dest_offset, args.src_length);
 }
-#endif /* CONFIG_NFS_V4_2 */
 
 long nfs4_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
        void __user *argp = (void __user *)arg;
 
        switch (cmd) {
-       case NFS_IOC_CLONE:
+       case BTRFS_IOC_CLONE:
                return nfs42_ioctl_clone(file, arg, 0, 0, 0);
-       case NFS_IOC_CLONE_RANGE:
+       case BTRFS_IOC_CLONE_RANGE:
                return nfs42_ioctl_clone_range(file, argp);
        }
 
        return -ENOTTY;
 }
+#endif /* CONFIG_NFS_V4_2 */
 
 const struct file_operations nfs4_file_operations = {
-#ifdef CONFIG_NFS_V4_2
-       .llseek         = nfs4_file_llseek,
-#else
-       .llseek         = nfs_file_llseek,
-#endif
        .read_iter      = nfs_file_read,
        .write_iter     = nfs_file_write,
        .mmap           = nfs_file_mmap,
@@ -342,14 +337,14 @@ const struct file_operations nfs4_file_operations = {
        .flock          = nfs_flock,
        .splice_read    = nfs_file_splice_read,
        .splice_write   = iter_file_splice_write,
-#ifdef CONFIG_NFS_V4_2
-       .fallocate      = nfs42_fallocate,
-#endif /* CONFIG_NFS_V4_2 */
        .check_flags    = nfs_check_flags,
        .setlease       = simple_nosetlease,
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_NFS_V4_2
+       .llseek         = nfs4_file_llseek,
+       .fallocate      = nfs42_fallocate,
        .unlocked_ioctl = nfs4_ioctl,
-#else
        .compat_ioctl   = nfs4_ioctl,
-#endif /* CONFIG_COMPAT */
+#else
+       .llseek         = nfs_file_llseek,
+#endif
 };
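
Two details in the ioctl change above generalize well: same-file clones are rejected only when the source and destination ranges overlap, and when two distinct inodes are involved their mutexes are taken in address order to avoid an ABBA deadlock. A standalone sketch of both checks (illustrative names, not NFS code):

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

/* Overlap test for two [off, off + count) ranges of equal length. */
static bool ranges_overlap(uint64_t src_off, uint64_t dst_off, uint64_t count)
{
        return dst_off + count > src_off && dst_off < src_off + count;
}

struct fake_inode {
        pthread_mutex_t lock;
};

/*
 * Lock the lower-addressed inode first (or take a single lock when both
 * sides are the same file) so two concurrent clones between the same
 * pair of files cannot deadlock.
 */
static void lock_two_inodes(struct fake_inode *a, struct fake_inode *b)
{
        if (a == b) {
                pthread_mutex_lock(&a->lock);
        } else if ((uintptr_t)a < (uintptr_t)b) {
                pthread_mutex_lock(&a->lock);
                pthread_mutex_lock(&b->lock);
        } else {
                pthread_mutex_lock(&b->lock);
                pthread_mutex_lock(&a->lock);
        }
}
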
index 765a035..8981803 100644 (file)
@@ -7866,7 +7866,7 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
                        spin_unlock(&inode->i_lock);
                goto out_restart;
        }
-       if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN)
+       if (nfs4_async_handle_error(task, server, state, &lgp->timeout) == -EAGAIN)
                goto out_restart;
 out:
        dprintk("<-- %s\n", __func__);
index dfed4f5..4e44412 100644 (file)
@@ -3615,6 +3615,7 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st
        status = 0;
        if (unlikely(!(bitmap[0] & FATTR4_WORD0_FS_LOCATIONS)))
                goto out;
+       bitmap[0] &= ~FATTR4_WORD0_FS_LOCATIONS;
        status = -EIO;
        /* Ignore borken servers that return unrequested attrs */
        if (unlikely(res == NULL))
index 93496c0..5a8ae21 100644 (file)
@@ -872,33 +872,38 @@ send_layoutget(struct pnfs_layout_hdr *lo,
 
        dprintk("--> %s\n", __func__);
 
-       lgp = kzalloc(sizeof(*lgp), gfp_flags);
-       if (lgp == NULL)
-               return NULL;
+       /*
+        * Synchronously retrieve layout information from server and
+        * store in lseg. If we race with a concurrent seqid morphing
+        * op, then re-send the LAYOUTGET.
+        */
+       do {
+               lgp = kzalloc(sizeof(*lgp), gfp_flags);
+               if (lgp == NULL)
+                       return NULL;
+
+               i_size = i_size_read(ino);
+
+               lgp->args.minlength = PAGE_CACHE_SIZE;
+               if (lgp->args.minlength > range->length)
+                       lgp->args.minlength = range->length;
+               if (range->iomode == IOMODE_READ) {
+                       if (range->offset >= i_size)
+                               lgp->args.minlength = 0;
+                       else if (i_size - range->offset < lgp->args.minlength)
+                               lgp->args.minlength = i_size - range->offset;
+               }
+               lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
+               lgp->args.range = *range;
+               lgp->args.type = server->pnfs_curr_ld->id;
+               lgp->args.inode = ino;
+               lgp->args.ctx = get_nfs_open_context(ctx);
+               lgp->gfp_flags = gfp_flags;
+               lgp->cred = lo->plh_lc_cred;
 
-       i_size = i_size_read(ino);
+               lseg = nfs4_proc_layoutget(lgp, gfp_flags);
+       } while (lseg == ERR_PTR(-EAGAIN));
 
-       lgp->args.minlength = PAGE_CACHE_SIZE;
-       if (lgp->args.minlength > range->length)
-               lgp->args.minlength = range->length;
-       if (range->iomode == IOMODE_READ) {
-               if (range->offset >= i_size)
-                       lgp->args.minlength = 0;
-               else if (i_size - range->offset < lgp->args.minlength)
-                       lgp->args.minlength = i_size - range->offset;
-       }
-       lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
-       lgp->args.range = *range;
-       lgp->args.type = server->pnfs_curr_ld->id;
-       lgp->args.inode = ino;
-       lgp->args.ctx = get_nfs_open_context(ctx);
-       lgp->gfp_flags = gfp_flags;
-       lgp->cred = lo->plh_lc_cred;
-
-       /* Synchronously retrieve layout information from server and
-        * store in lseg.
-        */
-       lseg = nfs4_proc_layoutget(lgp, gfp_flags);
        if (IS_ERR(lseg)) {
                switch (PTR_ERR(lseg)) {
                case -ENOMEM:
@@ -1687,6 +1692,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
                /* existing state ID, make sure the sequence number matches. */
                if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
                        dprintk("%s forget reply due to sequence\n", __func__);
+                       status = -EAGAIN;
                        goto out_forget_reply;
                }
                pnfs_set_layout_stateid(lo, &res->stateid, false);
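
The send_layoutget() rework above wraps argument setup and the call in a loop so a reply rejected for a stale seqid (now surfaced as -EAGAIN from pnfs_layout_process()) is retried with freshly built arguments. The same shape, stripped of the NFS specifics and using hypothetical names:

#include <errno.h>
#include <stdlib.h>

struct args { int attempt; };

/* Stand-in for the RPC: pretend the first two replies race with a
 * concurrent seqid change and must be retried. */
static int send_request(struct args *a)
{
        return a->attempt < 2 ? -EAGAIN : 0;
}

static int get_layout(void)
{
        static int attempts;
        struct args *a;
        int ret;

        do {
                a = calloc(1, sizeof(*a));      /* rebuild args on every try */
                if (!a)
                        return -ENOMEM;
                a->attempt = attempts++;
                ret = send_request(a);
                free(a);
        } while (ret == -EAGAIN);

        return ret;
}
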
index 3b48ac2..a03f6f4 100644 (file)
@@ -372,6 +372,8 @@ static int ocfs2_mknod(struct inode *dir,
                mlog_errno(status);
                goto leave;
        }
+       /* Update inode->i_mode after masking with the umask. */
+       inode->i_mode = mode;
 
        handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
                                                            S_ISDIR(mode),
index 801c21c..4cf700d 100644 (file)
@@ -809,6 +809,13 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
  */
 static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
 {
+       /*
+        * Check for signal early to make process killable when there are
+        * always buffers available
+        */
+       if (signal_pending(current))
+               return -ERESTARTSYS;
+
        while (!pipe->nrbufs) {
                if (!pipe->writers)
                        return 0;
@@ -884,6 +891,7 @@ ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd,
 
        splice_from_pipe_begin(sd);
        do {
+               cond_resched();
                ret = splice_from_pipe_next(pipe, sd);
                if (ret > 0)
                        ret = splice_from_pipe_feed(pipe, sd, actor);
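
The splice change is the usual recipe for making a long-running loop well behaved: check for pending signals before producing more work, and yield the CPU on every iteration. A userspace analogue uses a signal handler that sets a flag polled by the loop (a sketch, not the splice code):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t interrupted;

static void on_sigint(int sig)
{
        (void)sig;
        interrupted = 1;
}

int main(void)
{
        signal(SIGINT, on_sigint);

        for (;;) {
                /*
                 * Check for a pending signal before doing more work, so the
                 * loop stays interruptible even when input is always
                 * available; the splice fix checks signal_pending() for the
                 * same reason.
                 */
                if (interrupted) {
                        fprintf(stderr, "interrupted, bailing out\n");
                        return 1;
                }

                /* ... process one buffer ... */
                usleep(1000);   /* yield; loosely analogous to cond_resched() */
        }
}
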
index 590ad92..02fa1dc 100644 (file)
@@ -162,15 +162,8 @@ void sysv_set_inode(struct inode *inode, dev_t rdev)
                inode->i_fop = &sysv_dir_operations;
                inode->i_mapping->a_ops = &sysv_aops;
        } else if (S_ISLNK(inode->i_mode)) {
-               if (inode->i_blocks) {
-                       inode->i_op = &sysv_symlink_inode_operations;
-                       inode->i_mapping->a_ops = &sysv_aops;
-               } else {
-                       inode->i_op = &simple_symlink_inode_operations;
-                       inode->i_link = (char *)SYSV_I(inode)->i_data;
-                       nd_terminate_link(inode->i_link, inode->i_size,
-                               sizeof(SYSV_I(inode)->i_data) - 1);
-               }
+               inode->i_op = &sysv_symlink_inode_operations;
+               inode->i_mapping->a_ops = &sysv_aops;
        } else
                init_special_inode(inode, inode->i_mode, rdev);
 }
index e67aeac..4b74c97 100644 (file)
@@ -136,6 +136,9 @@ drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
 
 void drm_atomic_legacy_backoff(struct drm_atomic_state *state);
 
+void
+drm_atomic_clean_old_fb(struct drm_device *dev, unsigned plane_mask, int ret);
+
 int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
 int __must_check drm_atomic_commit(struct drm_atomic_state *state);
 int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
index 9c747cb..d2f4147 100644 (file)
@@ -342,10 +342,10 @@ int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
                               struct irq_phys_map *map, bool level);
 void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
-int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu);
 struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
                                           int virt_irq, int irq);
 int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
 
 #define irqchip_in_kernel(k)   (!!((k)->arch.vgic.in_kernel))
 #define vgic_initialized(k)    (!!((k)->arch.vgic.nr_cpus))
index 3fe27f8..c0d2b79 100644 (file)
@@ -794,6 +794,8 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
                         struct scsi_ioctl_command __user *);
 
+extern int blk_queue_enter(struct request_queue *q, gfp_t gfp);
+extern void blk_queue_exit(struct request_queue *q);
 extern void blk_start_queue(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
index a8a335b..758a029 100644 (file)
@@ -197,6 +197,16 @@ static inline struct configfs_subsystem *to_configfs_subsystem(struct config_gro
 int configfs_register_subsystem(struct configfs_subsystem *subsys);
 void configfs_unregister_subsystem(struct configfs_subsystem *subsys);
 
+int configfs_register_group(struct config_group *parent_group,
+                           struct config_group *group);
+void configfs_unregister_group(struct config_group *group);
+
+struct config_group *
+configfs_register_default_group(struct config_group *parent_group,
+                               const char *name,
+                               struct config_item_type *item_type);
+void configfs_unregister_default_group(struct config_group *group);
+
 /* These functions can sleep and can alloc with GFP_KERNEL */
 /* WARNING: These cannot be called underneath configfs callbacks!! */
 int configfs_depend_item(struct configfs_subsystem *subsys, struct config_item *target);
index 6523109..8942af0 100644 (file)
@@ -271,7 +271,7 @@ static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
 
 static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
 {
-       return gfp_flags & __GFP_DIRECT_RECLAIM;
+       return (bool __force)(gfp_flags & __GFP_DIRECT_RECLAIM);
 }
 
 #ifdef CONFIG_HIGHMEM
index 251a1d3..75b66ec 100644 (file)
@@ -168,6 +168,8 @@ struct hid_item {
 #define HID_UP_MSVENDOR                0xff000000
 #define HID_UP_CUSTOM          0x00ff0000
 #define HID_UP_LOGIVENDOR      0xffbc0000
+#define HID_UP_LOGIVENDOR2   0xff090000
+#define HID_UP_LOGIVENDOR3   0xff430000
 #define HID_UP_LNVENDOR                0xffa00000
 #define HID_UP_SENSOR          0x00200000
 
@@ -563,6 +565,9 @@ struct hid_device {                                                 /* device report descriptor */
        wait_queue_head_t debug_wait;
 };
 
+#define to_hid_device(pdev) \
+       container_of(pdev, struct hid_device, dev)
+
 static inline void *hid_get_drvdata(struct hid_device *hdev)
 {
        return dev_get_drvdata(&hdev->dev);
@@ -712,6 +717,9 @@ struct hid_driver {
        struct device_driver driver;
 };
 
+#define to_hid_driver(pdrv) \
+       container_of(pdrv, struct hid_driver, driver)
+
 /**
  * hid_ll_driver - low level driver callbacks
  * @start: called on probe to start the device
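
The new to_hid_device()/to_hid_driver() macros are thin wrappers around container_of(): given a pointer to the embedded struct device (or device_driver), they recover the enclosing structure from the member's offset. A standalone, simplified version of the idiom (the kernel macro adds type checking via typeof):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct device {
        const char *name;
};

struct hid_like_device {
        int vendor;
        struct device dev;      /* embedded generic device */
};

#define to_hid_like_device(pdev) \
        container_of(pdev, struct hid_like_device, dev)

int main(void)
{
        struct hid_like_device hid = { .vendor = 0x46d, .dev = { "hid0" } };
        struct device *dev = &hid.dev;

        /* Walk back from the embedded member to the containing struct. */
        printf("vendor = 0x%x\n", to_hid_like_device(dev)->vendor);
        return 0;
}
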
index 484604d..e15828f 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/mutex.h>
-#include <linux/spinlock.h>
 
 struct kref {
        atomic_t refcount;
@@ -99,38 +98,6 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
        return kref_sub(kref, 1, release);
 }
 
-/**
- * kref_put_spinlock_irqsave - decrement refcount for object.
- * @kref: object.
- * @release: pointer to the function that will clean up the object when the
- *          last reference to the object is released.
- *          This pointer is required, and it is not acceptable to pass kfree
- *          in as this function.
- * @lock: lock to take in release case
- *
- * Behaves identical to kref_put with one exception.  If the reference count
- * drops to zero, the lock will be taken atomically wrt dropping the reference
- * count.  The release function has to call spin_unlock() without _irqrestore.
- */
-static inline int kref_put_spinlock_irqsave(struct kref *kref,
-               void (*release)(struct kref *kref),
-               spinlock_t *lock)
-{
-       unsigned long flags;
-
-       WARN_ON(release == NULL);
-       if (atomic_add_unless(&kref->refcount, -1, 1))
-               return 0;
-       spin_lock_irqsave(lock, flags);
-       if (atomic_dec_and_test(&kref->refcount)) {
-               release(kref);
-               local_irq_restore(flags);
-               return 1;
-       }
-       spin_unlock_irqrestore(lock, flags);
-       return 0;
-}
-
 static inline int kref_put_mutex(struct kref *kref,
                                 void (*release)(struct kref *kref),
                                 struct mutex *lock)
index 5706a21..c923350 100644 (file)
@@ -460,6 +460,17 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
             (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
             idx++)
 
+static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
+{
+       struct kvm_vcpu *vcpu;
+       int i;
+
+       kvm_for_each_vcpu(i, vcpu, kvm)
+               if (vcpu->vcpu_id == id)
+                       return vcpu;
+       return NULL;
+}
+
 #define kvm_for_each_memslot(memslot, slots)   \
        for (memslot = &slots->memslots[0];     \
              memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
index 69c9057..3db5552 100644 (file)
@@ -58,7 +58,6 @@ enum {
 struct nvm_id_group {
        u8      mtype;
        u8      fmtype;
-       u16     res16;
        u8      num_ch;
        u8      num_lun;
        u8      num_pln;
@@ -74,9 +73,9 @@ struct nvm_id_group {
        u32     tbet;
        u32     tbem;
        u32     mpos;
+       u32     mccap;
        u16     cpar;
-       u8      res[913];
-} __packed;
+};
 
 struct nvm_addr_format {
        u8      ch_offset;
@@ -91,19 +90,15 @@ struct nvm_addr_format {
        u8      pg_len;
        u8      sect_offset;
        u8      sect_len;
-       u8      res[4];
 };
 
 struct nvm_id {
        u8      ver_id;
        u8      vmnt;
        u8      cgrps;
-       u8      res[5];
        u32     cap;
        u32     dom;
        struct nvm_addr_format ppaf;
-       u8      ppat;
-       u8      resv[224];
        struct nvm_id_group groups[4];
 } __packed;
 
@@ -123,39 +118,28 @@ struct nvm_tgt_instance {
 #define NVM_VERSION_MINOR 0
 #define NVM_VERSION_PATCH 0
 
-#define NVM_SEC_BITS (8)
-#define NVM_PL_BITS  (6)
-#define NVM_PG_BITS  (16)
 #define NVM_BLK_BITS (16)
-#define NVM_LUN_BITS (10)
+#define NVM_PG_BITS  (16)
+#define NVM_SEC_BITS (8)
+#define NVM_PL_BITS  (8)
+#define NVM_LUN_BITS (8)
 #define NVM_CH_BITS  (8)
 
 struct ppa_addr {
+       /* Generic structure for all addresses */
        union {
-               /* Channel-based PPA format in nand 4x2x2x2x8x10 */
-               struct {
-                       u64 ch          : 4;
-                       u64 sec         : 2; /* 4 sectors per page */
-                       u64 pl          : 2; /* 4 planes per LUN */
-                       u64 lun         : 2; /* 4 LUNs per channel */
-                       u64 pg          : 8; /* 256 pages per block */
-                       u64 blk         : 10;/* 1024 blocks per plane */
-                       u64 resved              : 36;
-               } chnl;
-
-               /* Generic structure for all addresses */
                struct {
+                       u64 blk         : NVM_BLK_BITS;
+                       u64 pg          : NVM_PG_BITS;
                        u64 sec         : NVM_SEC_BITS;
                        u64 pl          : NVM_PL_BITS;
-                       u64 pg          : NVM_PG_BITS;
-                       u64 blk         : NVM_BLK_BITS;
                        u64 lun         : NVM_LUN_BITS;
                        u64 ch          : NVM_CH_BITS;
                } g;
 
                u64 ppa;
        };
-} __packed;
+};
 
 struct nvm_rq {
        struct nvm_tgt_instance *ins;
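
The reworked ppa_addr keeps one generic bit-field layout inside a union with a raw u64, so an address can be manipulated field by field or copied and compared as a single integer, with per-device formats produced separately (generic_to_dev_addr() later in this header diff). A compact standalone illustration of the union trick; it relies on the same 64-bit bit-field support the kernel assumes, and the raw value is compiler/endian dependent:

#include <stdint.h>
#include <stdio.h>

/* Field widths mirror the new NVM_*_BITS defines above. */
struct ppa_like {
        union {
                struct {
                        uint64_t blk : 16;
                        uint64_t pg  : 16;
                        uint64_t sec : 8;
                        uint64_t pl  : 8;
                        uint64_t lun : 8;
                        uint64_t ch  : 8;
                } g;
                uint64_t ppa;   /* whole address as one integer */
        };
};

int main(void)
{
        struct ppa_like a = { .g = { .blk = 42, .pg = 7, .ch = 1 } };

        /* The raw view lets addresses be hashed, compared or stored
         * without touching individual fields. */
        printf("raw = 0x%016llx\n", (unsigned long long)a.ppa);
        return 0;
}
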
@@ -191,11 +175,11 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
 struct nvm_block;
 
 typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
-typedef int (nvm_bb_update_fn)(u32, void *, unsigned int, void *);
+typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
 typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *);
 typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32,
                                nvm_l2p_update_fn *, void *);
-typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, int, unsigned int,
+typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, struct ppa_addr, int,
                                nvm_bb_update_fn *, void *);
 typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int);
 typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *);
@@ -210,7 +194,7 @@ struct nvm_dev_ops {
        nvm_id_fn               *identity;
        nvm_get_l2p_tbl_fn      *get_l2p_tbl;
        nvm_op_bb_tbl_fn        *get_bb_tbl;
-       nvm_op_set_bb_fn        *set_bb;
+       nvm_op_set_bb_fn        *set_bb_tbl;
 
        nvm_submit_io_fn        *submit_io;
        nvm_erase_blk_fn        *erase_block;
@@ -220,7 +204,7 @@ struct nvm_dev_ops {
        nvm_dev_dma_alloc_fn    *dev_dma_alloc;
        nvm_dev_dma_free_fn     *dev_dma_free;
 
-       uint8_t                 max_phys_sect;
+       unsigned int            max_phys_sect;
 };
 
 struct nvm_lun {
@@ -229,7 +213,9 @@ struct nvm_lun {
        int lun_id;
        int chnl_id;
 
+       unsigned int nr_inuse_blocks;   /* Number of used blocks */
        unsigned int nr_free_blocks;    /* Number of unused blocks */
+       unsigned int nr_bad_blocks;     /* Number of bad blocks */
        struct nvm_block *blocks;
 
        spinlock_t lock;
@@ -263,8 +249,7 @@ struct nvm_dev {
        int blks_per_lun;
        int sec_size;
        int oob_size;
-       int addr_mode;
-       struct nvm_addr_format addr_format;
+       struct nvm_addr_format ppaf;
 
        /* Calculated/Cached values. These do not reflect the actual usable
         * blocks at run-time.
@@ -290,118 +275,45 @@ struct nvm_dev {
        char name[DISK_NAME_LEN];
 };
 
-/* fallback conversion */
-static struct ppa_addr __generic_to_linear_addr(struct nvm_dev *dev,
-                                                       struct ppa_addr r)
-{
-       struct ppa_addr l;
-
-       l.ppa = r.g.sec +
-               r.g.pg  * dev->sec_per_pg +
-               r.g.blk * (dev->pgs_per_blk *
-                               dev->sec_per_pg) +
-               r.g.lun * (dev->blks_per_lun *
-                               dev->pgs_per_blk *
-                               dev->sec_per_pg) +
-               r.g.ch * (dev->blks_per_lun *
-                               dev->pgs_per_blk *
-                               dev->luns_per_chnl *
-                               dev->sec_per_pg);
-
-       return l;
-}
-
-/* fallback conversion */
-static struct ppa_addr __linear_to_generic_addr(struct nvm_dev *dev,
-                                                       struct ppa_addr r)
+static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
+                                               struct ppa_addr r)
 {
        struct ppa_addr l;
-       int secs, pgs, blks, luns;
-       sector_t ppa = r.ppa;
 
-       l.ppa = 0;
-
-       div_u64_rem(ppa, dev->sec_per_pg, &secs);
-       l.g.sec = secs;
-
-       sector_div(ppa, dev->sec_per_pg);
-       div_u64_rem(ppa, dev->sec_per_blk, &pgs);
-       l.g.pg = pgs;
-
-       sector_div(ppa, dev->pgs_per_blk);
-       div_u64_rem(ppa, dev->blks_per_lun, &blks);
-       l.g.blk = blks;
-
-       sector_div(ppa, dev->blks_per_lun);
-       div_u64_rem(ppa, dev->luns_per_chnl, &luns);
-       l.g.lun = luns;
-
-       sector_div(ppa, dev->luns_per_chnl);
-       l.g.ch = ppa;
+       l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset;
+       l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset;
+       l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset;
+       l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset;
+       l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset;
+       l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset;
 
        return l;
 }
 
-static struct ppa_addr __generic_to_chnl_addr(struct ppa_addr r)
+static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
+                                               struct ppa_addr r)
 {
        struct ppa_addr l;
 
-       l.ppa = 0;
-
-       l.chnl.sec = r.g.sec;
-       l.chnl.pl = r.g.pl;
-       l.chnl.pg = r.g.pg;
-       l.chnl.blk = r.g.blk;
-       l.chnl.lun = r.g.lun;
-       l.chnl.ch = r.g.ch;
-
-       return l;
-}
-
-static struct ppa_addr __chnl_to_generic_addr(struct ppa_addr r)
-{
-       struct ppa_addr l;
-
-       l.ppa = 0;
-
-       l.g.sec = r.chnl.sec;
-       l.g.pl = r.chnl.pl;
-       l.g.pg = r.chnl.pg;
-       l.g.blk = r.chnl.blk;
-       l.g.lun = r.chnl.lun;
-       l.g.ch = r.chnl.ch;
+       /*
+        * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc.
+        * (r.ppa >> X offset) & X len bitmask, where X is blk, pg, etc.
+        */
+       l.g.blk = (r.ppa >> dev->ppaf.blk_offset) &
+                                       (((1 << dev->ppaf.blk_len) - 1));
+       l.g.pg |= (r.ppa >> dev->ppaf.pg_offset) &
+                                       (((1 << dev->ppaf.pg_len) - 1));
+       l.g.sec |= (r.ppa >> dev->ppaf.sect_offset) &
+                                       (((1 << dev->ppaf.sect_len) - 1));
+       l.g.pl |= (r.ppa >> dev->ppaf.pln_offset) &
+                                       (((1 << dev->ppaf.pln_len) - 1));
+       l.g.lun |= (r.ppa >> dev->ppaf.lun_offset) &
+                                       (((1 << dev->ppaf.lun_len) - 1));
+       l.g.ch |= (r.ppa >> dev->ppaf.ch_offset) &
+                                       (((1 << dev->ppaf.ch_len) - 1));
 
        return l;
 }
 
-static inline struct ppa_addr addr_to_generic_mode(struct nvm_dev *dev,
-                                               struct ppa_addr gppa)
-{
-       switch (dev->addr_mode) {
-       case NVM_ADDRMODE_LINEAR:
-               return __linear_to_generic_addr(dev, gppa);
-       case NVM_ADDRMODE_CHANNEL:
-               return __chnl_to_generic_addr(gppa);
-       default:
-               BUG();
-       }
-       return gppa;
-}
-
-static inline struct ppa_addr generic_to_addr_mode(struct nvm_dev *dev,
-                                               struct ppa_addr gppa)
-{
-       switch (dev->addr_mode) {
-       case NVM_ADDRMODE_LINEAR:
-               return __generic_to_linear_addr(dev, gppa);
-       case NVM_ADDRMODE_CHANNEL:
-               return __generic_to_chnl_addr(gppa);
-       default:
-               BUG();
-       }
-       return gppa;
-}
-
 static inline int ppa_empty(struct ppa_addr ppa_addr)
 {
        return (ppa_addr.ppa == ADDR_EMPTY);
@@ -468,7 +380,7 @@ typedef int (nvmm_end_io_fn)(struct nvm_rq *, int);
 typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
                                                                unsigned long);
 typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
-typedef void (nvmm_free_blocks_print_fn)(struct nvm_dev *);
+typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
 
 struct nvmm_type {
        const char *name;
@@ -492,7 +404,7 @@ struct nvmm_type {
        nvmm_get_lun_fn *get_lun;
 
        /* Statistics */
-       nvmm_free_blocks_print_fn *free_blocks_print;
+       nvmm_lun_info_print_fn *lun_info_print;
        struct list_head list;
 };
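
The replacement helpers above compose and decompose a physical address purely from the per-device ppaf offsets and lengths. A self-contained sketch of that shift-and-mask scheme, using made-up geometry values rather than any real device format (note the 1ULL so the mask stays 64-bit):

#include <stdint.h>
#include <stdio.h>

struct toy_fmt { int blk_offset, blk_len, pg_offset, pg_len; };

static uint64_t pack(const struct toy_fmt *f, uint64_t blk, uint64_t pg)
{
	return (blk << f->blk_offset) | (pg << f->pg_offset);
}

static void unpack(const struct toy_fmt *f, uint64_t ppa,
		   uint64_t *blk, uint64_t *pg)
{
	*blk = (ppa >> f->blk_offset) & ((1ULL << f->blk_len) - 1);
	*pg  = (ppa >> f->pg_offset) & ((1ULL << f->pg_len) - 1);
}

int main(void)
{
	struct toy_fmt f = { .blk_offset = 16, .blk_len = 16,
			     .pg_offset = 0,  .pg_len = 16 };
	uint64_t blk, pg, ppa = pack(&f, 5, 42);

	unpack(&f, ppa, &blk, &pg);
	printf("ppa=0x%llx blk=%llu pg=%llu\n",
	       (unsigned long long)ppa, (unsigned long long)blk,
	       (unsigned long long)pg);
	return 0;
}
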
 
index 570d630..11bbae4 100644 (file)
@@ -251,6 +251,7 @@ struct nfs4_layoutget {
        struct nfs4_layoutget_res res;
        struct rpc_cred *cred;
        gfp_t gfp_flags;
+       long timeout;
 };
 
 struct nfs4_getdeviceinfo_args {
index 36112cd..b90d8ec 100644 (file)
@@ -80,7 +80,7 @@ static inline int of_dma_router_register(struct device_node *np,
 static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
                                                     const char *name)
 {
-       return NULL;
+       return ERR_PTR(-ENODEV);
 }
 
 static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
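
Returning ERR_PTR(-ENODEV) instead of NULL lets callers that already use the IS_ERR()/PTR_ERR() convention treat "DMA OF support compiled out" like any other failure. A toy userspace re-creation of that convention, for illustration only (the real definitions live in <linux/err.h>):

#include <stdio.h>

#define MAX_ERRNO	4095

/* Encode a small negative errno at the top of the pointer range. */
static inline void *toy_err_ptr(long error)
{
	return (void *)error;
}

static inline long toy_ptr_err(const void *ptr)
{
	return (long)ptr;
}

static inline int toy_is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *chan = toy_err_ptr(-19);		/* -ENODEV */

	if (toy_is_err(chan))
		printf("no channel, error %ld\n", toy_ptr_err(chan));
	return 0;
}
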
index e828e7b..6ae25aa 100644 (file)
@@ -412,9 +412,18 @@ struct pci_host_bridge {
        void (*release_fn)(struct pci_host_bridge *);
        void *release_data;
        unsigned int ignore_reset_delay:1;      /* for entire hierarchy */
+       /* Resource alignment requirements */
+       resource_size_t (*align_resource)(struct pci_dev *dev,
+                       const struct resource *res,
+                       resource_size_t start,
+                       resource_size_t size,
+                       resource_size_t align);
 };
 
 #define        to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
+
+struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
+
 void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
                     void (*release_fn)(struct pci_host_bridge *),
                     void *release_data);
index 80af3cd..72ce932 100644 (file)
@@ -71,7 +71,7 @@ struct scpi_ops {
        int (*sensor_get_value)(u16, u32 *);
 };
 
-#if IS_ENABLED(CONFIG_ARM_SCPI_PROTOCOL)
+#if IS_REACHABLE(CONFIG_ARM_SCPI_PROTOCOL)
 struct scpi_ops *get_scpi_ops(void);
 #else
 static inline struct scpi_ops *get_scpi_ops(void) { return NULL; }
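
IS_ENABLED() only asks whether the SCPI driver is configured at all (=y or =m), while IS_REACHABLE() also requires that this consumer could actually link against it: the provider is built-in, or both provider and consumer are modular. A toy model of that distinction (not the kernel's kconfig.h; all macro names here are made up):

#include <stdio.h>

#define PROVIDER_BUILTIN   0	/* provider configured as =m, not =y */
#define PROVIDER_MODULE    1
#define CONSUMER_IS_MODULE 0	/* this consumer is built into the kernel */

#define PROVIDER_ENABLED   (PROVIDER_BUILTIN || PROVIDER_MODULE)
#define PROVIDER_REACHABLE (PROVIDER_BUILTIN || \
			    (PROVIDER_MODULE && CONSUMER_IS_MODULE))

int main(void)
{
	/* enabled=1 reachable=0: a built-in consumer must fall back to
	 * the stub rather than reference the modular symbol. */
	printf("enabled=%d reachable=%d\n", PROVIDER_ENABLED, PROVIDER_REACHABLE);
	return 0;
}
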
index ab1e039..92557bb 100644 (file)
@@ -239,7 +239,6 @@ extern int sigprocmask(int, sigset_t *, sigset_t *);
 extern void set_current_blocked(sigset_t *);
 extern void __set_current_blocked(const sigset_t *);
 extern int show_unhandled_signals;
-extern int sigsuspend(sigset_t *);
 
 struct sigaction {
 #ifndef __ARCH_HAS_IRIX_SIGACTION
index 7c82e3b..2037a86 100644 (file)
@@ -157,6 +157,24 @@ size_t ksize(const void *);
 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
 #endif
 
+/*
+ * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
+ * Intended for arches that get misalignment faults even for 64 bit integer
+ * aligned buffers.
+ */
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
+/*
+ * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
+ * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
+ * aligned pointers.
+ */
+#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
+#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
+#define __assume_page_alignment __assume_aligned(PAGE_SIZE)
+
 /*
  * Kmalloc array related definitions
  */
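
The __assume_*_alignment macros added above expand, on compilers that support it, to GCC's assume_aligned function attribute, which tells the optimizer that the returned pointer honors the stated alignment. A minimal standalone sketch with a stand-in allocator rather than any kernel API:

#include <stdio.h>
#include <stdlib.h>

/* Promise 16-byte alignment of the returned pointer to the optimizer. */
__attribute__((assume_aligned(16)))
static void *toy_alloc(size_t size)
{
	/* aligned_alloc (C11) actually provides the promised alignment;
	 * size must be a multiple of 16 for this call. */
	return aligned_alloc(16, size);
}

int main(void)
{
	void *p = toy_alloc(64);

	printf("%p\n", p);
	free(p);
	return 0;
}
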
@@ -286,8 +304,8 @@ static __always_inline int kmalloc_index(size_t size)
 }
 #endif /* !CONFIG_SLOB */
 
-void *__kmalloc(size_t size, gfp_t flags);
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
+void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment;
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment;
 void kmem_cache_free(struct kmem_cache *, void *);
 
 /*
@@ -298,11 +316,11 @@ void kmem_cache_free(struct kmem_cache *, void *);
  * Note that interrupts must be enabled when calling these functions.
  */
 void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
-bool kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
 
 #ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment;
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment;
 #else
 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
@@ -316,12 +334,12 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f
 #endif
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment;
 
 #ifdef CONFIG_NUMA
 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
                                           gfp_t gfpflags,
-                                          int node, size_t size);
+                                          int node, size_t size) __assume_slab_alignment;
 #else
 static __always_inline void *
 kmem_cache_alloc_node_trace(struct kmem_cache *s,
@@ -354,10 +372,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 }
 #endif /* CONFIG_TRACING */
 
-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order);
+extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
 
 #ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
 #else
 static __always_inline void *
 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
@@ -482,15 +500,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
        return __kmalloc_node(size, flags, node);
 }
 
-/*
- * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
- * Intended for arches that get misalignment faults even for 64 bit integer
- * aligned buffers.
- */
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
-#endif
-
 struct memcg_cache_array {
        struct rcu_head rcu;
        struct kmem_cache *entries[0];
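
With the kmem_cache_alloc_bulk() prototype change in this header, the function reports how many objects it placed in the array instead of a bool; in this series that is either the full request or 0, with nothing left allocated on failure. A hypothetical caller fragment (cache name and error choice are placeholders), just to show the intended check:

void *objs[16];

if (kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs)
    != ARRAY_SIZE(objs))
	return -ENOMEM;	/* nothing to free: the bulk call cleaned up */

/* ... use objs[] ... */
kmem_cache_free_bulk(my_cache, ARRAY_SIZE(objs), objs);
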
index a156b82..c2b66a2 100644 (file)
@@ -524,7 +524,7 @@ asmlinkage long sys_chown(const char __user *filename,
 asmlinkage long sys_lchown(const char __user *filename,
                                uid_t user, gid_t group);
 asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
-#ifdef CONFIG_UID16
+#ifdef CONFIG_HAVE_UID16
 asmlinkage long sys_chown16(const char __user *filename,
                                old_uid_t user, old_gid_t group);
 asmlinkage long sys_lchown16(const char __user *filename,
index 4014a59..613c29b 100644 (file)
@@ -438,7 +438,8 @@ static inline void thermal_zone_device_unregister(
 static inline int thermal_zone_bind_cooling_device(
        struct thermal_zone_device *tz, int trip,
        struct thermal_cooling_device *cdev,
-       unsigned long upper, unsigned long lower)
+       unsigned long upper, unsigned long lower,
+       unsigned int weight)
 { return -ENODEV; }
 static inline int thermal_zone_unbind_cooling_device(
        struct thermal_zone_device *tz, int trip,
index 5b04b0a..5e31f1b 100644 (file)
@@ -607,7 +607,7 @@ extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops);
 
 /* tty_audit.c */
 #ifdef CONFIG_AUDIT
-extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
+extern void tty_audit_add_data(struct tty_struct *tty, const void *data,
                               size_t size, unsigned icanon);
 extern void tty_audit_exit(void);
 extern void tty_audit_fork(struct signal_struct *sig);
@@ -615,8 +615,8 @@ extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
 extern void tty_audit_push(struct tty_struct *tty);
 extern int tty_audit_push_current(void);
 #else
-static inline void tty_audit_add_data(struct tty_struct *tty,
-               unsigned char *data, size_t size, unsigned icanon)
+static inline void tty_audit_add_data(struct tty_struct *tty, const void *data,
+                                     size_t size, unsigned icanon)
 {
 }
 static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch)
index 70d8500..70dd3df 100644 (file)
@@ -35,7 +35,7 @@ typedef __kernel_gid16_t        gid16_t;
 
 typedef unsigned long          uintptr_t;
 
-#ifdef CONFIG_UID16
+#ifdef CONFIG_HAVE_UID16
 /* This is defined by include/asm-{arch}/posix_types.h */
 typedef __kernel_old_uid_t     old_uid_t;
 typedef __kernel_old_gid_t     old_gid_t;
index 0a2c740..aabf0ac 100644 (file)
@@ -474,7 +474,7 @@ struct se_cmd {
        struct completion       cmd_wait_comp;
        const struct target_core_fabric_ops *se_tfo;
        sense_reason_t          (*execute_cmd)(struct se_cmd *);
-       sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
+       sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *);
        void                    *protocol_data;
 
        unsigned char           *t_task_cdb;
index 654bae3..5e62961 100644 (file)
 
 #define NFS_PIPE_DIRNAME "nfs"
 
-/* NFS ioctls */
-/* Let's follow btrfs lead on CLONE to avoid messing userspace */
-#define NFS_IOC_CLONE          _IOW(0x94, 9, int)
-#define NFS_IOC_CLONE_RANGE    _IOW(0x94, 13, int)
-
-struct nfs_ioctl_clone_range_args {
-       __s64 src_fd;
-       __u64 src_off, count;
-       __u64 dst_off;
-};
-
 /*
  * NFS stats. The good thing with these values is that NFSv3 errors are
  * a superset of NFSv2 errors (with the exception of NFSERR_WFLUSH which
index 6e53441..db545cb 100644 (file)
@@ -294,6 +294,12 @@ static int klp_write_object_relocations(struct module *pmod,
 
        for (reloc = obj->relocs; reloc->name; reloc++) {
                if (!klp_is_module(obj)) {
+
+#if defined(CONFIG_RANDOMIZE_BASE)
+                       /* If KASLR has been enabled, adjust old value accordingly */
+                       if (kaslr_enabled())
+                               reloc->val += kaslr_offset();
+#endif
                        ret = klp_verify_vmlinux_symbol(reloc->name,
                                                        reloc->val);
                        if (ret)
index 4579dbb..4b150bc 100644 (file)
@@ -152,8 +152,11 @@ void panic(const char *fmt, ...)
         * We may have ended up stopping the CPU holding the lock (in
         * smp_send_stop()) while still having some valuable data in the console
         * buffer.  Try to acquire the lock then release it regardless of the
-        * result.  The release will also print the buffers out.
+        * result.  The release will also print the buffers out.  Lock debugging
+        * should be disabled to avoid reporting a bad unlock balance when
+        * panic() is not being called from an OOPS.
         */
+       debug_locks_off();
        console_trylock();
        console_unlock();
 
index ca36879..78b3d9f 100644 (file)
@@ -467,7 +467,7 @@ struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
        rcu_read_lock();
        if (type != PIDTYPE_PID)
                task = task->group_leader;
-       pid = get_pid(task->pids[type].pid);
+       pid = get_pid(rcu_dereference(task->pids[type].pid));
        rcu_read_unlock();
        return pid;
 }
@@ -528,7 +528,7 @@ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
        if (likely(pid_alive(task))) {
                if (type != PIDTYPE_PID)
                        task = task->group_leader;
-               nr = pid_nr_ns(task->pids[type].pid, ns);
+               nr = pid_nr_ns(rcu_dereference(task->pids[type].pid), ns);
        }
        rcu_read_unlock();
 
index c0b01fe..f3f1f7a 100644 (file)
@@ -3503,7 +3503,7 @@ SYSCALL_DEFINE0(pause)
 
 #endif
 
-int sigsuspend(sigset_t *set)
+static int sigsuspend(sigset_t *set)
 {
        current->saved_sigmask = current->blocked;
        set_current_blocked(set);
index 75f1d05..9c6045a 100644 (file)
@@ -1887,12 +1887,6 @@ rb_event_index(struct ring_buffer_event *event)
        return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
 }
 
-static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
-{
-       cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
-       cpu_buffer->reader_page->read = 0;
-}
-
 static void rb_inc_iter(struct ring_buffer_iter *iter)
 {
        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
@@ -2803,8 +2797,11 @@ rb_reserve_next_event(struct ring_buffer *buffer,
 
        event = __rb_reserve_next(cpu_buffer, &info);
 
-       if (unlikely(PTR_ERR(event) == -EAGAIN))
+       if (unlikely(PTR_ERR(event) == -EAGAIN)) {
+               if (info.add_timestamp)
+                       info.length -= RB_LEN_TIME_EXTEND;
                goto again;
+       }
 
        if (!event)
                goto out_fail;
@@ -3626,7 +3623,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 
        /* Finally update the reader page to the new head */
        cpu_buffer->reader_page = reader;
-       rb_reset_reader_page(cpu_buffer);
+       cpu_buffer->reader_page->read = 0;
 
        if (overwrite != cpu_buffer->last_overrun) {
                cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
@@ -3636,6 +3633,10 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
        goto again;
 
  out:
+       /* Update the read_stamp on the first event */
+       if (reader && reader->read == 0)
+               cpu_buffer->read_stamp = reader->page->time_stamp;
+
        arch_spin_unlock(&cpu_buffer->lock);
        local_irq_restore(flags);
 
index c29ddeb..62fe06b 100644 (file)
@@ -2009,7 +2009,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
                /*
                 * Be somewhat over-protective like KSM for now!
                 */
-               if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
+               if (*vm_flags & VM_NO_THP)
                        return -EINVAL;
                *vm_flags &= ~VM_NOHUGEPAGE;
                *vm_flags |= VM_HUGEPAGE;
@@ -2025,7 +2025,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
                /*
                 * Be somewhat over-protective like KSM for now!
                 */
-               if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
+               if (*vm_flags & VM_NO_THP)
                        return -EINVAL;
                *vm_flags &= ~VM_HUGEPAGE;
                *vm_flags |= VM_NOHUGEPAGE;
index d41b21b..bc0a8d8 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/kmemleak.h>
 #include <linux/memblock.h>
 #include <linux/memory.h>
 #include <linux/mm.h>
@@ -444,6 +445,7 @@ int kasan_module_alloc(void *addr, size_t size)
 
        if (ret) {
                find_vm_area(addr)->flags |= VM_KASAN;
+               kmemleak_ignore(ret);
                return 0;
        }
 
index deb679c..c387430 100644 (file)
@@ -3015,9 +3015,9 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                } else {
                        /*
                         * The fault handler has no page to lock, so it holds
-                        * i_mmap_lock for write to protect against truncate.
+                        * i_mmap_lock for read to protect against truncate.
                         */
-                       i_mmap_unlock_write(vma->vm_file->f_mapping);
+                       i_mmap_unlock_read(vma->vm_file->f_mapping);
                }
                goto uncharge_out;
        }
@@ -3031,9 +3031,9 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        } else {
                /*
                 * The fault handler has no page to lock, so it holds
-                * i_mmap_lock for write to protect against truncate.
+                * i_mmap_lock for read to protect against truncate.
                 */
-               i_mmap_unlock_write(vma->vm_file->f_mapping);
+               i_mmap_unlock_read(vma->vm_file->f_mapping);
        }
        return ret;
 uncharge_out:
index 2c90357..3e4d654 100644 (file)
@@ -1542,7 +1542,9 @@ static void balance_dirty_pages(struct address_space *mapping,
        for (;;) {
                unsigned long now = jiffies;
                unsigned long dirty, thresh, bg_thresh;
-               unsigned long m_dirty, m_thresh, m_bg_thresh;
+               unsigned long m_dirty = 0;      /* stop bogus uninit warnings */
+               unsigned long m_thresh = 0;
+               unsigned long m_bg_thresh = 0;
 
                /*
                 * Unstable writes are a feature of certain networked
index e0819fa..4765c97 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3419,7 +3419,7 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
 
-bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
                                                                void **p)
 {
        return __kmem_cache_alloc_bulk(s, flags, size, p);
index 27492eb..7b60871 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -170,7 +170,7 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
  * may be allocated or freed using these operations.
  */
 void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
-bool __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
 
 #ifdef CONFIG_MEMCG_KMEM
 /*
index d88e97c..3c6a86b 100644 (file)
@@ -112,7 +112,7 @@ void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
                kmem_cache_free(s, p[i]);
 }
 
-bool __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
+int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
                                                                void **p)
 {
        size_t i;
@@ -121,10 +121,10 @@ bool __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
                void *x = p[i] = kmem_cache_alloc(s, flags);
                if (!x) {
                        __kmem_cache_free_bulk(s, i, p);
-                       return false;
+                       return 0;
                }
        }
-       return true;
+       return i;
 }
 
 #ifdef CONFIG_MEMCG_KMEM
index 0d7e5df..17e8f8c 100644 (file)
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -617,7 +617,7 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
 
-bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
                                                                void **p)
 {
        return __kmem_cache_alloc_bulk(s, flags, size, p);
index 7cb4bf9..4699751 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1065,11 +1065,15 @@ bad:
        return 0;
 }
 
+/* Supports checking bulk free of a constructed freelist */
 static noinline struct kmem_cache_node *free_debug_processing(
-       struct kmem_cache *s, struct page *page, void *object,
+       struct kmem_cache *s, struct page *page,
+       void *head, void *tail, int bulk_cnt,
        unsigned long addr, unsigned long *flags)
 {
        struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+       void *object = head;
+       int cnt = 0;
 
        spin_lock_irqsave(&n->list_lock, *flags);
        slab_lock(page);
@@ -1077,6 +1081,9 @@ static noinline struct kmem_cache_node *free_debug_processing(
        if (!check_slab(s, page))
                goto fail;
 
+next_object:
+       cnt++;
+
        if (!check_valid_pointer(s, page, object)) {
                slab_err(s, page, "Invalid object pointer 0x%p", object);
                goto fail;
@@ -1107,8 +1114,19 @@ static noinline struct kmem_cache_node *free_debug_processing(
        if (s->flags & SLAB_STORE_USER)
                set_track(s, object, TRACK_FREE, addr);
        trace(s, page, object, 0);
+       /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
        init_object(s, object, SLUB_RED_INACTIVE);
+
+       /* Reached end of constructed freelist yet? */
+       if (object != tail) {
+               object = get_freepointer(s, object);
+               goto next_object;
+       }
 out:
+       if (cnt != bulk_cnt)
+               slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
+                        bulk_cnt, cnt);
+
        slab_unlock(page);
        /*
         * Keep node_lock to preserve integrity
@@ -1204,7 +1222,7 @@ unsigned long kmem_cache_flags(unsigned long object_size,
 
        return flags;
 }
-#else
+#else /* !CONFIG_SLUB_DEBUG */
 static inline void setup_object_debug(struct kmem_cache *s,
                        struct page *page, void *object) {}
 
@@ -1212,7 +1230,8 @@ static inline int alloc_debug_processing(struct kmem_cache *s,
        struct page *page, void *object, unsigned long addr) { return 0; }
 
 static inline struct kmem_cache_node *free_debug_processing(
-       struct kmem_cache *s, struct page *page, void *object,
+       struct kmem_cache *s, struct page *page,
+       void *head, void *tail, int bulk_cnt,
        unsigned long addr, unsigned long *flags) { return NULL; }
 
 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
@@ -1273,14 +1292,21 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
        return memcg_kmem_get_cache(s, flags);
 }
 
-static inline void slab_post_alloc_hook(struct kmem_cache *s,
-                                       gfp_t flags, void *object)
+static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
+                                       size_t size, void **p)
 {
+       size_t i;
+
        flags &= gfp_allowed_mask;
-       kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
-       kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
+       for (i = 0; i < size; i++) {
+               void *object = p[i];
+
+               kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
+               kmemleak_alloc_recursive(object, s->object_size, 1,
+                                        s->flags, flags);
+               kasan_slab_alloc(s, object);
+       }
        memcg_kmem_put_cache(s);
-       kasan_slab_alloc(s, object);
 }
 
 static inline void slab_free_hook(struct kmem_cache *s, void *x)
@@ -1308,6 +1334,29 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
        kasan_slab_free(s, x);
 }
 
+static inline void slab_free_freelist_hook(struct kmem_cache *s,
+                                          void *head, void *tail)
+{
+/*
+ * The compiler cannot detect that this function can be removed if
+ * slab_free_hook() evaluates to nothing.  Thus, catch all relevant debug config options here.
+ */
+#if defined(CONFIG_KMEMCHECK) ||               \
+       defined(CONFIG_LOCKDEP) ||              \
+       defined(CONFIG_DEBUG_KMEMLEAK) ||       \
+       defined(CONFIG_DEBUG_OBJECTS_FREE) ||   \
+       defined(CONFIG_KASAN)
+
+       void *object = head;
+       void *tail_obj = tail ? : head;
+
+       do {
+               slab_free_hook(s, object);
+       } while ((object != tail_obj) &&
+                (object = get_freepointer(s, object)));
+#endif
+}
+
 static void setup_object(struct kmem_cache *s, struct page *page,
                                void *object)
 {
@@ -2295,23 +2344,15 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
  * And if we were unable to get a new slab from the partial slab lists then
  * we need to allocate a new slab. This is the slowest path since it involves
  * a call to the page allocator and the setup of a new slab.
+ *
+ * Version of __slab_alloc to use when we know that interrupts are
+ * already disabled (which is the case for bulk allocation).
  */
-static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
                          unsigned long addr, struct kmem_cache_cpu *c)
 {
        void *freelist;
        struct page *page;
-       unsigned long flags;
-
-       local_irq_save(flags);
-#ifdef CONFIG_PREEMPT
-       /*
-        * We may have been preempted and rescheduled on a different
-        * cpu before disabling interrupts. Need to reload cpu area
-        * pointer.
-        */
-       c = this_cpu_ptr(s->cpu_slab);
-#endif
 
        page = c->page;
        if (!page)
@@ -2369,7 +2410,6 @@ load_freelist:
        VM_BUG_ON(!c->page->frozen);
        c->freelist = get_freepointer(s, freelist);
        c->tid = next_tid(c->tid);
-       local_irq_restore(flags);
        return freelist;
 
 new_slab:
@@ -2386,7 +2426,6 @@ new_slab:
 
        if (unlikely(!freelist)) {
                slab_out_of_memory(s, gfpflags, node);
-               local_irq_restore(flags);
                return NULL;
        }
 
@@ -2402,10 +2441,34 @@ new_slab:
        deactivate_slab(s, page, get_freepointer(s, freelist));
        c->page = NULL;
        c->freelist = NULL;
-       local_irq_restore(flags);
        return freelist;
 }
 
+/*
+ * A variant that disables interrupts and compensates for possible
+ * cpu changes by refetching the per cpu area pointer.
+ */
+static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+                         unsigned long addr, struct kmem_cache_cpu *c)
+{
+       void *p;
+       unsigned long flags;
+
+       local_irq_save(flags);
+#ifdef CONFIG_PREEMPT
+       /*
+        * We may have been preempted and rescheduled on a different
+        * cpu before disabling interrupts. Need to reload cpu area
+        * pointer.
+        */
+       c = this_cpu_ptr(s->cpu_slab);
+#endif
+
+       p = ___slab_alloc(s, gfpflags, node, addr, c);
+       local_irq_restore(flags);
+       return p;
+}
+
 /*
  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
  * have the fastpath folded into their functions. So no function call
@@ -2419,7 +2482,7 @@ new_slab:
 static __always_inline void *slab_alloc_node(struct kmem_cache *s,
                gfp_t gfpflags, int node, unsigned long addr)
 {
-       void **object;
+       void *object;
        struct kmem_cache_cpu *c;
        struct page *page;
        unsigned long tid;
@@ -2498,7 +2561,7 @@ redo:
        if (unlikely(gfpflags & __GFP_ZERO) && object)
                memset(object, 0, s->object_size);
 
-       slab_post_alloc_hook(s, gfpflags, object);
+       slab_post_alloc_hook(s, gfpflags, 1, &object);
 
        return object;
 }
@@ -2569,10 +2632,11 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
  * handling required then we can return immediately.
  */
 static void __slab_free(struct kmem_cache *s, struct page *page,
-                       void *x, unsigned long addr)
+                       void *head, void *tail, int cnt,
+                       unsigned long addr)
+
 {
        void *prior;
-       void **object = (void *)x;
        int was_frozen;
        struct page new;
        unsigned long counters;
@@ -2582,7 +2646,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
        stat(s, FREE_SLOWPATH);
 
        if (kmem_cache_debug(s) &&
-               !(n = free_debug_processing(s, page, x, addr, &flags)))
+           !(n = free_debug_processing(s, page, head, tail, cnt,
+                                       addr, &flags)))
                return;
 
        do {
@@ -2592,10 +2657,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                }
                prior = page->freelist;
                counters = page->counters;
-               set_freepointer(s, object, prior);
+               set_freepointer(s, tail, prior);
                new.counters = counters;
                was_frozen = new.frozen;
-               new.inuse--;
+               new.inuse -= cnt;
                if ((!new.inuse || !prior) && !was_frozen) {
 
                        if (kmem_cache_has_cpu_partial(s) && !prior) {
@@ -2626,7 +2691,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
        } while (!cmpxchg_double_slab(s, page,
                prior, counters,
-               object, new.counters,
+               head, new.counters,
                "__slab_free"));
 
        if (likely(!n)) {
@@ -2691,15 +2756,20 @@ slab_empty:
  *
  * If fastpath is not possible then fall back to __slab_free where we deal
  * with all sorts of special processing.
+ *
+ * Bulk free of a freelist with several objects (all pointing to the
+ * same page) is possible by specifying a head and tail pointer, plus an
+ * object count (cnt). Bulk free is indicated by the tail pointer being set.
  */
-static __always_inline void slab_free(struct kmem_cache *s,
-                       struct page *page, void *x, unsigned long addr)
+static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
+                                     void *head, void *tail, int cnt,
+                                     unsigned long addr)
 {
-       void **object = (void *)x;
+       void *tail_obj = tail ? : head;
        struct kmem_cache_cpu *c;
        unsigned long tid;
 
-       slab_free_hook(s, x);
+       slab_free_freelist_hook(s, head, tail);
 
 redo:
        /*
@@ -2718,19 +2788,19 @@ redo:
        barrier();
 
        if (likely(page == c->page)) {
-               set_freepointer(s, object, c->freelist);
+               set_freepointer(s, tail_obj, c->freelist);
 
                if (unlikely(!this_cpu_cmpxchg_double(
                                s->cpu_slab->freelist, s->cpu_slab->tid,
                                c->freelist, tid,
-                               object, next_tid(tid)))) {
+                               head, next_tid(tid)))) {
 
                        note_cmpxchg_failure("slab_free", s, tid);
                        goto redo;
                }
                stat(s, FREE_FASTPATH);
        } else
-               __slab_free(s, page, x, addr);
+               __slab_free(s, page, head, tail_obj, cnt, addr);
 
 }
 
@@ -2739,59 +2809,116 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
        s = cache_from_obj(s, x);
        if (!s)
                return;
-       slab_free(s, virt_to_head_page(x), x, _RET_IP_);
+       slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
        trace_kmem_cache_free(_RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
-/* Note that interrupts must be enabled when calling this function. */
-void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
-{
-       struct kmem_cache_cpu *c;
+struct detached_freelist {
        struct page *page;
-       int i;
+       void *tail;
+       void *freelist;
+       int cnt;
+};
 
-       local_irq_disable();
-       c = this_cpu_ptr(s->cpu_slab);
+/*
+ * This function progressively scans the array of free objects (with
+ * a limited look-ahead) and extracts the objects that belong to the
+ * same page.  It builds a detached freelist directly within those
+ * objects.  This can happen without any synchronization, because the
+ * objects are owned by the running process.  The freelist is built up
+ * as a singly linked list in the objects themselves.  The idea is that
+ * this detached freelist can then be bulk transferred to the real
+ * freelist(s), requiring only a single synchronization primitive.
+ * Look-ahead in the array is limited for performance reasons.
+ */
+static int build_detached_freelist(struct kmem_cache *s, size_t size,
+                                  void **p, struct detached_freelist *df)
+{
+       size_t first_skipped_index = 0;
+       int lookahead = 3;
+       void *object;
 
-       for (i = 0; i < size; i++) {
-               void *object = p[i];
+       /* Always re-init detached_freelist */
+       df->page = NULL;
 
-               BUG_ON(!object);
-               /* kmem cache debug support */
-               s = cache_from_obj(s, object);
-               if (unlikely(!s))
-                       goto exit;
-               slab_free_hook(s, object);
+       do {
+               object = p[--size];
+       } while (!object && size);
 
-               page = virt_to_head_page(object);
+       if (!object)
+               return 0;
 
-               if (c->page == page) {
-                       /* Fastpath: local CPU free */
-                       set_freepointer(s, object, c->freelist);
-                       c->freelist = object;
-               } else {
-                       c->tid = next_tid(c->tid);
-                       local_irq_enable();
-                       /* Slowpath: overhead locked cmpxchg_double_slab */
-                       __slab_free(s, page, object, _RET_IP_);
-                       local_irq_disable();
-                       c = this_cpu_ptr(s->cpu_slab);
+       /* Start new detached freelist */
+       set_freepointer(s, object, NULL);
+       df->page = virt_to_head_page(object);
+       df->tail = object;
+       df->freelist = object;
+       p[size] = NULL; /* mark object processed */
+       df->cnt = 1;
+
+       while (size) {
+               object = p[--size];
+               if (!object)
+                       continue; /* Skip processed objects */
+
+               /* df->page is always set at this point */
+               if (df->page == virt_to_head_page(object)) {
+                       /* Opportunistically build the freelist */
+                       set_freepointer(s, object, df->freelist);
+                       df->freelist = object;
+                       df->cnt++;
+                       p[size] = NULL; /* mark object processed */
+
+                       continue;
                }
+
+               /* Limit look ahead search */
+               if (!--lookahead)
+                       break;
+
+               if (!first_skipped_index)
+                       first_skipped_index = size + 1;
        }
-exit:
-       c->tid = next_tid(c->tid);
-       local_irq_enable();
+
+       return first_skipped_index;
+}
+
+
+/* Note that interrupts must be enabled when calling this function. */
+void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
+{
+       if (WARN_ON(!size))
+               return;
+
+       do {
+               struct detached_freelist df;
+               struct kmem_cache *s;
+
+               /* Support for memcg */
+               s = cache_from_obj(orig_s, p[size - 1]);
+
+               size = build_detached_freelist(s, size, p, &df);
+               if (unlikely(!df.page))
+                       continue;
+
+               slab_free(s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
+       } while (likely(size));
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
 
 /* Note that interrupts must be enabled when calling this function. */
-bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
-                          void **p)
+int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+                         void **p)
 {
        struct kmem_cache_cpu *c;
        int i;
 
+       /* memcg and kmem_cache debug support */
+       s = slab_pre_alloc_hook(s, flags);
+       if (unlikely(!s))
+               return false;
        /*
         * Drain objects in the per cpu slab, while disabling local
         * IRQs, which protects against PREEMPT and interrupts
@@ -2804,36 +2931,20 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
                void *object = c->freelist;
 
                if (unlikely(!object)) {
-                       local_irq_enable();
                        /*
                         * Invoking slow path likely have side-effect
                         * of re-populating per CPU c->freelist
                         */
-                       p[i] = __slab_alloc(s, flags, NUMA_NO_NODE,
+                       p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
                                            _RET_IP_, c);
-                       if (unlikely(!p[i])) {
-                               __kmem_cache_free_bulk(s, i, p);
-                               return false;
-                       }
-                       local_irq_disable();
+                       if (unlikely(!p[i]))
+                               goto error;
+
                        c = this_cpu_ptr(s->cpu_slab);
                        continue; /* goto for-loop */
                }
-
-               /* kmem_cache debug support */
-               s = slab_pre_alloc_hook(s, flags);
-               if (unlikely(!s)) {
-                       __kmem_cache_free_bulk(s, i, p);
-                       c->tid = next_tid(c->tid);
-                       local_irq_enable();
-                       return false;
-               }
-
                c->freelist = get_freepointer(s, object);
                p[i] = object;
-
-               /* kmem_cache debug support */
-               slab_post_alloc_hook(s, flags, object);
        }
        c->tid = next_tid(c->tid);
        local_irq_enable();
@@ -2846,7 +2957,14 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
                        memset(p[j], 0, s->object_size);
        }
 
-       return true;
+       /* memcg and kmem_cache debug support */
+       slab_post_alloc_hook(s, flags, size, p);
+       return i;
+error:
+       local_irq_enable();
+       slab_post_alloc_hook(s, flags, i, p);
+       __kmem_cache_free_bulk(s, i, p);
+       return 0;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
 
@@ -3511,7 +3629,7 @@ void kfree(const void *x)
                __free_kmem_pages(page, compound_order(page));
                return;
        }
-       slab_free(page->slab_cache, page, object, _RET_IP_);
+       slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
 }
 EXPORT_SYMBOL(kfree);
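
The detached-freelist scheme introduced in this file can be illustrated outside the kernel: repeatedly pull the objects in the array that share a "page", chain them through their own first word, and hand each chain over as a unit. A self-contained toy with an illustrative page size (the real code additionally bounds its look-ahead to a few mismatches):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TOY_PAGE_SHIFT 12

static uintptr_t toy_page(void *obj)
{
	return (uintptr_t)obj >> TOY_PAGE_SHIFT;
}

/* Build one freelist from the tail of p[]; NULL out consumed slots. */
static void *toy_detach_freelist(void **p, size_t *size, int *cnt)
{
	void *head = NULL;
	uintptr_t page = 0;
	size_t i = *size;

	*cnt = 0;
	while (i--) {
		void *obj = p[i];

		if (!obj)
			continue;
		if (!head)
			page = toy_page(obj);
		else if (toy_page(obj) != page)
			continue;	/* different page: leave for a later pass */
		*(void **)obj = head;	/* chain through the object itself */
		head = obj;
		p[i] = NULL;
		(*cnt)++;
	}
	/* trim trailing NULLs so the caller's loop terminates */
	while (*size && !p[*size - 1])
		(*size)--;
	return head;
}

int main(void)
{
	void *p[4];
	size_t size = 4;
	int cnt;

	for (int i = 0; i < 4; i++)
		p[i] = malloc(32);

	while (size) {
		void *head = toy_detach_freelist(p, &size, &cnt);

		printf("detached %d object(s), head=%p\n", cnt, head);
		/* walk the chain and release it in one go */
		while (head) {
			void *next = *(void **)head;

			free(head);
			head = next;
		}
	}
	return 0;
}
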
 
index d045634..8e3c9c5 100644 (file)
@@ -1443,7 +1443,6 @@ struct vm_struct *remove_vm_area(const void *addr)
                vmap_debug_free_range(va->va_start, va->va_end);
                kasan_free_shadow(vm);
                free_unmap_vmap_area(va);
-               vm->size -= PAGE_SIZE;
 
                return vm;
        }
@@ -1468,8 +1467,8 @@ static void __vunmap(const void *addr, int deallocate_pages)
                return;
        }
 
-       debug_check_no_locks_freed(addr, area->size);
-       debug_check_no_obj_freed(addr, area->size);
+       debug_check_no_locks_freed(addr, get_vm_area_size(area));
+       debug_check_no_obj_freed(addr, get_vm_area_size(area));
 
        if (deallocate_pages) {
                int i;
index 229956b..95f82d8 100644 (file)
@@ -353,12 +353,20 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
 {
        struct rpc_xprt *xprt = req->rq_xprt;
        struct svc_serv *bc_serv = xprt->bc_serv;
+       struct xdr_buf *rq_rcv_buf = &req->rq_rcv_buf;
 
        spin_lock(&xprt->bc_pa_lock);
        list_del(&req->rq_bc_pa_list);
        xprt_dec_alloc_count(xprt, 1);
        spin_unlock(&xprt->bc_pa_lock);
 
+       if (copied <= rq_rcv_buf->head[0].iov_len) {
+               rq_rcv_buf->head[0].iov_len = copied;
+               rq_rcv_buf->page_len = 0;
+       } else {
+               rq_rcv_buf->page_len = copied - rq_rcv_buf->head[0].iov_len;
+       }
+
        req->rq_private_buf.len = copied;
        set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
 
index bc5b7b5..7fccf96 100644 (file)
@@ -1363,6 +1363,7 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
        memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
        memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
        memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));
+       rqstp->rq_arg.len = req->rq_private_buf.len;
 
        /* reset result send buffer "put" position */
        resv->iov_len = 0;
index 125b906..638a38e 100755 (executable)
@@ -2711,7 +2711,7 @@ $kernelversion = get_kernel_version();
 
 # generate a sequence of code that will splice in highlighting information
 # using the s// operator.
-foreach my $k (keys @highlights) {
+for (my $k = 0; $k < @highlights; $k++) {
     my $pattern = $highlights[$k][0];
     my $result = $highlights[$k][1];
 #   print STDERR "scanning pattern:$pattern, highlight:($result)\n";
index 927db9f..696ccfa 100644 (file)
@@ -845,6 +845,8 @@ static int encrypted_update(struct key *key, struct key_preparsed_payload *prep)
        size_t datalen = prep->datalen;
        int ret = 0;
 
+       if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
+               return -ENOKEY;
        if (datalen <= 0 || datalen > 32767 || !prep->data)
                return -EINVAL;
 
index 903dace..16dec53 100644 (file)
@@ -1007,13 +1007,16 @@ static void trusted_rcu_free(struct rcu_head *rcu)
  */
 static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
 {
-       struct trusted_key_payload *p = key->payload.data[0];
+       struct trusted_key_payload *p;
        struct trusted_key_payload *new_p;
        struct trusted_key_options *new_o;
        size_t datalen = prep->datalen;
        char *datablob;
        int ret = 0;
 
+       if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
+               return -ENOKEY;
+       p = key->payload.data[0];
        if (!p->migratable)
                return -EPERM;
        if (datalen <= 0 || datalen > 32767 || !prep->data)
index 28cb30f..8705d79 100644 (file)
@@ -120,7 +120,10 @@ int user_update(struct key *key, struct key_preparsed_payload *prep)
 
        if (ret == 0) {
                /* attach the new data, displacing the old */
-               zap = key->payload.data[0];
+               if (!test_bit(KEY_FLAG_NEGATIVE, &key->flags))
+                       zap = key->payload.data[0];
+               else
+                       zap = NULL;
                rcu_assign_keypointer(key, upayload);
                key->expiry = 0;
        }
index 18643bf..456e1a9 100644 (file)
@@ -638,7 +638,7 @@ void cond_compute_av(struct avtab *ctab, struct avtab_key *key,
 {
        struct avtab_node *node;
 
-       if (!ctab || !key || !avd || !xperms)
+       if (!ctab || !key || !avd)
                return;
 
        for (node = avtab_search_node(ctab, key); node;
@@ -657,7 +657,7 @@ void cond_compute_av(struct avtab *ctab, struct avtab_key *key,
                if ((u16)(AVTAB_AUDITALLOW|AVTAB_ENABLED) ==
                    (node->key.specified & (AVTAB_AUDITALLOW|AVTAB_ENABLED)))
                        avd->auditallow |= node->datum.u.data;
-               if ((node->key.specified & AVTAB_ENABLED) &&
+               if (xperms && (node->key.specified & AVTAB_ENABLED) &&
                                (node->key.specified & AVTAB_XPERMS))
                        services_compute_xperms_drivers(xperms, node);
        }
index 5d99436..0cda05c 100644 (file)
@@ -12,9 +12,11 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
 MODULE_LICENSE("GPL v2");
 
 #define OUI_WEISS              0x001c6a
+#define OUI_LOUD               0x000ff2
 
 #define DICE_CATEGORY_ID       0x04
 #define WEISS_CATEGORY_ID      0x00
+#define LOUD_CATEGORY_ID       0x10
 
 static int dice_interface_check(struct fw_unit *unit)
 {
@@ -57,6 +59,8 @@ static int dice_interface_check(struct fw_unit *unit)
        }
        if (vendor == OUI_WEISS)
                category = WEISS_CATEGORY_ID;
+       else if (vendor == OUI_LOUD)
+               category = LOUD_CATEGORY_ID;
        else
                category = DICE_CATEGORY_ID;
        if (device->config_rom[3] != ((vendor << 8) | category) ||
index 8a7fbdc..963f824 100644 (file)
@@ -312,6 +312,10 @@ enum {
        (AZX_DCAPS_INTEL_PCH | AZX_DCAPS_SEPARATE_STREAM_TAG |\
         AZX_DCAPS_I915_POWERWELL)
 
+#define AZX_DCAPS_INTEL_BROXTON \
+       (AZX_DCAPS_INTEL_PCH | AZX_DCAPS_SEPARATE_STREAM_TAG |\
+        AZX_DCAPS_I915_POWERWELL)
+
 /* quirks for ATI SB / AMD Hudson */
 #define AZX_DCAPS_PRESET_ATI_SB \
        (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB |\
@@ -2124,6 +2128,9 @@ static const struct pci_device_id azx_ids[] = {
        /* Sunrise Point-LP */
        { PCI_DEVICE(0x8086, 0x9d70),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+       /* Broxton-P(Apollolake) */
+       { PCI_DEVICE(0x8086, 0x5a98),
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
        /* Haswell */
        { PCI_DEVICE(0x8086, 0x0a0c),
          .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
index 60cd9e7..bdb6f22 100644 (file)
@@ -2378,7 +2378,8 @@ static int patch_generic_hdmi(struct hda_codec *codec)
         * can cover the codec power request, and so need not set this flag.
         * For previous platforms, there is no such power well feature.
         */
-       if (is_valleyview_plus(codec) || is_skylake(codec))
+       if (is_valleyview_plus(codec) || is_skylake(codec) ||
+                       is_broxton(codec))
                codec->core.link_power_control = 1;
 
        if (is_haswell_plus(codec) || is_valleyview_plus(codec)) {
index 2f7b065..9bedf7c 100644 (file)
@@ -1759,6 +1759,7 @@ enum {
        ALC882_FIXUP_NO_PRIMARY_HP,
        ALC887_FIXUP_ASUS_BASS,
        ALC887_FIXUP_BASS_CHMAP,
+       ALC882_FIXUP_DISABLE_AAMIX,
 };
 
 static void alc889_fixup_coef(struct hda_codec *codec,
@@ -1920,6 +1921,8 @@ static void alc882_fixup_no_primary_hp(struct hda_codec *codec,
 
 static void alc_fixup_bass_chmap(struct hda_codec *codec,
                                 const struct hda_fixup *fix, int action);
+static void alc_fixup_disable_aamix(struct hda_codec *codec,
+                                   const struct hda_fixup *fix, int action);
 
 static const struct hda_fixup alc882_fixups[] = {
        [ALC882_FIXUP_ABIT_AW9D_MAX] = {
@@ -2151,6 +2154,10 @@ static const struct hda_fixup alc882_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc_fixup_bass_chmap,
        },
+       [ALC882_FIXUP_DISABLE_AAMIX] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc_fixup_disable_aamix,
+       },
 };
 
 static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -2218,6 +2225,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
        SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
        SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1458, 0xa182, "Gigabyte Z170X-UD3", ALC882_FIXUP_DISABLE_AAMIX),
        SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
        SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
        SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
@@ -4587,6 +4595,7 @@ enum {
        ALC292_FIXUP_DISABLE_AAMIX,
        ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
        ALC275_FIXUP_DELL_XPS,
+       ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -5167,6 +5176,17 @@ static const struct hda_fixup alc269_fixups[] = {
                        {}
                }
        },
+       [ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       /* Disable pass-through path for FRONT 14h */
+                       {0x20, AC_VERB_SET_COEF_INDEX, 0x36},
+                       {0x20, AC_VERB_SET_PROC_COEF, 0x1737},
+                       {}
+               },
+               .chained = true,
+               .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5180,8 +5200,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
        SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
        SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
+       SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
        SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
        SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
+       SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),
        SND_PCI_QUIRK(0x1028, 0x05ca, "Dell Latitude E7240", ALC292_FIXUP_DELL_E7X),
        SND_PCI_QUIRK(0x1028, 0x05cb, "Dell Latitude E7440", ALC292_FIXUP_DELL_E7X),
        SND_PCI_QUIRK(0x1028, 0x05da, "Dell Vostro 5460", ALC290_FIXUP_SUBWOOFER),
@@ -5204,6 +5226,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
        SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
        SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
+       SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
index 826122d..2c7c5eb 100644 (file)
@@ -3110,6 +3110,29 @@ static void stac92hd71bxx_fixup_hp_hdx(struct hda_codec *codec,
        spec->gpio_led = 0x08;
 }
 
+static bool is_hp_output(struct hda_codec *codec, hda_nid_t pin)
+{
+       unsigned int pin_cfg = snd_hda_codec_get_pincfg(codec, pin);
+
+       /* count line-out, too, as the BIOS often sets it that way */
+       return get_defcfg_connect(pin_cfg) != AC_JACK_PORT_NONE &&
+               (get_defcfg_device(pin_cfg) == AC_JACK_LINE_OUT ||
+                get_defcfg_device(pin_cfg) == AC_JACK_HP_OUT);
+}
+
+static void fixup_hp_headphone(struct hda_codec *codec, hda_nid_t pin)
+{
+       unsigned int pin_cfg = snd_hda_codec_get_pincfg(codec, pin);
+
+       /* It was changed in the BIOS to just satisfy MS DTM.
+        * Let's turn it back into a slaved HP
+        */
+       pin_cfg = (pin_cfg & (~AC_DEFCFG_DEVICE)) |
+               (AC_JACK_HP_OUT << AC_DEFCFG_DEVICE_SHIFT);
+       pin_cfg = (pin_cfg & (~(AC_DEFCFG_DEF_ASSOC | AC_DEFCFG_SEQUENCE))) |
+               0x1f;
+       snd_hda_codec_set_pincfg(codec, pin, pin_cfg);
+}
 
 static void stac92hd71bxx_fixup_hp(struct hda_codec *codec,
                                   const struct hda_fixup *fix, int action)
@@ -3119,22 +3142,12 @@ static void stac92hd71bxx_fixup_hp(struct hda_codec *codec,
        if (action != HDA_FIXUP_ACT_PRE_PROBE)
                return;
 
-       if (hp_blike_system(codec->core.subsystem_id)) {
-               unsigned int pin_cfg = snd_hda_codec_get_pincfg(codec, 0x0f);
-               if (get_defcfg_device(pin_cfg) == AC_JACK_LINE_OUT ||
-                       get_defcfg_device(pin_cfg) == AC_JACK_SPEAKER  ||
-                       get_defcfg_device(pin_cfg) == AC_JACK_HP_OUT) {
-                       /* It was changed in the BIOS to just satisfy MS DTM.
-                        * Lets turn it back into slaved HP
-                        */
-                       pin_cfg = (pin_cfg & (~AC_DEFCFG_DEVICE))
-                                       | (AC_JACK_HP_OUT <<
-                                               AC_DEFCFG_DEVICE_SHIFT);
-                       pin_cfg = (pin_cfg & (~(AC_DEFCFG_DEF_ASSOC
-                                                       | AC_DEFCFG_SEQUENCE)))
-                                                               | 0x1f;
-                       snd_hda_codec_set_pincfg(codec, 0x0f, pin_cfg);
-               }
+       /* when both outputs A and F are assigned, these are supposedly
+        * dock and built-in headphones; fix both pin configs
+        */
+       if (is_hp_output(codec, 0x0a) && is_hp_output(codec, 0x0f)) {
+               fixup_hp_headphone(codec, 0x0a);
+               fixup_hp_headphone(codec, 0x0f);
        }
 
        if (find_mute_led_cfg(codec, 1))
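For reference, is_hp_output() and fixup_hp_headphone() above work on the 32-bit pin "configuration default" word defined by the HD Audio spec: bits 31:30 are port connectivity, bits 23:20 the default device, bits 7:4 the association and bits 3:0 the sequence. The fixup forces the device nibble to headphone and sets association/sequence to 0x1f, so the pin becomes a trailing (slave) output of association 1. A self-contained decoder for those fields is sketched below; the macro names are local to the sketch, not the kernel's AC_DEFCFG_* macros, and the sample value is arbitrary.

#include <stdio.h>

/* Field layout of the HDA pin "configuration default" register. */
#define CFG_PORT_CONN(c)   (((c) >> 30) & 0x3)  /* 0=jack 1=none 2=fixed 3=both */
#define CFG_DEVICE(c)      (((c) >> 20) & 0xf)  /* 0=line-out 1=speaker 2=HP ... */
#define CFG_ASSOC(c)       (((c) >> 4) & 0xf)
#define CFG_SEQUENCE(c)    ((c) & 0xf)

static void dump_pincfg(unsigned int cfg)
{
        printf("cfg=0x%08x port_conn=%u device=%u assoc=%u seq=%u\n",
               cfg, CFG_PORT_CONN(cfg), CFG_DEVICE(cfg),
               CFG_ASSOC(cfg), CFG_SEQUENCE(cfg));
}

int main(void)
{
        unsigned int cfg = 0x0321101f;  /* arbitrary example value */

        dump_pincfg(cfg);
        /* what fixup_hp_headphone() does: device := HP (0x2), assoc/seq := 0x1f */
        cfg = (cfg & ~(0xfu << 20)) | (0x2u << 20);
        cfg = (cfg & ~0xffu) | 0x1f;
        dump_pincfg(cfg);
        return 0;
}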
index 7661616..5b4c58c 100644 (file)
@@ -174,6 +174,8 @@ struct snd_usb_midi_in_endpoint {
                u8 running_status_length;
        } ports[0x10];
        u8 seen_f5;
+       bool in_sysex;
+       u8 last_cin;
        u8 error_resubmit;
        int current_port;
 };
@@ -467,6 +469,39 @@ static void snd_usbmidi_maudio_broken_running_status_input(
                }
 }
 
+/*
+ * QinHeng CH345 is buggy: every second event packet inside a SysEx does not
+ * carry CIN 4 but repeats the previously seen CIN, still with three data bytes.
+ */
+static void ch345_broken_sysex_input(struct snd_usb_midi_in_endpoint *ep,
+                                    uint8_t *buffer, int buffer_length)
+{
+       unsigned int i, cin, length;
+
+       for (i = 0; i + 3 < buffer_length; i += 4) {
+               if (buffer[i] == 0 && i > 0)
+                       break;
+               cin = buffer[i] & 0x0f;
+               if (ep->in_sysex &&
+                   cin == ep->last_cin &&
+                   (buffer[i + 1 + (cin == 0x6)] & 0x80) == 0)
+                       cin = 0x4;
+#if 0
+               if (buffer[i + 1] == 0x90) {
+                       /*
+                        * Either a corrupted running status or a real note-on
+                        * message; impossible to detect reliably.
+                        */
+               }
+#endif
+               length = snd_usbmidi_cin_length[cin];
+               snd_usbmidi_input_data(ep, 0, &buffer[i + 1], length);
+               ep->in_sysex = cin == 0x4;
+               if (!ep->in_sysex)
+                       ep->last_cin = cin;
+       }
+}
+
 /*
  * CME protocol: like the standard protocol, but SysEx commands are sent as a
  * single USB packet preceded by a 0x0F byte.
@@ -660,6 +695,12 @@ static struct usb_protocol_ops snd_usbmidi_cme_ops = {
        .output_packet = snd_usbmidi_output_standard_packet,
 };
 
+static struct usb_protocol_ops snd_usbmidi_ch345_broken_sysex_ops = {
+       .input = ch345_broken_sysex_input,
+       .output = snd_usbmidi_standard_output,
+       .output_packet = snd_usbmidi_output_standard_packet,
+};
+
 /*
  * AKAI MPD16 protocol:
  *
@@ -1341,6 +1382,7 @@ static int snd_usbmidi_out_endpoint_create(struct snd_usb_midi *umidi,
                 * Various chips declare a packet size larger than 4 bytes, but
                 * do not actually work with larger packets:
                 */
+       case USB_ID(0x0a67, 0x5011): /* Medeli DD305 */
        case USB_ID(0x0a92, 0x1020): /* ESI M4U */
        case USB_ID(0x1430, 0x474b): /* RedOctane GH MIDI INTERFACE */
        case USB_ID(0x15ca, 0x0101): /* Textech USB Midi Cable */
@@ -2376,6 +2418,10 @@ int snd_usbmidi_create(struct snd_card *card,
                if (err < 0)
                        break;
 
+               err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
+               break;
+       case QUIRK_MIDI_CH345:
+               umidi->usb_protocol_ops = &snd_usbmidi_ch345_broken_sysex_ops;
                err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
                break;
        default:
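Background for ch345_broken_sysex_input() above: USB-MIDI class traffic is framed as 4-byte event packets, where the low nibble of the first byte is the Code Index Number (CIN) and the CIN alone determines how many of the remaining three bytes are MIDI data; CIN 0x4 means "SysEx starts or continues" and carries 3 bytes. The CH345 repeats the previous CIN instead of 0x4 on every second SysEx packet, which is why the workaround rewrites it before indexing the length table. The sketch below is a plain userspace decoder of the standard framing; the length table follows the USB-MIDI 1.0 spec and nothing here is driver API.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Number of valid MIDI bytes for each Code Index Number (USB-MIDI 1.0). */
static const uint8_t cin_length[16] = {
        0, 0, 2, 3, 3, 1, 2, 3, 3, 3, 3, 3, 2, 2, 3, 1
};

static void decode_packets(const uint8_t *buf, size_t len)
{
        size_t i;

        for (i = 0; i + 3 < len; i += 4) {
                unsigned int cable = buf[i] >> 4;
                unsigned int cin = buf[i] & 0x0f;
                unsigned int n = cin_length[cin], j;

                printf("cable %u cin 0x%x:", cable, cin);
                for (j = 0; j < n; j++)
                        printf(" %02x", buf[i + 1 + j]);
                printf("\n");
        }
}

int main(void)
{
        /* A SysEx message split across two well-formed packets:
         * CIN 0x4 = SysEx starts/continues, CIN 0x7 = SysEx ends with 3 bytes.
         * A CH345 would (wrongly) reuse the previous CIN in later packets.
         */
        static const uint8_t buf[] = {
                0x04, 0xf0, 0x7e, 0x00,
                0x07, 0x06, 0x01, 0xf7,
        };

        decode_packets(buf, sizeof(buf));
        return 0;
}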
index 1a1e2e4..c60a776 100644 (file)
@@ -2829,6 +2829,17 @@ YAMAHA_DEVICE(0x7010, "UB99"),
        .idProduct = 0x1020,
 },
 
+/* QinHeng devices */
+{
+       USB_DEVICE(0x1a86, 0x752d),
+       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+               .vendor_name = "QinHeng",
+               .product_name = "CH345",
+               .ifnum = 1,
+               .type = QUIRK_MIDI_CH345
+       }
+},
+
 /* KeithMcMillen Stringport */
 {
        USB_DEVICE(0x1f38, 0x0001),
index 5ca80e7..7016ad8 100644 (file)
@@ -538,6 +538,7 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip,
                [QUIRK_MIDI_CME] = create_any_midi_quirk,
                [QUIRK_MIDI_AKAI] = create_any_midi_quirk,
                [QUIRK_MIDI_FTDI] = create_any_midi_quirk,
+               [QUIRK_MIDI_CH345] = create_any_midi_quirk,
                [QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk,
                [QUIRK_AUDIO_FIXED_ENDPOINT] = create_fixed_stream_quirk,
                [QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk,
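The handler table extended above maps each quirk_type value to its creation function through designated initializers, so adding QUIRK_MIDI_CH345 costs exactly one line. A self-contained sketch of the same enum-indexed dispatch pattern follows; the enum values and handlers are invented for illustration only.

#include <stdio.h>

enum quirk_type { QUIRK_IGNORE, QUIRK_MIDI_STANDARD, QUIRK_MIDI_CH345_LIKE, QUIRK_TYPE_COUNT };

typedef int (*quirk_handler)(int ifnum);

static int create_midi(int ifnum)
{
        printf("creating MIDI interface on ifnum %d\n", ifnum);
        return 0;
}

static int ignore(int ifnum)
{
        (void)ifnum;
        return 0;
}

/* Designated initializers keep the mapping readable and order-independent. */
static const quirk_handler handlers[QUIRK_TYPE_COUNT] = {
        [QUIRK_IGNORE]          = ignore,
        [QUIRK_MIDI_STANDARD]   = create_midi,
        [QUIRK_MIDI_CH345_LIKE] = create_midi,  /* same creator, protocol ops differ later */
};

int main(void)
{
        return handlers[QUIRK_MIDI_CH345_LIKE](1);
}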
index 15a1271..b665d85 100644 (file)
@@ -95,6 +95,7 @@ enum quirk_type {
        QUIRK_MIDI_AKAI,
        QUIRK_MIDI_US122L,
        QUIRK_MIDI_FTDI,
+       QUIRK_MIDI_CH345,
        QUIRK_AUDIO_STANDARD_INTERFACE,
        QUIRK_AUDIO_FIXED_ENDPOINT,
        QUIRK_AUDIO_EDIROL_UAXX,
index d6f307d..7dc820a 100644 (file)
@@ -32,6 +32,10 @@ help:
        @echo '  from the kernel command line to build and install one of'
        @echo '  the tools above'
        @echo ''
+       @echo '  $$ make tools/all'
+       @echo ''
+       @echo '  builds all tools.'
+       @echo ''
        @echo '  $$ make tools/install'
        @echo ''
        @echo '  installs all tools.'
@@ -77,6 +81,11 @@ tmon: FORCE
 freefall: FORCE
        $(call descend,laptop/$@)
 
+all: acpi cgroup cpupower hv firewire lguest \
+               perf selftests turbostat usb \
+               virtio vm net x86_energy_perf_policy \
+               tmon freefall
+
 acpi_install:
        $(call descend,power/$(@:_install=),install)
 
@@ -101,7 +110,7 @@ freefall_install:
 install: acpi_install cgroup_install cpupower_install hv_install firewire_install lguest_install \
                perf_install selftests_install turbostat_install usb_install \
                virtio_install vm_install net_install x86_energy_perf_policy_install \
-               tmon freefall_install
+               tmon_install freefall_install
 
 acpi_clean:
        $(call descend,power/acpi,clean)
index 0a945d2..99d127f 100644 (file)
@@ -675,6 +675,7 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
                        .fork           = perf_event__repipe,
                        .exit           = perf_event__repipe,
                        .lost           = perf_event__repipe,
+                       .lost_samples   = perf_event__repipe,
                        .aux            = perf_event__repipe,
                        .itrace_start   = perf_event__repipe,
                        .context_switch = perf_event__repipe,
index 2853ad2..f256fac 100644 (file)
@@ -44,7 +44,7 @@
 struct report {
        struct perf_tool        tool;
        struct perf_session     *session;
-       bool                    force, use_tui, use_gtk, use_stdio;
+       bool                    use_tui, use_gtk, use_stdio;
        bool                    hide_unresolved;
        bool                    dont_use_callchains;
        bool                    show_full_info;
@@ -678,7 +678,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
                   "file", "vmlinux pathname"),
        OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
                   "file", "kallsyms pathname"),
-       OPT_BOOLEAN('f', "force", &report.force, "don't complain, do it"),
+       OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
        OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
                    "load module symbols - WARNING: use only with -k and LIVE kernel"),
        OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
@@ -832,7 +832,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
        }
 
        file.path  = input_name;
-       file.force = report.force;
+       file.force = symbol_conf.force;
 
 repeat:
        session = perf_session__new(&file, false, &report.tool);
index e5afb89..fa9eb92 100644 (file)
@@ -1430,7 +1430,6 @@ close_file_and_continue:
 
 struct popup_action {
        struct thread           *thread;
-       struct dso              *dso;
        struct map_symbol       ms;
        int                     socket;
 
@@ -1565,7 +1564,6 @@ add_dso_opt(struct hist_browser *browser, struct popup_action *act,
                return 0;
 
        act->ms.map = map;
-       act->dso = map->dso;
        act->fn = do_zoom_dso;
        return 1;
 }
@@ -1827,7 +1825,6 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
 
        while (1) {
                struct thread *thread = NULL;
-               struct dso *dso = NULL;
                struct map *map = NULL;
                int choice = 0;
                int socked_id = -1;
@@ -1839,8 +1836,6 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                if (browser->he_selection != NULL) {
                        thread = hist_browser__selected_thread(browser);
                        map = browser->selection->map;
-                       if (map)
-                               dso = map->dso;
                        socked_id = browser->he_selection->socket;
                }
                switch (key) {
@@ -1874,7 +1869,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                        hist_browser__dump(browser);
                        continue;
                case 'd':
-                       actions->dso = dso;
+                       actions->ms.map = map;
                        do_zoom_dso(browser, actions);
                        continue;
                case 'V':
index d909459..217b5a6 100644 (file)
@@ -76,6 +76,7 @@ struct perf_tool build_id__mark_dso_hit_ops = {
        .exit   = perf_event__exit_del_thread,
        .attr            = perf_event__process_attr,
        .build_id        = perf_event__process_build_id,
+       .ordered_events  = true,
 };
 
 int build_id__sprintf(const u8 *build_id, int len, char *bf)
index 7c0c083..425df5c 100644 (file)
@@ -933,6 +933,7 @@ static struct dso *__dso__findlink_by_longname(struct rb_root *root,
                /* Add new node and rebalance tree */
                rb_link_node(&dso->rb_node, parent, p);
                rb_insert_color(&dso->rb_node, root);
+               dso->root = root;
        }
        return NULL;
 }
@@ -945,15 +946,30 @@ static inline struct dso *__dso__find_by_longname(struct rb_root *root,
 
 void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
 {
+       struct rb_root *root = dso->root;
+
        if (name == NULL)
                return;
 
        if (dso->long_name_allocated)
                free((char *)dso->long_name);
 
+       if (root) {
+               rb_erase(&dso->rb_node, root);
+               /*
+                * __dso__findlink_by_longname() isn't guaranteed to add it
+                * back, so a clean removal is required here.
+                */
+               RB_CLEAR_NODE(&dso->rb_node);
+               dso->root = NULL;
+       }
+
        dso->long_name           = name;
        dso->long_name_len       = strlen(name);
        dso->long_name_allocated = name_allocated;
+
+       if (root)
+               __dso__findlink_by_longname(root, dso, NULL);
 }
 
 void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
@@ -1046,6 +1062,7 @@ struct dso *dso__new(const char *name)
                dso->kernel = DSO_TYPE_USER;
                dso->needs_swap = DSO_SWAP__UNSET;
                RB_CLEAR_NODE(&dso->rb_node);
+               dso->root = NULL;
                INIT_LIST_HEAD(&dso->node);
                INIT_LIST_HEAD(&dso->data.open_entry);
                pthread_mutex_init(&dso->lock, NULL);
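The dso->root back-pointer added above exists because the tree used by __dso__findlink_by_longname() is keyed by the DSO's long name: changing the name of a node that is still linked would silently break the tree's ordering, so dso__set_long_name() now unlinks, renames, then re-inserts. The standalone sketch below shows the same invariant with an ordinary sorted array rather than perf's rbtree (illustration only).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct obj { char name[32]; };

static int cmp(const void *a, const void *b)
{
        const struct obj *x = a, *y = b;
        return strcmp(x->name, y->name);
}

int main(void)
{
        struct obj v[3] = { { "bash" }, { "libc.so" }, { "vmlinux" } };
        struct obj key = { "zsh" };

        qsort(v, 3, sizeof(v[0]), cmp);

        /* Renaming in place breaks the ordering invariant ... */
        strcpy(v[0].name, "zsh");
        printf("lookup after in-place rename: %s\n",
               bsearch(&key, v, 3, sizeof(v[0]), cmp) ? "found" : "NOT found");

        /* ... so the container must be re-ordered (re-inserted) after a key change. */
        qsort(v, 3, sizeof(v[0]), cmp);
        printf("lookup after re-sorting:      %s\n",
               bsearch(&key, v, 3, sizeof(v[0]), cmp) ? "found" : "NOT found");
        return 0;
}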
index fc8db9c..45ec4d0 100644 (file)
@@ -135,6 +135,7 @@ struct dso {
        pthread_mutex_t  lock;
        struct list_head node;
        struct rb_node   rb_node;       /* rbtree node sorted by long name */
+       struct rb_root   *root;         /* root of rbtree that rb_node is in */
        struct rb_root   symbols[MAP__NR_TYPES];
        struct rb_root   symbol_names[MAP__NR_TYPES];
        struct {
index 5ef90be..8b303ff 100644 (file)
@@ -91,6 +91,7 @@ static void dsos__purge(struct dsos *dsos)
 
        list_for_each_entry_safe(pos, n, &dsos->head, node) {
                RB_CLEAR_NODE(&pos->rb_node);
+               pos->root = NULL;
                list_del_init(&pos->node);
                dso__put(pos);
        }
index bd8f03d..05012bb 100644 (file)
@@ -1183,7 +1183,7 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
                        container_of(pf, struct trace_event_finder, pf);
        struct perf_probe_point *pp = &pf->pev->point;
        struct probe_trace_event *tev;
-       struct perf_probe_arg *args;
+       struct perf_probe_arg *args = NULL;
        int ret, i;
 
        /* Check number of tevs */
@@ -1198,19 +1198,23 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
        ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr,
                                     pp->retprobe, pp->function, &tev->point);
        if (ret < 0)
-               return ret;
+               goto end;
 
        tev->point.realname = strdup(dwarf_diename(sc_die));
-       if (!tev->point.realname)
-               return -ENOMEM;
+       if (!tev->point.realname) {
+               ret = -ENOMEM;
+               goto end;
+       }
 
        pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
                 tev->point.offset);
 
        /* Expand special probe argument if exist */
        args = zalloc(sizeof(struct perf_probe_arg) * MAX_PROBE_ARGS);
-       if (args == NULL)
-               return -ENOMEM;
+       if (args == NULL) {
+               ret = -ENOMEM;
+               goto end;
+       }
 
        ret = expand_probe_args(sc_die, pf, args);
        if (ret < 0)
@@ -1234,6 +1238,10 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
        }
 
 end:
+       if (ret) {
+               clear_probe_trace_event(tev);
+               tf->ntevs--;
+       }
        free(args);
        return ret;
 }
@@ -1246,7 +1254,7 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
        struct trace_event_finder tf = {
                        .pf = {.pev = pev, .callback = add_probe_trace_event},
                        .max_tevs = probe_conf.max_probes, .mod = dbg->mod};
-       int ret;
+       int ret, i;
 
        /* Allocate result tevs array */
        *tevs = zalloc(sizeof(struct probe_trace_event) * tf.max_tevs);
@@ -1258,6 +1266,8 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
 
        ret = debuginfo__find_probes(dbg, &tf.pf);
        if (ret < 0) {
+               for (i = 0; i < tf.ntevs; i++)
+                       clear_probe_trace_event(&tf.tevs[i]);
                zfree(tevs);
                return ret;
        }
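add_probe_trace_event() above moves from early returns to a single end: label so that every failure path both frees args and rolls back the half-initialized tev slot (clear_probe_trace_event() plus the ntevs decrement), and debuginfo__find_trace_events() now clears any partially built events on error. A generic, self-contained sketch of that centralized-cleanup pattern follows; the struct and helper names are invented, not the perf code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

/* Hypothetical result slot; stands in for a partially filled trace event. */
struct event { char *name; char *args; };

static int fill_event(struct event *ev, const char *name, int fail_args)
{
        int ret = 0;

        ev->name = strdup(name);
        if (!ev->name) {
                ret = -ENOMEM;
                goto end;
        }

        ev->args = fail_args ? NULL : strdup("arg0 arg1");
        if (!ev->args) {
                ret = -ENOMEM;
                goto end;
        }

end:
        if (ret) {
                /* roll back the partially initialized slot, as the patch does
                 * with clear_probe_trace_event() and tf->ntevs-- */
                free(ev->name);
                memset(ev, 0, sizeof(*ev));
        }
        return ret;
}

int main(void)
{
        struct event ev = { 0 };
        int ret;

        ret = fill_event(&ev, "probe", 1);      /* forced failure */
        printf("failure path -> %d, slot cleared: %s\n", ret, ev.name ? "no" : "yes");

        ret = fill_event(&ev, "probe", 0);      /* success */
        printf("success path -> %d, name=%s args=%s\n", ret, ev.name, ev.args);

        free(ev.name);
        free(ev.args);
        return 0;
}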
index b4cc766..cd08027 100644 (file)
@@ -654,19 +654,24 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
        struct map_groups *kmaps = map__kmaps(map);
        struct map *curr_map;
        struct symbol *pos;
-       int count = 0, moved = 0;
+       int count = 0;
+       struct rb_root old_root = dso->symbols[map->type];
        struct rb_root *root = &dso->symbols[map->type];
        struct rb_node *next = rb_first(root);
 
        if (!kmaps)
                return -1;
 
+       *root = RB_ROOT;
+
        while (next) {
                char *module;
 
                pos = rb_entry(next, struct symbol, rb_node);
                next = rb_next(&pos->rb_node);
 
+               rb_erase_init(&pos->rb_node, &old_root);
+
                module = strchr(pos->name, '\t');
                if (module)
                        *module = '\0';
@@ -674,28 +679,21 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
                curr_map = map_groups__find(kmaps, map->type, pos->start);
 
                if (!curr_map || (filter && filter(curr_map, pos))) {
-                       rb_erase_init(&pos->rb_node, root);
                        symbol__delete(pos);
-               } else {
-                       pos->start -= curr_map->start - curr_map->pgoff;
-                       if (pos->end)
-                               pos->end -= curr_map->start - curr_map->pgoff;
-                       if (curr_map->dso != map->dso) {
-                               rb_erase_init(&pos->rb_node, root);
-                               symbols__insert(
-                                       &curr_map->dso->symbols[curr_map->type],
-                                       pos);
-                               ++moved;
-                       } else {
-                               ++count;
-                       }
+                       continue;
                }
+
+               pos->start -= curr_map->start - curr_map->pgoff;
+               if (pos->end)
+                       pos->end -= curr_map->start - curr_map->pgoff;
+               symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
+               ++count;
        }
 
        /* Symbols have been adjusted */
        dso->adjust_symbols = 1;
 
-       return count + moved;
+       return count;
 }
 
 /*
@@ -1438,9 +1436,9 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
                if (lstat(dso->name, &st) < 0)
                        goto out;
 
-               if (st.st_uid && (st.st_uid != geteuid())) {
+               if (!symbol_conf.force && st.st_uid && (st.st_uid != geteuid())) {
                        pr_warning("File %s not owned by current user or root, "
-                               "ignoring it.\n", dso->name);
+                                  "ignoring it (use -f to override).\n", dso->name);
                        goto out;
                }
 
index 40073c6..dcd786e 100644 (file)
@@ -84,6 +84,7 @@ struct symbol_conf {
        unsigned short  priv_size;
        unsigned short  nr_events;
        bool            try_vmlinux_path,
+                       force,
                        ignore_vmlinux,
                        ignore_vmlinux_buildid,
                        show_kernel_path,
index d8e4b20..0dac7e0 100644 (file)
@@ -1173,9 +1173,9 @@ dump_nhm_platform_info(void)
        unsigned long long msr;
        unsigned int ratio;
 
-       get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr);
+       get_msr(base_cpu, MSR_PLATFORM_INFO, &msr);
 
-       fprintf(stderr, "cpu%d: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr);
+       fprintf(stderr, "cpu%d: MSR_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr);
 
        ratio = (msr >> 40) & 0xFF;
        fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency frequency\n",
@@ -1807,7 +1807,7 @@ void check_permissions()
  *
  * MSR_SMI_COUNT                   0x00000034
  *
- * MSR_NHM_PLATFORM_INFO           0x000000ce
+ * MSR_PLATFORM_INFO               0x000000ce
  * MSR_NHM_SNB_PKG_CST_CFG_CTL     0x000000e2
  *
  * MSR_PKG_C3_RESIDENCY            0x000003f8
@@ -1876,7 +1876,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
        get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
        pkg_cstate_limit = pkg_cstate_limits[msr & 0xF];
 
-       get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr);
+       get_msr(base_cpu, MSR_PLATFORM_INFO, &msr);
        base_ratio = (msr >> 8) & 0xFF;
 
        base_hz = base_ratio * bclk * 1000000;
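The rename above only tracks the generic MSR name; the field layout turbostat reads from MSR 0xCE is unchanged: bits 15:8 hold the base (non-turbo) ratio and bits 47:40 the maximum-efficiency ratio, each multiplied by the bus clock to get MHz. A quick standalone illustration of that extraction follows; the MSR value is an arbitrary example, not read from hardware.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t msr = 0x80000002300ULL;        /* arbitrary example value */
        double bclk = 100.0;                    /* MHz; typical bus clock */

        unsigned int base_ratio    = (msr >> 8) & 0xff;         /* bits 15:8  */
        unsigned int max_eff_ratio = (msr >> 40) & 0xff;        /* bits 47:40 */

        printf("base:           %u * %.0f = %.0f MHz\n",
               base_ratio, bclk, base_ratio * bclk);
        printf("max efficiency: %u * %.0f = %.0f MHz\n",
               max_eff_ratio, bclk, max_eff_ratio * bclk);
        return 0;
}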
index 3224a04..0558bb9 100644 (file)
@@ -27,7 +27,7 @@ o The build system shall remain as simple as possible, avoiding any archive or
 o Where possible, any helper functions or other package-wide code shall be
   implemented in header files, avoiding the need to compile intermediate object
   files.
-o External dependendencies shall remain as minimal as possible. Currently gcc
+o External dependencies shall remain as minimal as possible. Currently gcc
   and glibc are the only dependencies.
 o Tests return 0 for success and < 0 for failure.
 
index e38cc54..882fe83 100644 (file)
@@ -492,6 +492,9 @@ TEST_SIGNAL(KILL_one_arg_six, SIGSYS)
        pid_t parent = getppid();
        int fd;
        void *map1, *map2;
+       int page_size = sysconf(_SC_PAGESIZE);
+
+       ASSERT_LT(0, page_size);
 
        ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
        ASSERT_EQ(0, ret);
@@ -504,16 +507,16 @@ TEST_SIGNAL(KILL_one_arg_six, SIGSYS)
 
        EXPECT_EQ(parent, syscall(__NR_getppid));
        map1 = (void *)syscall(sysno,
-               NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, fd, PAGE_SIZE);
+               NULL, page_size, PROT_READ, MAP_PRIVATE, fd, page_size);
        EXPECT_NE(MAP_FAILED, map1);
        /* mmap2() should never return. */
        map2 = (void *)syscall(sysno,
-                NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE);
+                NULL, page_size, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE);
        EXPECT_EQ(MAP_FAILED, map2);
 
        /* The test failed, so clean up the resources. */
-       munmap(map1, PAGE_SIZE);
-       munmap(map2, PAGE_SIZE);
+       munmap(map1, page_size);
+       munmap(map2, page_size);
        close(fd);
 }
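The change above replaces the compile-time PAGE_SIZE constant, which is not reliably available or correct for userspace on every architecture, with the runtime value from sysconf(_SC_PAGESIZE), so the mmap length and offset stay page-aligned regardless of kernel configuration. A minimal standalone example of the same query:

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        void *p;

        if (page_size <= 0) {
                perror("sysconf");
                return 1;
        }

        /* Anonymous mapping of exactly one page, sized at run time. */
        p = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        printf("page size: %ld bytes, one page mapped at %p\n", page_size, p);
        munmap(p, page_size);
        return 0;
}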
 
index bcf5ec7..5a60162 100644 (file)
@@ -128,6 +128,7 @@ static const char * const page_flag_names[] = {
        [KPF_THP]               = "t:thp",
        [KPF_BALLOON]           = "o:balloon",
        [KPF_ZERO_PAGE]         = "z:zero_page",
+       [KPF_IDLE]              = "i:idle_page",
 
        [KPF_RESERVED]          = "r:reserved",
        [KPF_MLOCKED]           = "m:mlocked",
index 21a0ab2..69bca18 100644 (file)
@@ -221,17 +221,23 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
        kvm_timer_update_state(vcpu);
 
        /*
-        * If we enter the guest with the virtual input level to the VGIC
-        * asserted, then we have already told the VGIC what we need to, and
-        * we don't need to exit from the guest until the guest deactivates
-        * the already injected interrupt, so therefore we should set the
-        * hardware active state to prevent unnecessary exits from the guest.
-        *
-        * Conversely, if the virtual input level is deasserted, then always
-        * clear the hardware active state to ensure that hardware interrupts
-        * from the timer triggers a guest exit.
-        */
-       if (timer->irq.level)
+        * If we enter the guest with the virtual input level to the VGIC
+        * asserted, then we have already told the VGIC what we need to, and
+        * we don't need to exit from the guest until the guest deactivates
+        * the already injected interrupt, so we should set the hardware
+        * active state to prevent unnecessary exits from the guest.
+        *
+        * Also, if we enter the guest with the virtual timer interrupt active,
+        * then it must be active on the physical distributor, because we set
+        * the HW bit and the guest must be able to deactivate the virtual and
+        * physical interrupt at the same time.
+        *
+        * Conversely, if the virtual input level is deasserted and the virtual
+        * interrupt is not active, then always clear the hardware active state
+        * to ensure that hardware interrupts from the timer trigger a guest
+        * exit.
+        */
+       if (timer->irq.level || kvm_vgic_map_is_active(vcpu, timer->map))
                phys_active = true;
        else
                phys_active = false;
index 5335383..65461f8 100644 (file)
@@ -1096,6 +1096,27 @@ static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu)
        vgic_set_lr(vcpu, lr_nr, vlr);
 }
 
+static bool dist_active_irq(struct kvm_vcpu *vcpu)
+{
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+       return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
+}
+
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
+{
+       int i;
+
+       for (i = 0; i < vcpu->arch.vgic_cpu.nr_lr; i++) {
+               struct vgic_lr vlr = vgic_get_lr(vcpu, i);
+
+               if (vlr.irq == map->virt_irq && vlr.state & LR_STATE_ACTIVE)
+                       return true;
+       }
+
+       return dist_active_irq(vcpu);
+}
+
 /*
  * An interrupt may have been disabled after being made pending on the
  * CPU interface (the classic case is a timer running while we're
@@ -1248,7 +1269,7 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
         * may have been serviced from another vcpu. In all cases,
         * move along.
         */
-       if (!kvm_vgic_vcpu_pending_irq(vcpu) && !kvm_vgic_vcpu_active_irq(vcpu))
+       if (!kvm_vgic_vcpu_pending_irq(vcpu) && !dist_active_irq(vcpu))
                goto epilog;
 
        /* SGIs */
@@ -1396,25 +1417,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
 {
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-       struct irq_phys_map *map;
-       bool phys_active;
        bool level_pending;
-       int ret;
 
        if (!(vlr.state & LR_HW))
                return false;
 
-       map = vgic_irq_map_search(vcpu, vlr.irq);
-       BUG_ON(!map);
-
-       ret = irq_get_irqchip_state(map->irq,
-                                   IRQCHIP_STATE_ACTIVE,
-                                   &phys_active);
-
-       WARN_ON(ret);
-
-       if (phys_active)
-               return 0;
+       if (vlr.state & LR_STATE_ACTIVE)
+               return false;
 
        spin_lock(&dist->lock);
        level_pending = process_queued_irq(vcpu, lr, vlr);
@@ -1479,17 +1488,6 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
        return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
 }
 
-int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu)
-{
-       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
-       if (!irqchip_in_kernel(vcpu->kvm))
-               return 0;
-
-       return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
-}
-
-
 void vgic_kick_vcpus(struct kvm *kvm)
 {
        struct kvm_vcpu *vcpu;